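/*
 * Userspace stand-ins for the kernel slab allocator, used to exercise
 * the radix tree code outside the kernel.  pthread mutexes replace the
 * kernel's internal locking, and liburcu supplies the atomic counters.
 */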
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

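/*
 * nr_allocated tracks every live allocation so tests can check for
 * leaks; preempt_count stands in for the kernel's preemption counter.
 */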
int nr_allocated;
int preempt_count;

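/*
 * A minimal kmem_cache: a mutex-protected free list of objects,
 * chained through the private_data field of each free node.
 */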
struct kmem_cache {
	pthread_mutex_t lock;
	int size;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
};

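/*
 * Allocations with __GFP_NOWARN fail unconditionally, which lets tests
 * exercise the allocation-failure paths.  Otherwise, pop an object off
 * the cache's free list, or fall back to malloc() and run the
 * constructor on the fresh object.
 */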
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct radix_tree_node *node;

	if (flags & __GFP_NOWARN)
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		cachep->nr_objs--;
		node = cachep->objs;
		cachep->objs = node->private_data;
		pthread_mutex_unlock(&cachep->lock);
		node->private_data = NULL;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		node = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(node);
	}

	uatomic_inc(&nr_allocated);
	return node;
}

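/*
 * Keep a small pool of free objects on the cache's free list; anything
 * beyond that is poisoned and returned to the system allocator so that
 * use-after-free bugs show up quickly.
 */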
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->private_data = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

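/*
 * A thin wrapper around malloc(); the gfp flags are ignored, only the
 * allocation count matters here.
 */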
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret = malloc(size);
	uatomic_inc(&nr_allocated);
	return ret;
}

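/* Like kfree() in the kernel, freeing NULL is a no-op. */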
void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	free(p);
}

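/*
 * The name, offset and flags arguments are accepted for API
 * compatibility but otherwise ignored.
 */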
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
	unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}