// SPDX-License-Identifier: GPL-2.0
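/*
 * Userspace stand-ins for the kernel slab allocator, used by the radix
 * tree test harness.  nr_allocated tracks the number of outstanding
 * objects so tests can check for leaks.
 */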
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int kmalloc_verbose;
int test_verbose;
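
/*
 * A minimal mock of the kernel's kmem_cache: a mutex-protected freelist
 * of recycled objects, chained through the parent pointer of the
 * struct radix_tree_node embedded at the start of each object.
 */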
struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
};
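
/*
 * Allocations without __GFP_DIRECT_RECLAIM (i.e. ones that may not
 * sleep) always fail, which lets tests exercise their failure paths.
 * Note that objects recycled from the freelist are returned as-is:
 * neither the constructor nor __GFP_ZERO is reapplied.
 */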
void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		/* Pop a recycled object off the freelist. */
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align) {
			if (posix_memalign(&p, cachep->align, cachep->size))
				p = NULL;
		} else {
			p = malloc(cachep->size);
		}
		if (!p)
			return NULL;
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}
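
/*
 * Freed objects go back onto the per-cache freelist for reuse unless
 * the list already holds more than ten of them, or the cache is
 * aligned, in which case the object is poisoned and handed back to the
 * system allocator.
 */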
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}
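
/*
 * kmalloc() maps directly onto malloc(), with the same simulated
 * failure of non-sleeping allocations as kmem_cache_alloc().
 */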
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	ret = malloc(size);
	if (!ret)
		return NULL;
	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	if (gfp & __GFP_ZERO)
		memset(ret, 0, size);
	return ret;
}
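
/* As in the kernel, kfree(NULL) is a no-op. */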
void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to malloc\n", p);
	free(p);
}
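
/*
 * The name and flags arguments are accepted for API compatibility with
 * the kernel but are otherwise ignored by this mock.
 */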
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	if (!ret)
		return NULL;
	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
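
/*
 * Example usage (a sketch, not part of the harness itself): a test can
 * create a cache once, then allocate and free objects from it, using
 * nr_allocated to check that nothing leaked (assuming no other
 * allocations are outstanding):
 *
 *	struct kmem_cache *cache;
 *	void *obj;
 *
 *	cache = kmem_cache_create("test", sizeof(struct radix_tree_node),
 *				  0, 0, NULL);
 *	obj = kmem_cache_alloc(cache, GFP_KERNEL);
 *	assert(obj);
 *	kmem_cache_free(cache, obj);
 *	assert(nr_allocated == 0);
 */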