commit a421e7a9cdbcd2a01b3cc02c1e031e9e90c36923
parent 922ce7fefb250da0e8532c5c5c1679d5facde497
Author: Joris Vink <joris@coders.se>
Date: Thu, 5 Jan 2023 22:47:29 +0100
Add memory protection with KORE_MEM_GUARD.
When KORE_MEM_GUARD is set in the environment when Kore is started,
it enables a few memory protection techniques for all Kore pools:
1) The metadata is placed away from the actual user pointer returned.
2) Each entry in a pool is placed in such a way that it is followed
immediately by a guard page which has PROT_NONE. Accessing a guard
page will cause an immediate crash.
3) Each entry is marked with PROT_NONE until it is allocated. Once it
is returned to a pool it becomes PROT_NONE again, protecting against
use-after-free bugs.
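To make techniques 2 and 3 concrete, here is a minimal standalone
sketch (hypothetical demo code, not the pool implementation from this
commit; real pools also place metadata and canaries around the data):

    #include <sys/mman.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);

        /* One RW data page followed immediately by one guard page. */
        unsigned char *base = mmap(NULL, pagesz * 2,
            PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (base == MAP_FAILED)
            return (1);

        /* Technique 2: make the trailing page inaccessible. */
        if (mprotect(base + pagesz, pagesz, PROT_NONE) == -1)
            return (1);

        memset(base, 'A', pagesz);  /* in bounds: fine */
        base[pagesz] = 'B';         /* overrun: immediate SIGSEGV */

        /* Technique 3 would additionally flip the data page itself
         * to PROT_NONE once the entry returns to the freelist. */
        return (0);
    }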
This commit also removes the magic goo from the mem facilities such
as kore_malloc and friends and moves those checks, as canaries, into
the kore pool facilities instead.
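The canary scheme works roughly as sketched below (hypothetical helper
names, simplified from the kore_pool_get()/kore_pool_put() changes in
src/pool.c further down): a random per-pool value is mixed with the
entry and user pointer addresses, written directly after the user data
on get, and verified on put.

    #include <stdint.h>
    #include <string.h>

    static uint64_t
    canary_compute(uint64_t pool_canary, void *entry, void *uptr)
    {
        return (pool_canary ^ (uintptr_t)entry ^ (uintptr_t)uptr);
    }

    /* On kore_pool_get(): stamp the bytes right after the user data. */
    static void
    canary_write(uint64_t c, uint8_t *uptr, size_t uselen)
    {
        memcpy(uptr + uselen, &c, sizeof(c));
    }

    /* On kore_pool_put(): recompute and compare; a mismatch means
     * the caller wrote past the end of its allocation. */
    static int
    canary_ok(uint64_t c, uint8_t *uptr, size_t uselen)
    {
        uint64_t stored;

        memcpy(&stored, uptr + uselen, sizeof(stored));
        return (stored == c);
    }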
Note that using this will increase memory pressure and decrease performance.
It is recommended to enable this during development to catch bugs.
Diffstat:
 include/kore/kore.h |  25 +++++++++++--------------
 src/kore.c          |  43 ++++++++++++++++++++++++++++---------------
 src/mem.c           | 187 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------
 src/pool.c          | 232 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------
4 files changed, 292 insertions(+), 195 deletions(-)
diff --git a/include/kore/kore.h b/include/kore/kore.h
@@ -607,29 +607,24 @@ struct kore_json_item {
TAILQ_ENTRY(kore_json_item) list;
};
-struct kore_pool_region {
- void *start;
- size_t length;
- LIST_ENTRY(kore_pool_region) list;
-};
-
struct kore_pool_entry {
u_int8_t state;
- struct kore_pool_region *region;
- LIST_ENTRY(kore_pool_entry) list;
+ void *uptr;
+ void *canary;
+ struct kore_pool_entry *nextfree;
};
struct kore_pool {
- size_t elen;
- size_t slen;
- size_t elms;
- size_t inuse;
+ size_t memsz;
size_t growth;
+ size_t pagesz;
+ size_t elmlen;
+ size_t uselen;
+ u_int64_t canary;
volatile int lock;
char *name;
- LIST_HEAD(, kore_pool_region) regions;
- LIST_HEAD(, kore_pool_entry) freelist;
+ struct kore_pool_entry *freelist;
};
struct kore_timer {
@@ -708,6 +703,7 @@ extern int kore_quit;
extern int kore_quiet;
extern int skip_chroot;
extern int skip_runas;
+extern int kore_mem_guard;
extern int kore_foreground;
extern char *kore_pidfile;
@@ -904,6 +900,7 @@ void kore_parse_config_file(FILE *);
/* mem.c */
void *kore_malloc(size_t);
+void *kore_mmap_region(size_t);
void *kore_calloc(size_t, size_t);
void *kore_realloc(void *, size_t);
void kore_free(void *);
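(A side note, not part of the patch: the LIST-based region bookkeeping
above disappears, and free entries now form an intrusive singly linked
stack through nextfree. A hedged sketch of that discipline, with
hypothetical names mirroring the pool.c changes below:)

    #include <stddef.h>

    struct entry {
        struct entry *nextfree;
    };

    /* kore_pool_get() pops the head of the freelist ... */
    static struct entry *
    freelist_pop(struct entry **head)
    {
        struct entry *e = *head;

        if (e != NULL)
            *head = e->nextfree;
        return (e);
    }

    /* ... and kore_pool_put() pushes the entry back on. */
    static void
    freelist_push(struct entry **head, struct entry *e)
    {
        e->nextfree = *head;
        *head = e;
    }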
diff --git a/src/kore.c b/src/kore.c
@@ -61,6 +61,7 @@ int skip_runas = 0;
int skip_chroot = 0;
u_int8_t worker_count = 0;
char **kore_argv = NULL;
+int kore_mem_guard = 0;
int kore_foreground = 0;
char *kore_progname = NULL;
u_int32_t kore_socket_backlog = 5000;
@@ -103,21 +104,30 @@ usage(void)
#endif
printf("\n");
- printf("Available options:\n");
+ printf("Command-line options:\n");
#if !defined(KORE_SINGLE_BINARY)
- printf("\t-c\tconfiguration to use\n");
+ printf("\t-c\tThe configuration file to load when starting.\n");
#endif
-#if defined(KORE_DEBUG)
- printf("\t-d\trun with debug on\n");
-#endif
- printf("\t-f\tstart in foreground\n");
- printf("\t-h\tthis help text\n");
- printf("\t-n\tdo not chroot on any worker\n");
- printf("\t-q\tonly log errors\n");
- printf("\t-r\tdo not change user on any worker\n");
- printf("\t-v\tdisplay %s build information\n", __progname);
+ printf("\t-f\tDo not daemonize, everything runs in the foreground.\n");
+ printf("\t-h\tThis help text.\n");
+ printf("\t-n\tDo not do the chroot privsep step.\n");
+ printf("\t-q\tQuiet mode, only logs errors.\n");
+ printf("\t-r\tDo not do the privsep user swapping step.\n");
+ printf("\t-v\tDisplay %s build information.\n", __progname);
- printf("\nFind more information on https://kore.io\n");
+ printf("\n");
+ printf("Environment options:\n");
+ printf(" env KORE_MEM_GUARD=1\n");
+ printf(" Enables memory pool guards and other protections.\n");
+ printf("\n");
+ printf(" Enabling this will include guard pages for each\n");
+ printf(" pool entry allocations and mark pool entries as\n");
+ printf(" PROT_NONE when unused.\n");
+ printf("\n");
+ printf(" This catches bugs and prevents memory vulnerabilities\n");
+ printf(" but with performance and memory pressure costs.\n");
+
+ printf("\n");
exit(1);
}
@@ -138,17 +148,18 @@ version(void)
#if defined(KORE_USE_TASKS)
printf("tasks ");
#endif
-#if defined(KORE_DEBUG)
- printf("debug ");
-#endif
#if defined(KORE_USE_PYTHON)
printf("python-%s ", PY_VERSION);
#endif
#if defined(KORE_USE_ACME)
printf("acme ");
#endif
+#if defined(KORE_DEBUG)
+ printf("debug ");
+#endif
if (!kore_tls_supported())
printf("notls ");
+
printf("\n");
exit(0);
}
@@ -789,6 +800,8 @@ kore_server_start(int argc, char *argv[])
if (!kore_quiet) {
kore_log(LOG_INFO, "%s %s starting, built=%s",
__progname, kore_version, kore_build_date);
+ kore_log(LOG_INFO, "memory pool protections: %s",
+ kore_mem_guard ? "enabled" : "disabled");
kore_log(LOG_INFO, "built-ins: "
#if defined(__linux__)
"seccomp "
diff --git a/src/mem.c b/src/mem.c
@@ -14,35 +14,32 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <sys/param.h>
+/*
+ * The memory facilities such as kore_malloc / kore_calloc are all
+ * backed by the kore pool system as long as the allocation is
+ * 8192 bytes or smaller.
+ *
+ * Anything over 8192 bytes gets a plain mmap() allocation instead,
+ * which does not benefit from the protections offered by the kore_pool API.
+ */
+
#include <sys/types.h>
+#include <sys/mman.h>
#include <stdlib.h>
#include <stdint.h>
#include "kore.h"
-#define KORE_MEM_BLOCKS 11
-#define KORE_MEM_BLOCK_SIZE_MAX 8192
-#define KORE_MEM_BLOCK_PREALLOC 128
+#define KORE_MEM_POOLS 11
+#define KORE_MEM_POOLS_PREALLOC 32
+#define KORE_MEM_POOLS_SIZE_MAX 8192
-#define KORE_MEM_ALIGN 16
-#define KORE_MEM_MAGIC 0xd0d0
-
-#define KORE_MEM_TAGGED 0x0001
-
-struct memsize {
- size_t len;
- size_t magic;
-} __attribute__((packed));
+#define KORE_MEM_TAGGED 0x0001
struct meminfo {
+ size_t len;
u_int16_t flags;
- u_int16_t magic;
-} __attribute__((packed));
-
-struct memblock {
- struct kore_pool pool;
};
struct tag {
@@ -51,36 +48,38 @@ struct tag {
TAILQ_ENTRY(tag) list;
};
-static inline struct memsize *memsize(void *);
static inline struct meminfo *meminfo(void *);
-static size_t memblock_index(size_t);
+static void *mem_alloc(size_t);
+static size_t mem_index(size_t);
static TAILQ_HEAD(, tag) tags;
static struct kore_pool tag_pool;
-static struct memblock blocks[KORE_MEM_BLOCKS];
+static struct kore_pool mempools[KORE_MEM_POOLS];
void
kore_mem_init(void)
{
+ const char *opt;
int i, len;
char name[32];
- u_int32_t size, elm, mlen;
+ size_t size, elm, mlen;
+
+ if ((opt = getenv("KORE_MEM_GUARD")) != NULL && !strcmp(opt, "1"))
+ kore_mem_guard = 1;
size = 8;
TAILQ_INIT(&tags);
- kore_pool_init(&tag_pool, "tag_pool", sizeof(struct tag), 100);
+ kore_pool_init(&tag_pool, "tag_pool", sizeof(struct tag), 4);
- for (i = 0; i < KORE_MEM_BLOCKS; i++) {
- len = snprintf(name, sizeof(name), "block-%u", size);
+ for (i = 0; i < KORE_MEM_POOLS; i++) {
+ len = snprintf(name, sizeof(name), "block-%zu", size);
if (len == -1 || (size_t)len >= sizeof(name))
fatal("kore_mem_init: snprintf");
- elm = (KORE_MEM_BLOCK_PREALLOC * 1024) / size;
- mlen = sizeof(struct memsize) + size +
- sizeof(struct meminfo) + KORE_MEM_ALIGN;
- mlen = mlen & ~(KORE_MEM_ALIGN - 1);
+ elm = (KORE_MEM_POOLS_PREALLOC * 1024) / size;
+ mlen = sizeof(struct meminfo) + size;
- kore_pool_init(&blocks[i].pool, name, mlen, elm);
+ kore_pool_init(&mempools[i], name, mlen, elm);
size = size << 1;
}
@@ -91,59 +90,43 @@ kore_mem_cleanup(void)
{
int i;
- for (i = 0; i < KORE_MEM_BLOCKS; i++) {
- kore_pool_cleanup(&blocks[i].pool);
+ for (i = 0; i < KORE_MEM_POOLS; i++) {
+ kore_pool_cleanup(&mempools[i]);
}
}
void *
-kore_malloc(size_t len)
+kore_mmap_region(size_t len)
{
- void *ptr;
- struct meminfo *mem;
- struct memsize *size;
- u_int8_t *addr;
- size_t mlen, idx;
-
- if (len == 0)
- len = 8;
-
- if (len <= KORE_MEM_BLOCK_SIZE_MAX) {
- idx = memblock_index(len);
- ptr = kore_pool_get(&blocks[idx].pool);
- } else {
- mlen = sizeof(struct memsize) + len + sizeof(struct meminfo);
- if ((ptr = calloc(1, mlen)) == NULL)
- fatal("kore_malloc(%zu): %d", len, errno);
- }
-
- size = (struct memsize *)ptr;
- size->len = len;
- size->magic = KORE_MEM_MAGIC;
+ void *ptr;
- addr = (u_int8_t *)ptr + sizeof(struct memsize);
+ if ((ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)) == MAP_FAILED)
+ fatal("%s: mmap: %s", __func__, errno_s);
- mem = (struct meminfo *)(addr + size->len);
- mem->flags = 0;
- mem->magic = KORE_MEM_MAGIC;
+ return (ptr);
+}
- return (addr);
+void *
+kore_malloc(size_t len)
+{
+ return (mem_alloc(len));
}
void *
kore_realloc(void *ptr, size_t len)
{
- struct memsize *size;
+ struct meminfo *mem;
void *nptr;
if (ptr == NULL) {
- nptr = kore_malloc(len);
+ nptr = mem_alloc(len);
} else {
- size = memsize(ptr);
- if (len == size->len)
+ mem = meminfo(ptr);
+ if (len == mem->len)
return (ptr);
- nptr = kore_malloc(len);
- memcpy(nptr, ptr, MIN(len, size->len));
+ nptr = mem_alloc(len);
+ memcpy(nptr, ptr, MIN(len, mem->len));
kore_free(ptr);
}
@@ -160,7 +143,7 @@ kore_calloc(size_t memb, size_t len)
fatal("kore_calloc(): memb * len > SIZE_MAX");
total = memb * len;
- ptr = kore_malloc(total);
+ ptr = mem_alloc(total);
memset(ptr, 0, total);
return (ptr);
@@ -171,7 +154,6 @@ kore_free(void *ptr)
{
size_t idx;
struct meminfo *mem;
- struct memsize *size;
u_int8_t *addr;
if (ptr == NULL)
@@ -183,14 +165,14 @@ kore_free(void *ptr)
mem->flags &= ~KORE_MEM_TAGGED;
}
- size = memsize(ptr);
- addr = (u_int8_t *)ptr - sizeof(struct memsize);
+ addr = (u_int8_t *)ptr - sizeof(struct meminfo);
- if (size->len <= KORE_MEM_BLOCK_SIZE_MAX) {
- idx = memblock_index(size->len);
- kore_pool_put(&blocks[idx].pool, addr);
+ if (mem->len <= KORE_MEM_POOLS_SIZE_MAX) {
+ idx = mem_index(mem->len);
+ kore_pool_put(&mempools[idx], addr);
} else {
- free(addr);
+ if (munmap(addr, sizeof(*mem) + mem->len) == -1)
+ fatal("%s: munmap: %s", __func__, errno_s);
}
}
@@ -201,7 +183,7 @@ kore_strdup(const char *str)
char *nstr;
len = strlen(str) + 1;
- nstr = kore_malloc(len);
+ nstr = mem_alloc(len);
(void)kore_strlcpy(nstr, str, len);
return (nstr);
@@ -212,7 +194,7 @@ kore_malloc_tagged(size_t len, u_int32_t tag)
{
void *ptr;
- ptr = kore_malloc(len);
+ ptr = mem_alloc(len);
kore_mem_tag(ptr, tag);
return (ptr);
@@ -278,8 +260,33 @@ kore_mem_zero(void *ptr, size_t len)
}
}
+static void *
+mem_alloc(size_t len)
+{
+ void *ptr;
+ struct meminfo *mem;
+ size_t mlen, idx;
+
+ if (len == 0)
+ len = 8;
+
+ if (len <= KORE_MEM_POOLS_SIZE_MAX) {
+ idx = mem_index(len);
+ ptr = kore_pool_get(&mempools[idx]);
+ } else {
+ mlen = sizeof(struct meminfo) + len;
+ ptr = kore_mmap_region(mlen);
+ }
+
+ mem = (struct meminfo *)ptr;
+ mem->len = len;
+ mem->flags = 0;
+
+ return ((u_int8_t *)ptr + sizeof(struct meminfo));
+}
+
static size_t
-memblock_index(size_t len)
+mem_index(size_t len)
{
size_t mlen, idx;
@@ -290,36 +297,14 @@ memblock_index(size_t len)
mlen = mlen << 1;
}
- if (idx > (KORE_MEM_BLOCKS - 1))
- fatal("kore_malloc: idx too high");
+ if (idx > (KORE_MEM_POOLS - 1))
+ fatal("mem_index: idx too high");
return (idx);
}
-static inline struct memsize *
-memsize(void *ptr)
-{
- struct memsize *ms;
-
- ms = (struct memsize *)((u_int8_t *)ptr - sizeof(*ms));
-
- if (ms->magic != KORE_MEM_MAGIC)
- fatal("%s: bad memsize magic (0x%zx)", __func__, ms->magic);
-
- return (ms);
-}
-
static inline struct meminfo *
meminfo(void *ptr)
{
- struct memsize *ms;
- struct meminfo *info;
-
- ms = memsize(ptr);
- info = (struct meminfo *)((u_int8_t *)ptr + ms->len);
-
- if (info->magic != KORE_MEM_MAGIC)
- fatal("%s: bad meminfo magic (0x%x)", __func__, info->magic);
-
- return (info);
+ return ((struct meminfo *)((u_int8_t *)ptr - sizeof(struct meminfo)));
}
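(A side note, not part of the patch: the bucket selection mem.c performs
can be sketched standalone. Assuming, per this commit, 11 power-of-two
classes from 8 up to KORE_MEM_POOLS_SIZE_MAX bytes; the function name is
hypothetical:)

    #include <stddef.h>

    static size_t
    size_class(size_t len)
    {
        size_t idx = 0;
        size_t mlen = 8;

        /* Classes are 8, 16, 32, ..., 8192 bytes: a 100-byte request
         * lands in the 128-byte pool (idx 4). Anything over 8192
         * bypasses the pools entirely via kore_mmap_region(). */
        while (len > mlen) {
            idx++;
            mlen <<= 1;
        }

        return (idx);
    }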
diff --git a/src/pool.c b/src/pool.c
@@ -14,6 +14,33 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+/*
+ * A kore_pool is a memory pool containing fixed-sized objects that
+ * can quickly be obtained by a caller via kore_pool_get() and returned
+ * via kore_pool_put().
+ *
+ * Each entry in a pool will have a canary at the end that is used to
+ * catch any potential overruns when the entry is returned to the pool.
+ *
+ * If memory pool guards are enabled three additional things happen:
+ *
+ * 1) The metadata is placed at the start of a page instead
+ * of right before the returned user pointer.
+ *
+ * 2) Each pool entry gets a guard page at the end of its allocation
+ * that is marked as PROT_NONE. Touching a guard page will cause
+ * the application to receive a SIGSEGV.
+ *
+ * 3) Entries are only marked PROT_READ | PROT_WRITE when they are
+ * obtained with kore_pool_get(). Their memory protection is
+ * changed to PROT_NONE when returned to the pool via kore_pool_put().
+ *
+ * Caveats:
+ * Pools are designed to live for the entire lifetime of a Kore process,
+ * until it exits, and are therefore not fully cleaned up when exit
+ * time arrives.
+ */
+
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
@@ -32,98 +59,153 @@ static void pool_lock(struct kore_pool *);
static void pool_unlock(struct kore_pool *);
#endif
-static void pool_region_create(struct kore_pool *, size_t);
-static void pool_region_destroy(struct kore_pool *);
+static void pool_grow(struct kore_pool *, size_t);
+
+static void pool_mark_entry_rw(struct kore_pool *, void *);
+static void pool_mark_entry_none(struct kore_pool *, void *);
void
kore_pool_init(struct kore_pool *pool, const char *name,
size_t len, size_t elm)
{
+ long pagesz;
+
if (elm < POOL_MIN_ELEMENTS)
elm = POOL_MIN_ELEMENTS;
+ if ((pagesz = sysconf(_SC_PAGESIZE)) == -1)
+ fatal("%s: sysconf: %s", __func__, errno_s);
+
if ((pool->name = strdup(name)) == NULL)
fatal("kore_pool_init: strdup %s", errno_s);
- len = (len + (8 - 1)) & ~(8 - 1);
+ pool->uselen = len;
+
+ len = len + sizeof(u_int64_t);
+ len = (len + (16 - 1)) & ~(16 - 1);
+
+ pool->elmlen = len;
pool->lock = 0;
- pool->elms = 0;
- pool->inuse = 0;
- pool->elen = len;
+ pool->freelist = NULL;
+ pool->pagesz = pagesz;
pool->growth = elm * 0.25f;
- pool->slen = pool->elen + sizeof(struct kore_pool_entry);
+ pool->canary = (u_int64_t)kore_platform_random_uint32() << 32 |
+ kore_platform_random_uint32();
- LIST_INIT(&(pool->regions));
- LIST_INIT(&(pool->freelist));
+ if (kore_mem_guard) {
+ pool->memsz = pool->pagesz * 2;
+
+ while (pool->elmlen >
+ pool->pagesz - sizeof(struct kore_pool_entry)) {
+ pool->memsz += pool->pagesz;
+ pool->elmlen -= MIN(pool->elmlen, pool->pagesz);
+ }
+
+ pool->elmlen = len;
+ } else {
+ pool->memsz = pool->elmlen;
+ }
- pool_region_create(pool, elm);
+ pool_grow(pool, elm);
}
void
kore_pool_cleanup(struct kore_pool *pool)
{
- pool->lock = 0;
- pool->elms = 0;
- pool->inuse = 0;
- pool->elen = 0;
- pool->slen = 0;
+ struct kore_pool_entry *entry, *next;
+
+ if (kore_mem_guard) {
+ for (entry = pool->freelist; entry != NULL; entry = next) {
+ pool_mark_entry_rw(pool, entry);
+ next = entry->nextfree;
+ (void)munmap(entry, pool->memsz);
+ }
+ }
free(pool->name);
- pool->name = NULL;
-
- pool_region_destroy(pool);
}
void *
kore_pool_get(struct kore_pool *pool)
{
- u_int8_t *ptr;
+ u_int64_t canary;
struct kore_pool_entry *entry;
#if defined(KORE_USE_TASKS)
pool_lock(pool);
#endif
- if (LIST_EMPTY(&(pool->freelist)))
- pool_region_create(pool, pool->growth);
+ if (pool->freelist == NULL)
+ pool_grow(pool, pool->growth);
+
+ entry = pool->freelist;
+
+ if (kore_mem_guard)
+ pool_mark_entry_rw(pool, entry);
+
+ pool->freelist = entry->nextfree;
- entry = LIST_FIRST(&(pool->freelist));
if (entry->state != POOL_ELEMENT_FREE)
fatal("%s: element %p was not free", pool->name, (void *)entry);
- LIST_REMOVE(entry, list);
+ entry->nextfree = NULL;
entry->state = POOL_ELEMENT_BUSY;
- ptr = (u_int8_t *)entry + sizeof(struct kore_pool_entry);
- pool->inuse++;
+ canary = pool->canary;
+ canary ^= (uintptr_t)entry;
+ canary ^= (uintptr_t)entry->uptr;
+
+ memcpy(entry->canary, &canary, sizeof(canary));
#if defined(KORE_USE_TASKS)
pool_unlock(pool);
#endif
- return (ptr);
+ return (entry->uptr);
}
void
kore_pool_put(struct kore_pool *pool, void *ptr)
{
+ void *base;
+ u_int64_t canary;
struct kore_pool_entry *entry;
#if defined(KORE_USE_TASKS)
pool_lock(pool);
#endif
- entry = (struct kore_pool_entry *)
- ((u_int8_t *)ptr - sizeof(struct kore_pool_entry));
+ if (kore_mem_guard) {
+ base = (u_int8_t *)ptr - ((uintptr_t)ptr % pool->pagesz);
+ } else {
+ base = (u_int8_t *)ptr - sizeof(*entry);
+ }
+
+ entry = (struct kore_pool_entry *)base;
+
+ if (entry->uptr != ptr) {
+ fatal("%s: uptr mismatch %p != %p",
+ pool->name, entry->uptr, ptr);
+ }
+
+ memcpy(&canary, entry->canary, sizeof(canary));
+ canary ^= (uintptr_t)entry;
+ canary ^= (uintptr_t)ptr;
+
+ if (canary != pool->canary)
+ fatal("%s: memory corruption detected", pool->name);
if (entry->state != POOL_ELEMENT_BUSY)
fatal("%s: element %p was not busy", pool->name, ptr);
entry->state = POOL_ELEMENT_FREE;
- LIST_INSERT_HEAD(&(pool->freelist), entry, list);
+ entry->nextfree = pool->freelist;
+
+ if (kore_mem_guard)
+ pool_mark_entry_none(pool, entry);
- pool->inuse--;
+ pool->freelist = entry;
#if defined(KORE_USE_TASKS)
pool_unlock(pool);
@@ -131,57 +213,77 @@ kore_pool_put(struct kore_pool *pool, void *ptr)
}
static void
-pool_region_create(struct kore_pool *pool, size_t elms)
+pool_grow(struct kore_pool *pool, size_t elms)
{
size_t i;
- u_int8_t *p;
- struct kore_pool_region *reg;
- struct kore_pool_entry *entry;
+ u_int8_t *base, *p;
+ struct kore_pool_entry *entry, *prev;
- if ((reg = calloc(1, sizeof(struct kore_pool_region))) == NULL)
- fatal("pool_region_create: calloc: %s", errno_s);
+ prev = pool->freelist;
- LIST_INSERT_HEAD(&(pool->regions), reg, list);
+ if (kore_mem_guard == 0)
+ base = kore_mmap_region(elms * (sizeof(*entry) + pool->elmlen));
+ else
+ base = NULL;
- if (SIZE_MAX / elms < pool->slen)
- fatal("pool_region_create: overflow");
+ for (i = 0; i < elms; i++) {
+ if (kore_mem_guard) {
+ base = kore_mmap_region(pool->memsz);
+ p = base + (pool->memsz - pool->pagesz - pool->elmlen);
+ entry = (struct kore_pool_entry *)base;
+ } else {
+ p = base + ((sizeof(*entry) + pool->elmlen) * i);
+ entry = (struct kore_pool_entry *)p;
+ p += sizeof(*entry);
+ }
+
+ entry->uptr = p;
+ entry->nextfree = NULL;
+ entry->state = POOL_ELEMENT_FREE;
+ entry->canary = p + pool->uselen;
- reg->length = elms * pool->slen;
- reg->start = mmap(NULL, reg->length, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- if (reg->start == MAP_FAILED)
- fatal("mmap: %s", errno_s);
+ if (prev != NULL) {
+ prev->nextfree = entry;
+ if (kore_mem_guard)
+ pool_mark_entry_none(pool, prev);
+ }
- p = (u_int8_t *)reg->start;
+ prev = entry;
- for (i = 0; i < elms; i++) {
- entry = (struct kore_pool_entry *)p;
- entry->region = reg;
- entry->state = POOL_ELEMENT_FREE;
- LIST_INSERT_HEAD(&(pool->freelist), entry, list);
+ if (pool->freelist == NULL)
+ pool->freelist = entry;
+
+ if (kore_mem_guard) {
+ p += pool->elmlen;
+
+ if (((uintptr_t)p % pool->pagesz) != 0)
+ fatal("%s: misaligned page", __func__);
- p = p + pool->slen;
+ if (mprotect(p, pool->pagesz, PROT_NONE) == -1)
+ fatal("%s: mprotect: %s", __func__, errno_s);
+
+ if (madvise(p, pool->pagesz, MADV_FREE) == -1)
+ fatal("%s: madvise: %s", __func__, errno_s);
+ }
}
- pool->elms += elms;
+ if (prev != NULL && kore_mem_guard)
+ pool_mark_entry_none(pool, prev);
}
static void
-pool_region_destroy(struct kore_pool *pool)
+pool_mark_entry_none(struct kore_pool *pool, void *ptr)
{
- struct kore_pool_region *reg;
-
- /* Take care iterating when modifying list contents */
- while (!LIST_EMPTY(&pool->regions)) {
- reg = LIST_FIRST(&pool->regions);
- LIST_REMOVE(reg, list);
- (void)munmap(reg->start, reg->length);
- free(reg);
- }
+ if (mprotect(ptr, pool->memsz - pool->pagesz, PROT_NONE) == -1)
+ fatal("%s: mprotect: %s", __func__, errno_s);
+}
- /* Freelist references into the regions memory allocations */
- LIST_INIT(&pool->freelist);
- pool->elms = 0;
+static void
+pool_mark_entry_rw(struct kore_pool *pool, void *ptr)
+{
+ if (mprotect(ptr, pool->memsz - pool->pagesz,
+ PROT_READ | PROT_WRITE) == -1)
+ fatal("%s: mprotect: %s", __func__, errno_s);
}
#if defined(KORE_USE_TASKS)
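(A side note, not part of the patch: with KORE_MEM_GUARD on, each entry
lives in its own mapping, with metadata at the start of the first page
and the user data pushed right up against the trailing PROT_NONE guard
page. A hedged sketch of the layout arithmetic, assuming a 4096-byte
page and an example 160-byte rounded entry length:)

    #include <stddef.h>
    #include <stdio.h>

    int
    main(void)
    {
        size_t pagesz = 4096;       /* assumed page size */
        size_t elmlen = 160;        /* example rounded entry length */
        size_t memsz = pagesz * 2;  /* one data page + one guard page */
        size_t uoff = memsz - pagesz - elmlen;

        /* [ entry metadata ... user data ][ PROT_NONE guard page ]
         * The user data ends exactly where the guard begins, so a
         * one-byte overrun faults immediately. */
        printf("user data: offsets %zu-%zu, guard page at %zu\n",
            uoff, uoff + elmlen, memsz - pagesz);

        /* kore_pool_put() recovers the metadata from the user pointer
         * by stripping the in-page offset: ptr - (ptr mod pagesz). */
        return (0);
    }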