aboutsummaryrefslogtreecommitdiff
path: root/src/speedymalloc.c
diff options
context:
space:
mode:
authorFlorian Fischer <florian.fl.fischer@fau.de>2020-02-25 20:02:26 +0100
committerFlorian Fischer <florian.fl.fischer@fau.de>2020-02-25 21:14:34 +0100
commit19f858f0712aa38cd61d8380c7c879dd254804ad (patch)
treec0fdb0de1286396701039f8b8c5a20fe8d45d3b1 /src/speedymalloc.c
parent687c014f7eeee5c4f97293df0558d426f9804093 (diff)
downloadallocbench-19f858f0712aa38cd61d8380c7c879dd254804ad.tar.gz
allocbench-19f858f0712aa38cd61d8380c7c879dd254804ad.zip
add madvise MADV_WILLNEED
Touching every 32MB chunk prevents individual page faults.
Diffstat (limited to 'src/speedymalloc.c')
-rw-r--r--src/speedymalloc.c17
1 files changed, 17 insertions, 0 deletions
diff --git a/src/speedymalloc.c b/src/speedymalloc.c
index d1edbf3..7df3c5a 100644
--- a/src/speedymalloc.c
+++ b/src/speedymalloc.c
@@ -14,6 +14,10 @@
#define MEMSIZE 1024*4*1024*1024l
#endif
+#ifndef NO_WILLNEED
+#define WILLNEED_SIZE 32 * 1024 * 1024
+#endif
+
// sizeof(tls_t) == 4096
#define CACHE_BINS 511
// max cached object: 511 * 64 - 1 = 32703
@@ -45,6 +49,7 @@ typedef struct TLStates {
} tls_t;
__thread tls_t* tls;
+__thread uintptr_t next_willneed;
static inline int size2bin(size_t size) {
assert(size > 0 && size < CACHE_BINS * CACHE_BIN_SEPERATION);
@@ -65,6 +70,9 @@ static void init_tls(void) {
tls = (tls_t*)mem;
tls->ptr = ((uintptr_t)tls) + sizeof(tls_t);
+#ifndef NO_WILLNEED
+ next_willneed = tls->ptr;
+#endif
}
static void* bump_alloc(size_t size) {
@@ -75,6 +83,15 @@ static void* bump_alloc(size_t size) {
size_t mask = MIN_ALIGNMENT -1;
tls->ptr = (tls->ptr + mask) & ~mask;
+#ifndef NO_WILLNEED
+ if(unlikely(tls->ptr >= next_willneed)) {
+ if (madvise((void*)next_willneed, WILLNEED_SIZE, MADV_WILLNEED) != 0) {
+ perror("madvise");
+ }
+ next_willneed += WILLNEED_SIZE;
+ }
+#endif
+
void* ptr = (void*)tls->ptr;
ptr2chunk(ptr)->size = size;
tls->ptr += size;