From 0d192f8cf3c21b2382e227fab057dabe16ea5e8b Mon Sep 17 00:00:00 2001 From: Florian Fischer Date: Mon, 17 Jun 2019 14:54:26 +0200 Subject: reduce code duplication by giving each known allocator its own class also move allocator related code to src/allocators --- .../glibc/glibc_2.28_no_passive_falsesharing.patch | 22 +++++++++++++++++++ .../glibc_2.28_no_passive_falsesharing_fancy.patch | 25 ++++++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 src/allocators/glibc/glibc_2.28_no_passive_falsesharing.patch create mode 100644 src/allocators/glibc/glibc_2.28_no_passive_falsesharing_fancy.patch (limited to 'src/allocators/glibc') diff --git a/src/allocators/glibc/glibc_2.28_no_passive_falsesharing.patch b/src/allocators/glibc/glibc_2.28_no_passive_falsesharing.patch new file mode 100644 index 0000000..fcc695c --- /dev/null +++ b/src/allocators/glibc/glibc_2.28_no_passive_falsesharing.patch @@ -0,0 +1,22 @@ +diff --git a/malloc/malloc.c b/malloc/malloc.c +index 27cf6137c2..3aadaddd1d 100644 +--- a/malloc/malloc.c ++++ b/malloc/malloc.c +@@ -4172,6 +4172,9 @@ _int_free (mstate av, mchunkptr p, int have_lock) + + #if USE_TCACHE + { ++ /* Check if chunk is from our own arena. 
*/ ++ if (av == thread_arena) ++ { + size_t tc_idx = csize2tidx (size); + if (tcache != NULL && tc_idx < mp_.tcache_bins) + { +@@ -4201,6 +4204,7 @@ _int_free (mstate av, mchunkptr p, int have_lock) + return; + } + } ++ } + } + #endif + diff --git a/src/allocators/glibc/glibc_2.28_no_passive_falsesharing_fancy.patch b/src/allocators/glibc/glibc_2.28_no_passive_falsesharing_fancy.patch new file mode 100644 index 0000000..044909b --- /dev/null +++ b/src/allocators/glibc/glibc_2.28_no_passive_falsesharing_fancy.patch @@ -0,0 +1,25 @@ +diff --git a/malloc/malloc.c b/malloc/malloc.c +index 27cf6137c2..fbd311801d 100644 +--- a/malloc/malloc.c ++++ b/malloc/malloc.c +@@ -4172,6 +4172,12 @@ _int_free (mstate av, mchunkptr p, int have_lock) + + #if USE_TCACHE + { ++ /* Check if chunk is from our own arena or false sharing is not possible ++ because the chunk is cache line aligned and its size is a multiple ++ of a cacheline */ ++ if (av == thread_arena ++ || (((size_t)p & 63) == 0 && ((size + 2*SIZE_SZ) % 64) == 0)) ++ { + size_t tc_idx = csize2tidx (size); + if (tcache != NULL && tc_idx < mp_.tcache_bins) + { +@@ -4201,6 +4207,7 @@ _int_free (mstate av, mchunkptr p, int have_lock) + return; + } + } ++ } + } + #endif + -- cgit v1.2.3