diff --git a/src/core/mem.c b/src/core/mem.c
index 006fbb68..5f53bcec 100644
--- a/src/core/mem.c
+++ b/src/core/mem.c
@@ -75,6 +75,84 @@
 #define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y))
 #define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y))
 
+#if MEM_OVERFLOW_CHECK
+#define MEM_SANITY_OFFSET   MEM_SANITY_REGION_BEFORE_ALIGNED
+#define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED)
+#else
+#define MEM_SANITY_OFFSET   0
+#define MEM_SANITY_OVERHEAD 0
+#endif
+
+#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK
+/**
+ * Check if a mem element was victim of an overflow or underflow
+ * (e.g. the restricted area after/before it has been altered)
+ *
+ * @param p the mem element to check
+ * @param size allocated size of the element
+ * @param descr1 description of the element source shown on error
+ * @param descr2 description of the element source shown on error
+ */
+void
+mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2)
+{
+#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED
+  u16_t k;
+  u8_t *m;
+
+#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
+  m = (u8_t *)p + size;
+  for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) {
+    if (m[k] != 0xcd) {
+      char errstr[128] = "detected mem overflow in ";
+      strcat(errstr, descr1);
+      strcat(errstr, descr2);
+      LWIP_ASSERT(errstr, 0);
+    }
+  }
+#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
+
+#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
+  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
+  for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) {
+    if (m[k] != 0xcd) {
+      char errstr[128] = "detected mem underflow in ";
+      strcat(errstr, descr1);
+      strcat(errstr, descr2);
+      LWIP_ASSERT(errstr, 0);
+    }
+  }
+#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */
+#else
+  LWIP_UNUSED_ARG(p);
+  LWIP_UNUSED_ARG(descr1);
+  LWIP_UNUSED_ARG(descr2);
+#endif
+}
+
+/**
+ * Initialize the restricted area of a mem element.
+ */
+void
+mem_overflow_init_raw(void *p, size_t size)
+{
+#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0
+  u8_t *m;
+#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
+  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
+  memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED);
+#endif
+#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
+  m = (u8_t *)p + size;
+  memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED);
+#endif
+#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
+  LWIP_UNUSED_ARG(p);
+  LWIP_UNUSED_ARG(size);
+#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
+}
+#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */
+
 #if MEM_LIBC_MALLOC || MEM_USE_POOLS
 
 /** mem_init is not used when using pools instead of a heap or using
@@ -277,6 +355,10 @@ struct mem {
   mem_size_t prev;
   /** 1: this area is used; 0: this area is unused */
   u8_t used;
+#if MEM_OVERFLOW_CHECK
+  /** this keeps track of the user allocation size for guard checks */
+  mem_size_t user_size;
+#endif
 };
 
 /** All allocated blocks will be MIN_SIZE bytes big, at least!
@@ -337,6 +419,33 @@ static volatile u8_t mem_free_count;
 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
 
+#if MEM_SANITY_CHECK
+static void mem_sanity(void);
+#define MEM_SANITY() mem_sanity()
+#else
+#define MEM_SANITY()
+#endif
+
+#if MEM_OVERFLOW_CHECK
+static void
+mem_overflow_init_element(struct mem *mem, mem_size_t user_size)
+{
+  void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
+  mem->user_size = user_size;
+  mem_overflow_init_raw(p, user_size);
+}
+
+static void
+mem_overflow_check_element(struct mem *mem)
+{
+  void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
+  mem_overflow_check_raw(p, mem->user_size, "heap", "");
+}
+#else /* MEM_OVERFLOW_CHECK */
+#define mem_overflow_init_element(mem, size)
+#define mem_overflow_check_element(mem)
+#endif /* MEM_OVERFLOW_CHECK */
+
 static struct mem *
 ptr_to_mem(mem_size_t ptr)
 {
@@ -422,6 +531,7 @@ mem_init(void)
   ram_end->used = 1;
   ram_end->next = MEM_SIZE_ALIGNED;
   ram_end->prev = MEM_SIZE_ALIGNED;
+  MEM_SANITY();
 
   /* initialize the lowest-free pointer to the start of the heap */
   lfree = (struct mem *)(void *)ram;
@@ -453,7 +563,7 @@ mem_link_valid(struct mem *mem)
 }
 
 #if MEM_SANITY_CHECK
-void
+static void
 mem_sanity(void)
 {
   struct mem *mem;
@@ -522,7 +632,7 @@ mem_free(void *rmem)
 
   /* Get the corresponding struct mem: */
   /* cast through void* to get rid of alignment warnings */
-  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
+  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));
 
   if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) {
     LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory");
@@ -531,6 +641,9 @@ mem_free(void *rmem)
     MEM_STATS_INC_LOCKED(illegal);
     return;
   }
+#if MEM_OVERFLOW_CHECK
+  mem_overflow_check_element(mem);
+#endif
   /* protect the heap from concurrent access */
   LWIP_MEM_FREE_PROTECT();
   /* mem has to be in a used state */
@@ -564,9 +677,7 @@ mem_free(void *rmem)
 
   /* finally, see if prev or next are free also */
   plug_holes(mem);
-#if MEM_SANITY_CHECK
-  mem_sanity();
-#endif /* MEM_SANITY_CHECK */
+  MEM_SANITY();
 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
   mem_free_count = 1;
 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
@@ -595,14 +706,16 @@ mem_trim(void *rmem, mem_size_t new_size)
   /* Expand the size of the allocated memory region so that we can
      adjust for alignment. */
   newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size);
-  if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) {
-    return NULL;
-  }
-
   if (newsize < MIN_SIZE_ALIGNED) {
     /* every data block must be at least MIN_SIZE_ALIGNED long */
     newsize = MIN_SIZE_ALIGNED;
   }
+#if MEM_OVERFLOW_CHECK
+  newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
+#endif
+  if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) {
+    return NULL;
+  }
 
   LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
               (u8_t *)rmem < (u8_t *)ram_end);
@@ -615,11 +728,14 @@ mem_trim(void *rmem, mem_size_t new_size)
   }
   /* Get the corresponding struct mem ... */
   /* cast through void* to get rid of alignment warnings */
-  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
+  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));
+#if MEM_OVERFLOW_CHECK
+  mem_overflow_check_element(mem);
+#endif
   /* ... and its offset pointer */
   ptr = mem_to_ptr(mem);
 
-  size = (mem_size_t)((mem_size_t)(mem->next - ptr) - SIZEOF_STRUCT_MEM);
+  size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD));
   LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
   if (newsize > size) {
     /* not supported */
@@ -637,6 +753,7 @@
   if (mem2->used == 0) {
     /* The next struct is unused, we can simply move it at little */
     mem_size_t next;
+    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
     /* remember the old next pointer */
     next = mem2->next;
     /* create new struct mem which is moved directly after the shrinked mem */
@@ -669,6 +786,7 @@ mem_trim(void *rmem, mem_size_t new_size)
      * region that couldn't hold data, but when mem->next gets freed,
      * the 2 regions would be combined, resulting in more free memory */
     ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
+    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
     mem2 = ptr_to_mem(ptr2);
     if (mem2 < lfree) {
       lfree = mem2;
@@ -689,6 +807,10 @@ mem_trim(void *rmem, mem_size_t new_size)
       -> don't do anyhting.
       -> the remaining space stays unused since it is too small
     } */
+#if MEM_OVERFLOW_CHECK
+  mem_overflow_init_element(mem, new_size);
+#endif
+  MEM_SANITY();
 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
   mem_free_count = 1;
 #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
@@ -721,15 +843,16 @@ mem_malloc(mem_size_t size_in)
   /* Expand the size of the allocated memory region so that we can
      adjust for alignment. */
   size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in);
-  if ((size > MEM_SIZE_ALIGNED) ||
-      (size < size_in)) {
-    return NULL;
-  }
-
   if (size < MIN_SIZE_ALIGNED) {
     /* every data block must be at least MIN_SIZE_ALIGNED long */
     size = MIN_SIZE_ALIGNED;
   }
+#if MEM_OVERFLOW_CHECK
+  size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
+#endif
+  if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) {
+    return NULL;
+  }
 
   /* protect the heap from concurrent access */
   sys_mutex_lock(&mem_mutex);
@@ -776,6 +899,7 @@ mem_malloc(mem_size_t size_in)
          * the 2 regions would be combined, resulting in more free memory */
         ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size);
+        LWIP_ASSERT("invalid next ptr", ptr2 != MEM_SIZE_ALIGNED);
         /* create mem2 struct */
         mem2 = ptr_to_mem(ptr2);
         mem2->used = 0;
@@ -832,7 +956,11 @@ mem_malloc_adjust_lfree:
       LWIP_ASSERT("mem_malloc: sanity check alignment",
                   (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0);
 
-      return (u8_t *)mem + SIZEOF_STRUCT_MEM;
+#if MEM_OVERFLOW_CHECK
+      mem_overflow_init_element(mem, size_in);
+#endif
+      MEM_SANITY();
+      return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
     }
   }
 #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
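The heap changes above reduce to two offsets: mem_malloc() now hands out (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET, mem_free()/mem_trim() subtract the same amount to get back to the struct mem header, and mem_overflow_init_raw() paints the regions on both sides of the user data with 0xcd. The following standalone sketch (not lwIP code; the constants are arbitrary stand-ins for SIZEOF_STRUCT_MEM and the aligned sanity regions) shows that layout and pointer math:

/* Standalone sketch (not lwIP code): illustrates the element layout the
 * patch creates when MEM_OVERFLOW_CHECK is enabled. The names below are
 * simplified stand-ins for SIZEOF_STRUCT_MEM, MEM_SANITY_OFFSET and the
 * 0xcd guard fill; sizes are chosen arbitrarily for the demonstration. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define HDR_SIZE      16u   /* stand-in for SIZEOF_STRUCT_MEM (aligned) */
#define GUARD_BEFORE  16u   /* stand-in for MEM_SANITY_REGION_BEFORE_ALIGNED */
#define GUARD_AFTER   16u   /* stand-in for MEM_SANITY_REGION_AFTER_ALIGNED */

int main(void)
{
  size_t user_size = 100;
  /* one heap element: header | front guard | user data | rear guard */
  uint8_t *element = malloc(HDR_SIZE + GUARD_BEFORE + user_size + GUARD_AFTER);
  assert(element != NULL);

  /* what mem_malloc() returns: skip the header and the front guard,
   * i.e. (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET */
  uint8_t *user = element + HDR_SIZE + GUARD_BEFORE;

  /* what mem_overflow_init_raw() does: paint both guards with 0xcd */
  memset(user - GUARD_BEFORE, 0xcd, GUARD_BEFORE);
  memset(user + user_size,    0xcd, GUARD_AFTER);

  /* what mem_free() does to find the header again:
   * rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET) */
  uint8_t *hdr = user - (HDR_SIZE + GUARD_BEFORE);
  assert(hdr == element);

  free(element);
  return 0;
}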
diff --git a/src/core/memp.c b/src/core/memp.c
index 10932100..7ec83904 100644
--- a/src/core/memp.c
+++ b/src/core/memp.c
@@ -120,57 +120,16 @@ memp_sanity(const struct memp_desc *desc)
 
 #if MEMP_OVERFLOW_CHECK
 /**
- * Check if a memp element was victim of an overflow
- * (e.g. the restricted area after it has been altered)
+ * Check if a memp element was victim of an overflow or underflow
+ * (e.g. the restricted area after/before it has been altered)
  *
  * @param p the memp element to check
  * @param desc the pool p comes from
  */
 static void
-memp_overflow_check_element_overflow(struct memp *p, const struct memp_desc *desc)
+memp_overflow_check_element(struct memp *p, const struct memp_desc *desc)
 {
-#if MEMP_SANITY_REGION_AFTER_ALIGNED > 0
-  u16_t k;
-  u8_t *m;
-  m = (u8_t *)p + MEMP_SIZE + desc->size;
-  for (k = 0; k < MEMP_SANITY_REGION_AFTER_ALIGNED; k++) {
-    if (m[k] != 0xcd) {
-      char errstr[128] = "detected memp overflow in pool ";
-      strcat(errstr, desc->desc);
-      LWIP_ASSERT(errstr, 0);
-    }
-  }
-#else /* MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */
-  LWIP_UNUSED_ARG(p);
-  LWIP_UNUSED_ARG(desc);
-#endif /* MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */
-}
-
-/**
- * Check if a memp element was victim of an underflow
- * (e.g. the restricted area before it has been altered)
- *
- * @param p the memp element to check
- * @param desc the pool p comes from
- */
-static void
-memp_overflow_check_element_underflow(struct memp *p, const struct memp_desc *desc)
-{
-#if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0
-  u16_t k;
-  u8_t *m;
-  m = (u8_t *)p + MEMP_SIZE - MEMP_SANITY_REGION_BEFORE_ALIGNED;
-  for (k = 0; k < MEMP_SANITY_REGION_BEFORE_ALIGNED; k++) {
-    if (m[k] != 0xcd) {
-      char errstr[128] = "detected memp underflow in pool ";
-      strcat(errstr, desc->desc);
-      LWIP_ASSERT(errstr, 0);
-    }
-  }
-#else /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 */
-  LWIP_UNUSED_ARG(p);
-  LWIP_UNUSED_ARG(desc);
-#endif /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 */
+  mem_overflow_check_raw((u8_t *)p + MEMP_SIZE, desc->size, "pool ", desc->desc);
 }
 
 /**
@@ -179,20 +138,7 @@ memp_overflow_check_element_underflow(struct memp *p, const struct memp_desc *de
 static void
 memp_overflow_init_element(struct memp *p, const struct memp_desc *desc)
 {
-#if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 || MEMP_SANITY_REGION_AFTER_ALIGNED > 0
-  u8_t *m;
-#if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0
-  m = (u8_t *)p + MEMP_SIZE - MEMP_SANITY_REGION_BEFORE_ALIGNED;
-  memset(m, 0xcd, MEMP_SANITY_REGION_BEFORE_ALIGNED);
-#endif
-#if MEMP_SANITY_REGION_AFTER_ALIGNED > 0
-  m = (u8_t *)p + MEMP_SIZE + desc->size;
-  memset(m, 0xcd, MEMP_SANITY_REGION_AFTER_ALIGNED);
-#endif
-#else /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 || MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */
-  LWIP_UNUSED_ARG(p);
-  LWIP_UNUSED_ARG(desc);
-#endif /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 || MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */
+  mem_overflow_init_raw((u8_t *)p + MEMP_SIZE, desc->size);
 }
 
 #if MEMP_OVERFLOW_CHECK >= 2
@@ -212,9 +158,8 @@ memp_overflow_check_all(void)
   for (i = 0; i < MEMP_MAX; ++i) {
     p = (struct memp *)LWIP_MEM_ALIGN(memp_pools[i]->base);
     for (j = 0; j < memp_pools[i]->num; ++j) {
-      memp_overflow_check_element_overflow(p, memp_pools[i]);
-      memp_overflow_check_element_underflow(p, memp_pools[i]);
-      p = LWIP_ALIGNMENT_CAST(struct memp *, ((u8_t *)p + MEMP_SIZE + memp_pools[i]->size + MEMP_SANITY_REGION_AFTER_ALIGNED));
+      memp_overflow_check_element(p, memp_pools[i]);
+      p = LWIP_ALIGNMENT_CAST(struct memp *, ((u8_t *)p + MEMP_SIZE + memp_pools[i]->size + MEM_SANITY_REGION_AFTER_ALIGNED));
     }
   }
   SYS_ARCH_UNPROTECT(old_level);
@@ -243,7 +188,7 @@ memp_init_pool(const struct memp_desc *desc)
   /* force memset on pool memory */
   memset(memp, 0, (size_t)desc->num * (MEMP_SIZE + desc->size
 #if MEMP_OVERFLOW_CHECK
-                                       + MEMP_SANITY_REGION_AFTER_ALIGNED
+                                       + MEM_SANITY_REGION_AFTER_ALIGNED
 #endif
                                       ));
 #endif
@@ -257,7 +202,7 @@ memp_init_pool(const struct memp_desc *desc)
     /* cast through void* to get rid of alignment warnings */
     memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size
 #if MEMP_OVERFLOW_CHECK
-                                   + MEMP_SANITY_REGION_AFTER_ALIGNED
+                                   + MEM_SANITY_REGION_AFTER_ALIGNED
 #endif
                                   );
   }
@@ -319,8 +264,7 @@ do_memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int
   if (memp != NULL) {
 #if !MEMP_MEM_MALLOC
 #if MEMP_OVERFLOW_CHECK == 1
-    memp_overflow_check_element_overflow(memp, desc);
-    memp_overflow_check_element_underflow(memp, desc);
+    memp_overflow_check_element(memp, desc);
 #endif /* MEMP_OVERFLOW_CHECK */
 
     *desc->tab = memp->next;
@@ -428,8 +372,7 @@ do_memp_free_pool(const struct memp_desc *desc, void *mem)
   SYS_ARCH_PROTECT(old_level);
 
 #if MEMP_OVERFLOW_CHECK == 1
-  memp_overflow_check_element_overflow(memp, desc);
-  memp_overflow_check_element_underflow(memp, desc);
+  memp_overflow_check_element(memp, desc);
 #endif /* MEMP_OVERFLOW_CHECK */
 
 #if MEMP_STATS
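With the pool-specific overflow and underflow checkers removed, memp.c now delegates to the shared mem_overflow_check_raw(), which scans both guard regions for any byte that is no longer 0xcd and raises LWIP_ASSERT with the pool description. A minimal standalone sketch of that detection loop follows; check_guard() is a hypothetical helper, not an lwIP function, and it prints where lwIP would assert:

/* Standalone sketch (not lwIP code): the fill-and-verify pattern that the
 * shared mem_overflow_check_raw() performs for both the heap and the pools. */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define GUARD_LEN 16u

/* returns 0 if the guard is intact, 1 if any byte was altered */
static int check_guard(const unsigned char *guard, size_t len, const char *what)
{
  size_t k;
  for (k = 0; k < len; k++) {
    if (guard[k] != 0xcd) {
      printf("detected %s at guard byte %u\n", what, (unsigned)k);
      return 1;
    }
  }
  return 0;
}

int main(void)
{
  unsigned char buf[8 + GUARD_LEN]; /* user area followed by a rear guard */
  memset(buf + 8, 0xcd, GUARD_LEN); /* init: paint the guard */

  buf[8] = 0;                       /* simulate a one-byte overflow */
  return check_guard(buf + 8, GUARD_LEN, "mem overflow");
}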
diff --git a/src/include/lwip/opt.h b/src/include/lwip/opt.h
index c19d8dc5..0c6f1fbc 100644
--- a/src/include/lwip/opt.h
+++ b/src/include/lwip/opt.h
@@ -318,6 +318,19 @@
 #define MEMP_SANITY_CHECK               0
 #endif
 
+/**
+ * MEM_OVERFLOW_CHECK: mem overflow protection reserves a configurable
+ * number of bytes before and after each heap allocation chunk and fills
+ * them with a prominent default value.
+ *    MEM_OVERFLOW_CHECK == 0 no checking
+ *    MEM_OVERFLOW_CHECK == 1 checks each element when it is freed
+ *    MEM_OVERFLOW_CHECK >= 2 checks all heap elements every time
+ *      mem_malloc() or mem_free() is called (useful but slow!)
+ */
+#if !defined MEM_OVERFLOW_CHECK || defined __DOXYGEN__
+#define MEM_OVERFLOW_CHECK              0
+#endif
+
 /**
  * MEM_SANITY_CHECK==1: run a sanity check after each mem_free() to make
  * sure that the linked list of heap elements is not corrupted.
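An lwipopts.h fragment that exercises the new option together with the existing ones referenced above might look as follows (illustrative values, not recommendations):

/* Illustrative lwipopts.h fragment: enable guard checking on the heap and
 * the pools, with every element checked on each alloc/free while debugging
 * a suspected corruption. */
#define MEM_OVERFLOW_CHECK   2   /* heap: check all elements on every mem_malloc()/mem_free() */
#define MEMP_OVERFLOW_CHECK  2   /* pools: same, for memp_malloc()/memp_free() */
#define MEM_SANITY_CHECK     1   /* also verify the heap's linked list after mem_free() */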
diff --git a/src/include/lwip/priv/mem_priv.h b/src/include/lwip/priv/mem_priv.h
new file mode 100644
index 00000000..8630d754
--- /dev/null
+++ b/src/include/lwip/priv/mem_priv.h
@@ -0,0 +1,84 @@
+/**
+ * @file
+ * lwIP internal memory implementations (do not use in application code)
+ */
+
+/*
+ * Copyright (c) 2018 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt
+ *
+ */
+
+#ifndef LWIP_HDR_MEM_PRIV_H
+#define LWIP_HDR_MEM_PRIV_H
+
+#include "lwip/opt.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "lwip/mem.h"
+
+#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK
+/* if MEM_OVERFLOW_CHECK or MEMP_OVERFLOW_CHECK is turned on, we reserve some
+ * bytes at the beginning and at the end of each element, initialize them as
+ * 0xcd and check them later.
+ * If MEM(P)_OVERFLOW_CHECK is >= 2, on every call to mem(p)_malloc or mem(p)_free,
+ * every single element in each pool/heap is checked!
+ * This is VERY SLOW but also very helpful.
+ * MEM_SANITY_REGION_BEFORE and MEM_SANITY_REGION_AFTER can be overridden in
+ * lwipopts.h to change the amount reserved for checking. */
+#ifndef MEM_SANITY_REGION_BEFORE
+#define MEM_SANITY_REGION_BEFORE  16
+#endif /* MEM_SANITY_REGION_BEFORE*/
+#if MEM_SANITY_REGION_BEFORE > 0
+#define MEM_SANITY_REGION_BEFORE_ALIGNED    LWIP_MEM_ALIGN_SIZE(MEM_SANITY_REGION_BEFORE)
+#else
+#define MEM_SANITY_REGION_BEFORE_ALIGNED    0
+#endif /* MEM_SANITY_REGION_BEFORE*/
+#ifndef MEM_SANITY_REGION_AFTER
+#define MEM_SANITY_REGION_AFTER   16
+#endif /* MEM_SANITY_REGION_AFTER*/
+#if MEM_SANITY_REGION_AFTER > 0
+#define MEM_SANITY_REGION_AFTER_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SANITY_REGION_AFTER)
+#else
+#define MEM_SANITY_REGION_AFTER_ALIGNED     0
+#endif /* MEM_SANITY_REGION_AFTER*/
+
+void mem_overflow_init_raw(void *p, size_t size);
+void mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2);
+
+#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LWIP_HDR_MEM_PRIV_H */
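MEM_SANITY_REGION_BEFORE_ALIGNED / _AFTER_ALIGNED run the configured region size through LWIP_MEM_ALIGN_SIZE() so the returned payload keeps MEM_ALIGNMENT. A standalone sketch of that round-up, assuming the conventional mask-based align macro rather than the exact definition from lwip/mem.h:

/* Standalone sketch (not lwIP code): round a region size up to MEM_ALIGNMENT,
 * the way the *_ALIGNED macros above are derived from the raw overrides. */
#include <stdio.h>

#define MEM_ALIGNMENT 4u
#define ALIGN_SIZE(size) (((size) + MEM_ALIGNMENT - 1u) & ~(MEM_ALIGNMENT - 1u))

int main(void)
{
  /* the 16-byte default is already aligned; an odd override gets rounded up */
  printf("16 -> %u\n", ALIGN_SIZE(16u)); /* 16 */
  printf("18 -> %u\n", ALIGN_SIZE(18u)); /* 20 with MEM_ALIGNMENT == 4 */
  return 0;
}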
diff --git a/src/include/lwip/priv/memp_priv.h b/src/include/lwip/priv/memp_priv.h
index f246061d..1f14cb16 100644
--- a/src/include/lwip/priv/memp_priv.h
+++ b/src/include/lwip/priv/memp_priv.h
@@ -45,36 +45,14 @@ extern "C" {
 #endif
 
 #include "lwip/mem.h"
+#include "lwip/priv/mem_priv.h"
 
 #if MEMP_OVERFLOW_CHECK
-/* if MEMP_OVERFLOW_CHECK is turned on, we reserve some bytes at the beginning
- * and at the end of each element, initialize them as 0xcd and check
- * them later. */
-/* If MEMP_OVERFLOW_CHECK is >= 2, on every call to memp_malloc or memp_free,
- * every single element in each pool is checked!
- * This is VERY SLOW but also very helpful. */
-/* MEMP_SANITY_REGION_BEFORE and MEMP_SANITY_REGION_AFTER can be overridden in
- * lwipopts.h to change the amount reserved for checking. */
-#ifndef MEMP_SANITY_REGION_BEFORE
-#define MEMP_SANITY_REGION_BEFORE  16
-#endif /* MEMP_SANITY_REGION_BEFORE*/
-#if MEMP_SANITY_REGION_BEFORE > 0
-#define MEMP_SANITY_REGION_BEFORE_ALIGNED    LWIP_MEM_ALIGN_SIZE(MEMP_SANITY_REGION_BEFORE)
-#else
-#define MEMP_SANITY_REGION_BEFORE_ALIGNED    0
-#endif /* MEMP_SANITY_REGION_BEFORE*/
-#ifndef MEMP_SANITY_REGION_AFTER
-#define MEMP_SANITY_REGION_AFTER   16
-#endif /* MEMP_SANITY_REGION_AFTER*/
-#if MEMP_SANITY_REGION_AFTER > 0
-#define MEMP_SANITY_REGION_AFTER_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEMP_SANITY_REGION_AFTER)
-#else
-#define MEMP_SANITY_REGION_AFTER_ALIGNED     0
-#endif /* MEMP_SANITY_REGION_AFTER*/
+
 /* MEMP_SIZE: save space for struct memp and for sanity check */
-#define MEMP_SIZE          (LWIP_MEM_ALIGN_SIZE(sizeof(struct memp)) + MEMP_SANITY_REGION_BEFORE_ALIGNED)
-#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x) + MEMP_SANITY_REGION_AFTER_ALIGNED)
+#define MEMP_SIZE          (LWIP_MEM_ALIGN_SIZE(sizeof(struct memp)) + MEM_SANITY_REGION_BEFORE_ALIGNED)
+#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x) + MEM_SANITY_REGION_AFTER_ALIGNED)
 
 #else /* MEMP_OVERFLOW_CHECK */
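Since memp_priv.h now takes its region sizes from mem_priv.h, the MEMP_SANITY_REGION_BEFORE/AFTER overrides no longer exist; a port that had overridden them in lwipopts.h would move to the shared names. A hypothetical migration, assuming a port that used 32-byte regions:

/* before (pre-patch lwipopts.h): pool-only guard sizes */
/* #define MEMP_SANITY_REGION_BEFORE 32 */
/* #define MEMP_SANITY_REGION_AFTER  32 */

/* after: the shared names from mem_priv.h, used by the heap and the pools */
#define MEM_SANITY_REGION_BEFORE 32
#define MEM_SANITY_REGION_AFTER  32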