/* Detect the sysconf() name for querying the memory page size. */
#if defined(_SC_PAGE_SIZE)
#define MHD_SC_PAGESIZE _SC_PAGE_SIZE
#elif defined(_SC_PAGESIZE)
#define MHD_SC_PAGESIZE _SC_PAGESIZE
#endif /* _SC_PAGESIZE */

/* Some platforms (e.g. Mac OS X) spell the anonymous-mapping flag
 * "MAP_ANON" instead of "MAP_ANONYMOUS". */
#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(_WIN32)
#define MAP_FAILED NULL
#elif ! defined(MAP_FAILED)
#define MAP_FAILED ((void *) -1)
#endif

/**
 * Align pool allocations to twice the size of a pointer.
 */
#define ALIGN_SIZE (2 * sizeof (void *))

/**
 * Round size @a n up to the next multiple of #ALIGN_SIZE.
 * Yields 0 if @a n is within ALIGN_SIZE-1 of SIZE_MAX (wrap-around);
 * callers must check for that.
 */
#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
                           / (ALIGN_SIZE) * (ALIGN_SIZE))

/* Compile-time fallback for the system page size; the real value is
 * queried at run time by MHD_init_mem_pools_(). */
#if defined(PAGE_SIZE)
#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
#elif defined(PAGESIZE)
#define MHD_DEF_PAGE_SIZE_ PAGESIZE /* fixed: was wrongly "PAGE_SIZE" */
#else  /* ! PAGESIZE */
#define MHD_DEF_PAGE_SIZE_ (4096)
#endif /* ! PAGESIZE */

/**
 * Size of a memory page; starts at the compile-time default and is
 * updated with the real system value by MHD_init_mem_pools_().
 */
static size_t MHD_sys_page_size_ = (size_t) MHD_DEF_PAGE_SIZE_;

/**
 * Initialise values for memory pools.
 * Queries the real page size via sysconf() where available;
 * otherwise keeps the compile-time default.
 */
void
MHD_init_mem_pools_ (void)
{
#ifdef MHD_SC_PAGESIZE
  long result;

  result = sysconf (MHD_SC_PAGESIZE);
  if (-1 != result)
    MHD_sys_page_size_ = (size_t) result;
  else
    MHD_sys_page_size_ = (size_t) MHD_DEF_PAGE_SIZE_;
#else  /* ! MHD_SC_PAGESIZE */
  MHD_sys_page_size_ = (size_t) MHD_DEF_PAGE_SIZE_;
#endif /* ! MHD_SC_PAGESIZE */
}
145 struct MemoryPool *pool;
148 pool = malloc (
sizeof (
struct MemoryPool));
151 #if defined(MAP_ANONYMOUS) || defined(_WIN32) 152 if ( (max <= 32 * 1024) ||
160 #if defined(MAP_ANONYMOUS) && !defined(_WIN32) 161 pool->memory = mmap (
NULL,
163 PROT_READ | PROT_WRITE,
164 MAP_PRIVATE | MAP_ANONYMOUS,
167 #elif defined(_WIN32) 168 pool->memory = VirtualAlloc (
NULL,
170 MEM_COMMIT | MEM_RESERVE,
180 pool->memory = malloc (alloc_size);
181 if (
NULL == pool->memory)
186 pool->is_mmap =
false;
188 #if defined(MAP_ANONYMOUS) || defined(_WIN32) 191 pool->is_mmap =
true;
195 pool->end = alloc_size;
196 pool->size = alloc_size;
213 mhd_assert (pool->size >= pool->end - pool->pos);
217 #if defined(MAP_ANONYMOUS) && !defined(_WIN32) 218 munmap (pool->memory,
220 #elif defined(_WIN32) 221 VirtualFree (pool->memory,
241 mhd_assert (pool->size >= pool->end - pool->pos);
242 return (pool->end - pool->pos);
266 mhd_assert (pool->size >= pool->end - pool->pos);
268 if ( (0 == asize) && (0 != size) )
270 if ( (pool->pos + asize > pool->end) ||
271 (pool->pos + asize < pool->pos))
275 ret = &pool->memory[pool->end - asize];
280 ret = &pool->memory[pool->pos];
314 mhd_assert (pool->size >= pool->end - pool->pos);
317 mhd_assert (old ==
NULL || pool->memory + pool->size >= (uint8_t*)old + old_size);
319 mhd_assert (old ==
NULL || pool->memory + pool->pos > (uint8_t*)old);
323 const size_t old_offset = (uint8_t*)old - pool->memory;
324 const bool shrinking = (old_size > new_size);
328 memset ((uint8_t*)old + new_size, 0, old_size - new_size);
335 if ( (new_apos > pool->end) ||
336 (new_apos < pool->pos) )
340 pool->pos = new_apos;
348 if ( ( (0 == asize) &&
350 (asize > pool->end - pool->pos) )
353 new_blc = pool->memory + pool->pos;
359 memcpy (new_blc, old, old_size);
361 memset (old, 0, old_size);
387 mhd_assert (pool->size >= pool->end - pool->pos);
391 mhd_assert (keep ==
NULL || pool->memory + pool->size >= (uint8_t*)keep + copy_bytes);
392 if ( (
NULL != keep) &&
393 (keep != pool->memory) )
396 memmove (pool->memory,
401 if (pool->size > copy_bytes)
405 to_zero = pool->size - copy_bytes;
410 uint8_t *recommit_addr;
413 recommit_addr = pool->memory + pool->size - to_recommit;
417 if (VirtualFree (recommit_addr,
421 to_zero -= to_recommit;
423 if (recommit_addr != VirtualAlloc (recommit_addr,
431 memset (&pool->memory[copy_bytes],
436 pool->end = pool->size;
void MHD_init_mem_pools_(void)
size_t MHD_pool_get_free(struct MemoryPool *pool)
void * MHD_pool_reset(struct MemoryPool *pool, void *keep, size_t copy_bytes, size_t new_size)
static size_t MHD_sys_page_size_
void * MHD_pool_allocate(struct MemoryPool *pool, size_t size, int from_end)
struct MemoryPool * MHD_pool_create(size_t max)
#define ROUND_TO_ALIGN(n)
void * MHD_pool_reallocate(struct MemoryPool *pool, void *old, size_t old_size, size_t new_size)
void MHD_pool_destroy(struct MemoryPool *pool)
#define MHD_DEF_PAGE_SIZE_