X-Git-Url: http://de.git.xonotic.org/?p=xonotic%2Fdarkplaces.git;a=blobdiff_plain;f=zone.c;h=2c72c113cfb50dd6c30e0a86eaef8665c279c24b;hp=87e7121248bf6cd0043cdf69156dfffa75dd1a52;hb=8b02b061c1b8af09a7062d13cdd0540fc3ef5fc6;hpb=cfee52a1ec9db338098789cae89ae5cf1f7a6fbf

diff --git a/zone.c b/zone.c
index 87e71212..2c72c113 100644
--- a/zone.c
+++ b/zone.c
@@ -40,15 +40,26 @@ unsigned int sentinel_seed;
 qboolean mem_bigendian = false;
 void *mem_mutex = NULL;
 
+// divVerent: enables file backed malloc using mmap to conserve swap space (instead of malloc)
+#ifndef FILE_BACKED_MALLOC
+# define FILE_BACKED_MALLOC 0
+#endif
+
 // LordHavoc: enables our own low-level allocator (instead of malloc)
-#define MEMCLUMPING 0
-#define MEMCLUMPING_FREECLUMPS 0
+#ifndef MEMCLUMPING
+# define MEMCLUMPING 0
+#endif
+#ifndef MEMCLUMPING_FREECLUMPS
+# define MEMCLUMPING_FREECLUMPS 0
+#endif
 
 #if MEMCLUMPING
 // smallest unit we care about is this many bytes
 #define MEMUNIT 128
 // try to do 32MB clumps, but overhead eats into this
-#define MEMWANTCLUMPSIZE (1<<27)
+#ifndef MEMWANTCLUMPSIZE
+# define MEMWANTCLUMPSIZE (1<<27)
+#endif
 // give malloc padding so we can't waste most of a page at the end
 #define MEMCLUMPSIZE (MEMWANTCLUMPSIZE - MEMWANTCLUMPSIZE/MEMUNIT/32 - 128)
 #define MEMBITS (MEMCLUMPSIZE / MEMUNIT)
@@ -92,6 +103,46 @@ static mempool_t *poolchain = NULL;
 void Mem_PrintStats(void);
 void Mem_PrintList(size_t minallocationsize);
 
+#if FILE_BACKED_MALLOC
+#include <stdlib.h>
+#include <sys/mman.h>
+typedef struct mmap_data_s
+{
+	size_t len;
+}
+mmap_data_t;
+static void *mmap_malloc(size_t size)
+{
+	char vabuf[MAX_OSPATH + 1];
+	char *tmpdir = getenv("TEMP");
+	mmap_data_t *data;
+	int fd;
+	size += sizeof(mmap_data_t); // waste block
+	dpsnprintf(vabuf, sizeof(vabuf), "%s/darkplaces.XXXXXX", tmpdir ? tmpdir : "/tmp");
+	fd = mkstemp(vabuf);
+	if(fd < 0)
+		return NULL;
+	ftruncate(fd, size);
+	data = (mmap_data_t *) mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd, 0);
+	close(fd);
+	unlink(vabuf);
+	if(data == MAP_FAILED)
+		return NULL;
+	data->len = size;
+	return (void *) (data + 1);
+}
+static void mmap_free(void *mem)
+{
+	mmap_data_t *data;
+	if(!mem)
+		return;
+	data = ((mmap_data_t *) mem) - 1;
+	munmap(data, data->len);
+}
+#define malloc mmap_malloc
+#define free mmap_free
+#endif
+
 #if MEMCLUMPING != 2
 // some platforms have a malloc that returns NULL but succeeds later
 // (Windows growing its swapfile for example)
@@ -331,7 +382,12 @@ void *_Mem_Alloc(mempool_t *pool, void *olddata, size_t size, size_t alignment,
 		return NULL;
 	}
 	if (pool == NULL)
-		Sys_Error("Mem_Alloc: pool == NULL (alloc at %s:%i)", filename, fileline);
+	{
+		if(olddata)
+			pool = ((memheader_t *)((unsigned char *) olddata - sizeof(memheader_t)))->pool;
+		else
+			Sys_Error("Mem_Alloc: pool == NULL (alloc at %s:%i)", filename, fileline);
+	}
 	if (mem_mutex)
 		Thread_LockMutex(mem_mutex);
 	if (developer_memory.integer)
@@ -641,44 +697,6 @@ void Mem_ExpandableArray_FreeArray(memexpandablearray_t *l)
 	memset(l, 0, sizeof(*l));
 }
 
-// VorteX: hacked Mem_ExpandableArray_AllocRecord, it does allocate record at certain index
-void *Mem_ExpandableArray_AllocRecordAtIndex(memexpandablearray_t *l, size_t index)
-{
-	size_t j;
-	if (index >= l->numarrays)
-	{
-		if (l->numarrays == l->maxarrays)
-		{
-			memexpandablearray_array_t *oldarrays = l->arrays;
-			l->maxarrays = max(l->maxarrays * 2, 128);
-			l->arrays = (memexpandablearray_array_t*) Mem_Alloc(l->mempool, l->maxarrays * sizeof(*l->arrays));
-			if (oldarrays)
-			{
-				memcpy(l->arrays, oldarrays, l->numarrays * sizeof(*l->arrays));
-				Mem_Free(oldarrays);
-			}
-		}
-		l->arrays[index].numflaggedrecords = 0;
-		l->arrays[index].data = (unsigned char *) Mem_Alloc(l->mempool, (l->recordsize + 1) * l->numrecordsperarray);
-		l->arrays[index].allocflags = l->arrays[index].data + l->recordsize * l->numrecordsperarray;
-		l->numarrays++;
-	}
-	if (l->arrays[index].numflaggedrecords < l->numrecordsperarray)
-	{
-		for (j = 0;j < l->numrecordsperarray;j++)
-		{
-			if (!l->arrays[index].allocflags[j])
-			{
-				l->arrays[index].allocflags[j] = true;
-				l->arrays[index].numflaggedrecords++;
-				memset(l->arrays[index].data + l->recordsize * j, 0, l->recordsize);
-				return (void *)(l->arrays[index].data + l->recordsize * j);
-			}
-		}
-	}
-	return NULL;
-}
-
 void *Mem_ExpandableArray_AllocRecord(memexpandablearray_t *l)
 {
 	size_t i, j;
@@ -874,13 +892,13 @@ void Memory_Init (void)
 	u.s = 0x100;
 	mem_bigendian = u.b[0] != 0;
 
-	if (Thread_HasThreads())
-		mem_mutex = Thread_CreateMutex();
-
 	sentinel_seed = rand();
 	poolchain = NULL;
 	tempmempool = Mem_AllocPool("Temporary Memory", POOLFLAG_TEMP, NULL);
 	zonemempool = Mem_AllocPool("Zone", 0, NULL);
+
+	if (Thread_HasThreads())
+		mem_mutex = Thread_CreateMutex();
 }
 
 void Memory_Shutdown (void)
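
Editor's note: for readers unfamiliar with the trick used in the FILE_BACKED_MALLOC block above, the following is a minimal standalone sketch of the same technique under stated assumptions: allocations are backed by an unlinked temporary file rather than anonymous memory, so dirty pages can be written back to that file instead of swap, and a small length header stored in front of the user block tells the free path how much to munmap(). The names filemap_alloc, filemap_free, the TMPDIR lookup, and the demo main() are illustrative only and are not part of darkplaces; POSIX is assumed, and MAP_NORESERVE is treated as optional.

/* Sketch of file-backed allocation: an unlinked temp file provides the
 * backing store, and a size header ahead of the user block records the
 * mapping length so it can be unmapped later. POSIX only; illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0               /* not available everywhere; harmless to omit */
#endif

typedef struct { size_t len; } filemap_hdr_t;

static void *filemap_alloc(size_t size)
{
	char path[4096];
	const char *tmpdir = getenv("TMPDIR");
	filemap_hdr_t *hdr;
	int fd;

	size += sizeof(filemap_hdr_t);    /* room for the length header */
	snprintf(path, sizeof(path), "%s/filemap.XXXXXX", tmpdir ? tmpdir : "/tmp");
	fd = mkstemp(path);               /* create a unique temporary file */
	if (fd < 0)
		return NULL;
	if (ftruncate(fd, (off_t)size) != 0) {  /* give the file its final size */
		close(fd);
		unlink(path);
		return NULL;
	}
	hdr = mmap(NULL, size, PROT_READ | PROT_WRITE,
	           MAP_SHARED | MAP_NORESERVE, fd, 0);
	close(fd);                        /* the mapping keeps the file alive */
	unlink(path);                     /* file disappears once unmapped */
	if (hdr == MAP_FAILED)
		return NULL;
	hdr->len = size;                  /* remember length for munmap() */
	return hdr + 1;                   /* hand out the memory after the header */
}

static void filemap_free(void *mem)
{
	filemap_hdr_t *hdr;
	if (!mem)
		return;
	hdr = (filemap_hdr_t *)mem - 1;   /* step back to the header */
	munmap(hdr, hdr->len);
}

int main(void)
{
	char *buf = filemap_alloc(1 << 20);     /* 1 MB backed by a temp file */
	if (!buf)
		return 1;
	strcpy(buf, "hello from a file-backed block");
	printf("%s\n", buf);
	filemap_free(buf);
	return 0;
}

Because the temporary file is unlinked immediately after mmap(), nothing is left behind on disk even if the process crashes; the diff above uses the same pattern, then redefines malloc/free to these wrappers so the rest of zone.c is unaffected.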