Diffstat (limited to 'byterun/unix.c')
-rw-r--r-- | byterun/unix.c | 47
1 file changed, 47 insertions, 0 deletions
diff --git a/byterun/unix.c b/byterun/unix.c
index d1e29ec39..465b68409 100644
--- a/byterun/unix.c
+++ b/byterun/unix.c
@@ -207,3 +207,50 @@ char * caml_dlerror(void)
 #endif
+#ifdef USE_MMAP_INSTEAD_OF_MALLOC
+
+/* The code below supports the use of mmap() rather than malloc()
+   for allocating the chunks composing the major heap.
+   This code is needed for the IA64 under Linux, where the native
+   malloc() implementation can return pointers several *exabytes* apart
+   (some coming from mmap(), others from sbrk()); this makes the
+   page table *way* too large.
+   No other tested platform requires this hack so far. However, it could
+   be useful for other 64-bit platforms in the future. */
+
+#include <sys/mman.h>
+
+char *aligned_mmap (asize_t size, int modulo, void **block)
+{
+  char *raw_mem;
+  unsigned long aligned_mem;
+  Assert (modulo < Page_size);
+  raw_mem = (char *) mmap(NULL, size + Page_size, PROT_READ | PROT_WRITE,
+                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (raw_mem == MAP_FAILED) return NULL;
+  *block = raw_mem;
+  raw_mem += modulo;                /* Address to be aligned */
+  aligned_mem = (((unsigned long) raw_mem / Page_size + 1) * Page_size);
+#ifdef DEBUG
+  {
+    unsigned long *p;
+    unsigned long *p0 = (void *) *block,
+                  *p1 = (void *) (aligned_mem - modulo),
+                  *p2 = (void *) (aligned_mem - modulo + size),
+                  *p3 = (void *) ((char *) *block + size + Page_size);
+
+    for (p = p0; p < p1; p++) *p = Debug_filler_align;
+    for (p = p1; p < p2; p++) *p = Debug_uninit_align;
+    for (p = p2; p < p3; p++) *p = Debug_filler_align;
+  }
+#endif
+  return (char *) (aligned_mem - modulo);
+}
+
+void aligned_munmap (char * addr, asize_t size)
+{
+  int retcode = munmap (addr, size + Page_size);
+  Assert(retcode == 0);
+}
+
+#endif
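
For readers unfamiliar with the over-allocate-and-round trick above: the patch maps one extra page, then picks the unique address p inside the mapping such that p + modulo lands exactly on a page boundary, so a chunk header of modulo bytes can sit just below a page-aligned payload. The standalone sketch below (not part of the patch) illustrates the same arithmetic; demo_aligned_mmap, PAGE_SZ, and the values in main are illustrative stand-ins for the runtime's aligned_mmap, Page_size, and its caller, and on some non-Linux systems MAP_ANONYMOUS is spelled MAP_ANON.

#include <assert.h>
#include <stdio.h>
#include <sys/mman.h>

#define PAGE_SZ 4096UL   /* stand-in for the runtime's Page_size */

/* Same rounding scheme as aligned_mmap in the patch: over-allocate by
   one page, then return the address p for which p + modulo falls on a
   page boundary.  *block receives the raw mapping so the caller can
   munmap(block, size + PAGE_SZ) later, as aligned_munmap does. */
static char *demo_aligned_mmap (size_t size, int modulo, void **block)
{
  char *raw_mem;
  unsigned long aligned_mem;

  assert ((unsigned long) modulo < PAGE_SZ);
  raw_mem = (char *) mmap(NULL, size + PAGE_SZ, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw_mem == MAP_FAILED) return NULL;
  *block = raw_mem;
  /* Next page boundary strictly above raw_mem + modulo.  Because the
     mapping is one page larger than requested, both boundary - modulo
     and the size bytes following it still fit inside the mapping. */
  aligned_mem = ((unsigned long) (raw_mem + modulo) / PAGE_SZ + 1) * PAGE_SZ;
  return (char *) (aligned_mem - modulo);
}

int main(void)
{
  void *block;
  size_t size = 1 << 20;      /* a 1 MiB "heap chunk" (illustrative) */
  int modulo = 64;            /* e.g. a 64-byte chunk header (illustrative) */
  char *p = demo_aligned_mmap (size, modulo, &block);

  if (p == NULL) return 1;
  /* The payload just past the header is page-aligned: */
  printf ("(p + modulo) %% PAGE_SZ = %lu (expected 0)\n",
          (unsigned long) (p + modulo) % PAGE_SZ);
  munmap (block, size + PAGE_SZ);   /* mirrors aligned_munmap */
  return 0;
}

Note that freeing must go through the raw pointer stored in *block with the enlarged length, never through the aligned pointer; that is why the patch pairs aligned_mmap with aligned_munmap rather than calling munmap directly on the returned address.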