summaryrefslogtreecommitdiffstats
path: root/byterun/unix.c
diff options
context:
space:
mode:
authorXavier Leroy <xavier.leroy@inria.fr>2002-06-03 14:21:50 +0000
committerXavier Leroy <xavier.leroy@inria.fr>2002-06-03 14:21:50 +0000
commitc9c0e6d13c694dad2ff2fa3219491cd373256de7 (patch)
tree67bcb9189c0ec70008ee2bf41451a9fefe82ee92 /byterun/unix.c
parent685a839af651583941922c7aa87a9a13696e35ed (diff)
Nettoyage alloc_for_heap, free_for_heap. Prevoir d'utiliser mmap() au lieu de malloc() pour allouer le tas majeur (utile pour IA64/Linux)
git-svn-id: http://caml.inria.fr/svn/ocaml/trunk@4867 f963ae5c-01c2-4b8c-9fe0-0dff7051ff02
Diffstat (limited to 'byterun/unix.c')
-rw-r--r--byterun/unix.c47
1 files changed, 47 insertions, 0 deletions
diff --git a/byterun/unix.c b/byterun/unix.c
index d1e29ec39..465b68409 100644
--- a/byterun/unix.c
+++ b/byterun/unix.c
@@ -207,3 +207,50 @@ char * caml_dlerror(void)
#endif
+#ifdef USE_MMAP_INSTEAD_OF_MALLOC
+
+/* The code below supports the use of mmap() rather than malloc()
+ for allocating the chunks composing the major heap.
+ This code is needed for the IA64 under Linux, where the native
+ malloc() implementation can return pointers several *exabytes* apart,
+   (some coming from mmap(), others from sbrk()); this makes the
+ page table *way* too large.
+ No other tested platform requires this hack so far. However, it could
+ be useful for other 64-bit platforms in the future. */
+
+#include <sys/mman.h>
+
+/* Allocate [size] bytes via mmap() and return a pointer [p] such that
+   [p + modulo] is a multiple of Page_size (i.e. [p] is congruent to
+   -modulo modulo the page size).  The raw, unaligned start of the
+   mapping is stored in [*block]; it must be passed to aligned_munmap
+   to release the memory.  Returns NULL if mmap() fails. */
+char *aligned_mmap (asize_t size, int modulo, void **block)
+{
+  char *raw_mem;
+  unsigned long aligned_mem;
+  Assert (modulo < Page_size);
+  /* Map one extra page so that a suitably aligned address is
+     guaranteed to exist inside the mapping. */
+  raw_mem = (char *) mmap(NULL, size + Page_size, PROT_READ | PROT_WRITE,
+                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (raw_mem == MAP_FAILED) return NULL;
+  *block = raw_mem;
+  raw_mem += modulo;                /* Address to be aligned */
+  /* Round up to the next page boundary.  This always advances by at
+     least one byte even if already aligned, which is harmless thanks
+     to the extra page mapped above. */
+  aligned_mem = (((unsigned long) raw_mem / Page_size + 1) * Page_size);
+#ifdef DEBUG
+  {
+    unsigned long *p;
+    unsigned long *p0 = (void *) *block,              /* mapping start */
+      *p1 = (void *) (aligned_mem - modulo),          /* usable area start */
+      *p2 = (void *) (aligned_mem - modulo + size),   /* usable area end */
+      *p3 = (void *) ((char *) *block + size + Page_size); /* mapping end */
+
+    /* Fill the slack before and after the usable area with
+       Debug_filler_align, and the usable area itself with
+       Debug_uninit_align, so stray accesses are detectable. */
+    for (p = p0; p < p1; p++) *p = Debug_filler_align;
+    for (p = p1; p < p2; p++) *p = Debug_uninit_align;
+    for (p = p2; p < p3; p++) *p = Debug_filler_align;
+  }
+#endif
+  return (char *) (aligned_mem - modulo);
+}
+
+/* Release a block allocated with aligned_mmap.  [addr] must be the raw
+   mapping address stored in *block by aligned_mmap (munmap requires a
+   page-aligned address, which the aligned pointer returned by
+   aligned_mmap generally is not), and [size] the same size that was
+   requested; the extra alignment page is unmapped along with it. */
+void aligned_munmap (char * addr, asize_t size)
+{
+  int retcode = munmap (addr, size + Page_size);
+  Assert(retcode == 0);
+  (void) retcode;  /* silence unused-variable warning when Assert expands to nothing */
+}
+
+#endif