author | Damien Doligez <damien.doligez@inria.fr> | 2003-12-31 14:20:40 +0000
committer | Damien Doligez <damien.doligez@inria.fr> | 2003-12-31 14:20:40 +0000
commit | 331b2d89c3dc4fb7e1b51276a5a9e37a6c8c8b3a
tree | 305f55662dc4f3e95d7aac3ffd0659112c27e1df
parent | fc3a69ce89605c92e3b65d17241a5e23644fc08e
depollution suite (PR#1914, PR#1956)
git-svn-id: http://caml.inria.fr/svn/ocaml/trunk@6044 f963ae5c-01c2-4b8c-9fe0-0dff7051ff02
89 files changed, 1393 insertions, 1219 deletions
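The changes below systematically give exported runtime symbols and C primitives a `caml_` prefix (`modify` → `caml_modify`, `young_ptr` → `caml_young_ptr`, `callback_exn` → `caml_callback_exn`, `equal` → `caml_equal`, and so on), so that the runtime no longer pollutes the global C namespace of programs it is linked into. As a minimal sketch of the convention, and not part of this patch, the hypothetical binding below refers to one of the renamed primitives, `caml_equal`, under its prefixed name — the same name the compiler itself now emits for `Pccall` (see the translcore.ml and bytegen.ml hunks further down):

```ocaml
(* Minimal sketch, not part of this commit: a hypothetical external binding
   to the runtime's structural-comparison primitive under its new
   caml_-prefixed name. *)
external structural_equal : 'a -> 'a -> bool = "caml_equal"

let () =
  (* caml_equal performs polymorphic structural comparison, like (=). *)
  assert (structural_equal [1; 2; 3] [1; 2; 3])
```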
diff --git a/asmcomp/cmmgen.ml b/asmcomp/cmmgen.ml index 6cc838883..1060d0c31 100644 --- a/asmcomp/cmmgen.ml +++ b/asmcomp/cmmgen.ml @@ -287,7 +287,7 @@ let float_array_ref arr ofs = box_float(unboxed_float_array_ref arr ofs) let addr_array_set arr ofs newval = - Cop(Cextcall("modify", typ_void, false), + Cop(Cextcall("caml_modify", typ_void, false), [array_indexing log2_size_addr arr ofs; newval]) let int_array_set arr ofs newval = Cop(Cstore Word, [array_indexing log2_size_addr arr ofs; newval]) @@ -594,12 +594,12 @@ let simplif_primitive_32bits = function | Plslbint Pint64 -> Pccall (default_prim "int64_shift_left") | Plsrbint Pint64 -> Pccall (default_prim "int64_shift_right_unsigned") | Pasrbint Pint64 -> Pccall (default_prim "int64_shift_right") - | Pbintcomp(Pint64, Lambda.Ceq) -> Pccall (default_prim "equal") - | Pbintcomp(Pint64, Lambda.Cneq) -> Pccall (default_prim "notequal") - | Pbintcomp(Pint64, Lambda.Clt) -> Pccall (default_prim "lessthan") - | Pbintcomp(Pint64, Lambda.Cgt) -> Pccall (default_prim "greaterthan") - | Pbintcomp(Pint64, Lambda.Cle) -> Pccall (default_prim "lessequal") - | Pbintcomp(Pint64, Lambda.Cge) -> Pccall (default_prim "greaterequal") + | Pbintcomp(Pint64, Lambda.Ceq) -> Pccall (default_prim "caml_equal") + | Pbintcomp(Pint64, Lambda.Cneq) -> Pccall (default_prim "caml_notequal") + | Pbintcomp(Pint64, Lambda.Clt) -> Pccall (default_prim "caml_lessthan") + | Pbintcomp(Pint64, Lambda.Cgt) -> Pccall (default_prim "caml_greaterthan") + | Pbintcomp(Pint64, Lambda.Cle) -> Pccall (default_prim "caml_lessequal") + | Pbintcomp(Pint64, Lambda.Cge) -> Pccall (default_prim "caml_greaterequal") | Pbigarrayref(n, Pbigarray_int64, layout) -> Pccall (default_prim ("bigarray_get_" ^ string_of_int n)) | Pbigarrayset(n, Pbigarray_int64, layout) -> @@ -856,7 +856,7 @@ let rec transl = function | (Pmakearray kind, args) -> begin match kind with Pgenarray -> - Cop(Cextcall("make_array", typ_addr, true), + Cop(Cextcall("caml_make_array", typ_addr, true), [make_alloc 0 (List.map transl args)]) | Paddrarray | Pintarray -> make_alloc 0 (List.map transl args) @@ -1081,7 +1081,7 @@ and transl_prim_2 p arg1 arg2 = (* Heap operations *) Psetfield(n, ptr) -> if ptr then - return_unit(Cop(Cextcall("modify", typ_void, false), + return_unit(Cop(Cextcall("caml_modify", typ_void, false), [field_address (transl arg1) n; transl arg2])) else return_unit(set_field (transl arg1) n (transl arg2)) diff --git a/asmrun/alpha.S b/asmrun/alpha.S index 99c71dd8d..9efafac06 100644 --- a/asmrun/alpha.S +++ b/asmrun/alpha.S @@ -102,7 +102,7 @@ $103: ldgp $gp, 0($27) lda $24, 0x100($sp) stq $24, caml_gc_regs /* Save current allocation pointer for debugging purposes */ -$113: stq $13, young_ptr +$113: stq $13, caml_young_ptr /* Save trap pointer in case an exception is raised (e.g. 
sighandler) */ stq $15, caml_exception_pointer /* Save all integer regs used by the code generator in the context */ @@ -197,8 +197,8 @@ $113: stq $13, young_ptr ldt $f29, 29 * 8 ($sp) ldt $f30, 30 * 8 ($sp) /* Reload new allocation pointer and allocation limit */ - ldq $13, young_ptr - ldq $14, young_limit + ldq $13, caml_young_ptr + ldq $14, caml_young_limit /* Allocate space for the block */ ldq $25, 0x1E8($sp) subq $13, $25, $13 @@ -233,16 +233,16 @@ $104: ldgp $gp, 0($27) stq $26, 0($11) stq $sp, caml_bottom_of_stack /* Make the exception handler and alloc ptr available to the C code */ - lda $12, young_ptr + lda $12, caml_young_ptr stq $13, 0($12) - lda $14, young_limit + lda $14, caml_young_limit stq $15, caml_exception_pointer /* Call the function */ mov $25, $27 jsr ($25) /* Reload alloc ptr and alloc limit */ - ldq $13, 0($12) /* $12 still points to young_ptr */ - ldq $14, 0($14) /* $14 still points to young_limit */ + ldq $13, 0($12) /* $12 still points to caml_young_ptr */ + ldq $14, 0($14) /* $14 still points to caml_young_limit */ /* Say that we are back into Caml code */ stq $31, 0($11) /* $11 still points to caml_last_return_address */ /* Restore $gp */ @@ -261,7 +261,7 @@ caml_start_program: ldgp $gp, 0($27) lda $25, caml_program -/* Code shared with callback* */ +/* Code shared with caml_callback* */ $107: /* Save return address */ lda $sp, -128($sp) @@ -298,8 +298,8 @@ $107: stq $0, 8($sp) mov $sp, $15 /* Reload allocation pointers */ - ldq $13, young_ptr - ldq $14, young_limit + ldq $13, caml_young_ptr + ldq $14, caml_young_limit /* We are back into Caml code */ stq $31, caml_last_return_address /* Call the Caml code */ @@ -321,7 +321,7 @@ $112: ldq $24, 0($sp) stq $24, caml_gc_regs lda $sp, 32($sp) /* Update allocation pointer */ - stq $13, young_ptr + stq $13, caml_young_ptr /* Reload callee-save registers */ ldq $9, 8($sp) ldq $10, 16($sp) @@ -362,8 +362,8 @@ $109: ldgp $gp, 0($26) raise_caml_exception: ldgp $gp, 0($27) mov $16, $0 /* Move exn bucket */ - ldq $13, young_ptr - ldq $14, young_limit + ldq $13, caml_young_ptr + ldq $14, caml_young_limit stq $31, caml_last_return_address /* We're back into Caml */ ldq $sp, caml_exception_pointer ldq $15, 0($sp) @@ -374,10 +374,10 @@ raise_caml_exception: /* Callback from C to Caml */ - .globl callback_exn - .ent callback_exn + .globl caml_callback_exn + .ent caml_callback_exn .align 3 -callback_exn: +caml_callback_exn: /* Initial shuffling of arguments */ ldgp $gp, 0($27) mov $16, $25 @@ -385,12 +385,12 @@ callback_exn: mov $25, $17 /* environment */ ldq $25, 0($25) /* code pointer */ br $107 - .end callback_exn + .end caml_callback_exn - .globl callback2_exn - .ent callback2_exn + .globl caml_callback2_exn + .ent caml_callback2_exn .align 3 -callback2_exn: +caml_callback2_exn: ldgp $gp, 0($27) mov $16, $25 mov $17, $16 /* first arg */ @@ -398,12 +398,12 @@ callback2_exn: mov $25, $18 /* environment */ lda $25, caml_apply2 br $107 - .end callback2_exn + .end caml_callback2_exn - .globl callback3_exn - .ent callback3_exn + .globl caml_callback3_exn + .ent caml_callback3_exn .align 3 -callback3_exn: +caml_callback3_exn: ldgp $gp, 0($27) mov $16, $25 mov $17, $16 /* first arg */ @@ -412,7 +412,7 @@ callback3_exn: mov $25, $19 /* environment */ lda $25, caml_apply3 br $107 - .end callback3_exn + .end caml_callback3_exn /* Glue code to call array_bound_error */ diff --git a/asmrun/amd64.S b/asmrun/amd64.S index 50eecf538..e2070239f 100644 --- a/asmrun/amd64.S +++ b/asmrun/amd64.S @@ -34,8 +34,8 @@ FUNCTION(caml_call_gc) movq 
%rax, caml_last_return_address(%rip) leaq 8(%rsp), %rax movq %rax, caml_bottom_of_stack(%rip) - /* Save young_ptr, caml_exception_pointer */ - movq %r15, young_ptr(%rip) + /* Save caml_young_ptr, caml_exception_pointer */ + movq %r15, caml_young_ptr(%rip) movq %r14, caml_exception_pointer(%rip) /* Build array of registers, save it into caml_gc_regs */ .L105: @@ -104,15 +104,15 @@ FUNCTION(caml_call_gc) popq %rbp popq %r12 popq %r13 - /* Restore young_ptr, caml_exception_pointer */ - movq young_ptr(%rip), %r15 + /* Restore caml_young_ptr, caml_exception_pointer */ + movq caml_young_ptr(%rip), %r15 movq caml_exception_pointer(%rip), %r14 /* Return to caller */ ret FUNCTION(caml_alloc1) subq $16, %r15 - cmpq young_limit(%rip), %r15 + cmpq caml_young_limit(%rip), %r15 jb .L100 ret .L100: @@ -127,7 +127,7 @@ FUNCTION(caml_alloc1) FUNCTION(caml_alloc2) subq $24, %r15 - cmpq young_limit(%rip), %r15 + cmpq caml_young_limit(%rip), %r15 jb .L101 ret .L101: @@ -142,7 +142,7 @@ FUNCTION(caml_alloc2) FUNCTION(caml_alloc3) subq $32, %r15 - cmpq young_limit(%rip), %r15 + cmpq caml_young_limit(%rip), %r15 jb .L102 ret .L102: @@ -157,7 +157,7 @@ FUNCTION(caml_alloc3) FUNCTION(caml_alloc) subq %rax, %r15 - cmpq young_limit(%rip), %r15 + cmpq caml_young_limit(%rip), %r15 jb .L103 ret .L103: @@ -178,12 +178,12 @@ FUNCTION(caml_c_call) movq %r12, caml_last_return_address(%rip) movq %rsp, caml_bottom_of_stack(%rip) /* Make the exception handler and alloc ptr available to the C code */ - movq %r15, young_ptr(%rip) + movq %r15, caml_young_ptr(%rip) movq %r14, caml_exception_pointer(%rip) /* Call the function (address in %rax) */ call *%rax /* Reload alloc ptr */ - movq young_ptr(%rip), %r15 + movq caml_young_ptr(%rip), %r15 /* Return to caller */ pushq %r12 ret @@ -201,7 +201,7 @@ FUNCTION(caml_start_program) subq $8, %rsp /* stack 16-aligned */ /* Initial entry point is caml_program */ leaq caml_program(%rip), %r12 - /* Common code for caml_start_program and callback* */ + /* Common code for caml_start_program and caml_callback* */ .L106: /* Build a callback link */ subq $8, %rsp /* stack 16-aligned */ @@ -209,7 +209,7 @@ FUNCTION(caml_start_program) pushq caml_last_return_address(%rip) pushq caml_bottom_of_stack(%rip) /* Setup alloc ptr and exception ptr */ - movq young_ptr(%rip), %r15 + movq caml_young_ptr(%rip), %r15 movq caml_exception_pointer(%rip), %r14 /* Build an exception handler */ lea .L108(%rip), %r13 @@ -224,7 +224,7 @@ FUNCTION(caml_start_program) popq %r12 /* dummy register */ .L109: /* Update alloc ptr and exception ptr */ - movq %r15, young_ptr(%rip) + movq %r15, caml_young_ptr(%rip) movq %r14, caml_exception_pointer(%rip) /* Pop the callback link, restoring the global variables */ popq caml_bottom_of_stack(%rip) @@ -253,12 +253,12 @@ FUNCTION(raise_caml_exception) movq %rdi, %rax movq caml_exception_pointer(%rip), %rsp popq %r14 /* Recover previous exception handler */ - movq young_ptr(%rip), %r15 /* Reload alloc ptr */ + movq caml_young_ptr(%rip), %r15 /* Reload alloc ptr */ ret /* Callback from C to Caml */ -FUNCTION(callback_exn) +FUNCTION(caml_callback_exn) /* Save callee-save registers */ pushq %rbx pushq %rbp @@ -273,7 +273,7 @@ FUNCTION(callback_exn) movq 0(%rbx), %r12 /* code pointer */ jmp .L106 -FUNCTION(callback2_exn) +FUNCTION(caml_callback2_exn) /* Save callee-save registers */ pushq %rbx pushq %rbp @@ -289,7 +289,7 @@ FUNCTION(callback2_exn) leaq caml_apply2(%rip), %r12 /* code pointer */ jmp .L106 -FUNCTION(callback3_exn) +FUNCTION(caml_callback3_exn) /* Save callee-save 
registers */ pushq %rbx pushq %rbp @@ -308,7 +308,7 @@ FUNCTION(callback3_exn) FUNCTION(caml_array_bound_error) /* Make the exception handler and alloc ptr available to the C code */ - movq %r15, young_ptr(%rip) + movq %r15, caml_young_ptr(%rip) movq %r14, caml_exception_pointer(%rip) jmp array_bound_error diff --git a/asmrun/arm.S b/asmrun/arm.S index c465636b5..d2ba61016 100644 --- a/asmrun/arm.S +++ b/asmrun/arm.S @@ -114,7 +114,7 @@ caml_alloc: stfd f2, [sp, #-8]! stfd f3, [sp, #-8]! /* Save current allocation pointer for debugging purposes */ - ldr r10, .Lyoung_ptr + ldr r10, .Lcaml_young_ptr str alloc_ptr, [r10, #0] /* Save trap pointer in case an exception is raised during GC */ ldr r10, .Lcaml_exception_pointer @@ -134,9 +134,9 @@ caml_alloc: mov alloc_ptr, #0 str alloc_ptr, [r10, #0] /* Reload new allocation pointer and allocation limit */ - ldr r10, .Lyoung_ptr + ldr r10, .Lcaml_young_ptr ldr alloc_ptr, [r10, #0] - ldr alloc_limit, .Lyoung_limit + ldr alloc_limit, .Lcaml_young_limit /* Return to caller */ ldmfd sp!, {pc} @@ -153,7 +153,7 @@ caml_c_call: str lr, [r5, #0] str sp, [r6, #0] /* Make the exception handler and alloc ptr available to the C code */ - ldr r6, .Lyoung_ptr + ldr r6, .Lcaml_young_ptr ldr r7, .Lcaml_exception_pointer str alloc_ptr, [r6, #0] str trap_ptr, [r7, #0] @@ -161,7 +161,7 @@ caml_c_call: mov lr, pc mov pc, r10 /* Reload alloc ptr */ - ldr alloc_ptr, [r6, #0] /* r6 still points to young_ptr */ + ldr alloc_ptr, [r6, #0] /* r6 still points to caml_young_ptr */ /* Say that we are back into Caml code */ mov r6, #0 str r6, [r5, #0] /* r5 still points to caml_last_return_address */ @@ -174,7 +174,7 @@ caml_c_call: caml_start_program: ldr r10, .Lcaml_program -/* Code shared with callback* */ +/* Code shared with caml_callback* */ /* Address of Caml code to call is in r10 */ /* Arguments to the Caml code are in r0...r3 */ @@ -205,9 +205,9 @@ caml_start_program: str r4, [sp, #4] mov trap_ptr, sp /* Reload allocation pointers */ - ldr r4, .Lyoung_ptr + ldr r4, .Lcaml_young_ptr ldr alloc_ptr, [r4, #0] - ldr alloc_limit, .Lyoung_limit + ldr alloc_limit, .Lcaml_young_limit /* We are back into Caml code */ ldr r4, .Lcaml_last_return_address mov r5, #0 @@ -234,7 +234,7 @@ caml_start_program: str r5, [r4, #0] add sp, sp, #4*3 /* Update allocation pointer */ - ldr r4, .Lyoung_ptr + ldr r4, .Lcaml_young_ptr str alloc_ptr, [r4, #0] /* Reload callee-save registers and return */ ldfd f4, [sp], #8 @@ -258,9 +258,9 @@ caml_start_program: .global raise_caml_exception raise_caml_exception: /* Reload Caml allocation pointers */ - ldr r1, .Lyoung_ptr + ldr r1, .Lcaml_young_ptr ldr alloc_ptr, [r1, #0] - ldr alloc_limit, .Lyoung_limit + ldr alloc_limit, .Lcaml_young_limit /* Say we're back into Caml */ ldr r1, .Lcaml_last_return_address mov r2, #0 @@ -273,8 +273,8 @@ raise_caml_exception: /* Callback from C to Caml */ - .global callback_exn -callback_exn: + .global caml_callback_exn +caml_callback_exn: /* Initial shuffling of arguments (r0 = closure, r1 = first arg) */ mov r10, r0 mov r0, r1 /* r0 = first arg */ @@ -282,8 +282,8 @@ callback_exn: ldr r10, [r10, #0] /* code pointer */ b .Ljump_to_caml - .global callback2_exn -callback2_exn: + .global caml_callback2_exn +caml_callback2_exn: /* Initial shuffling of arguments (r0 = closure, r1 = arg1, r2 = arg2) */ mov r10, r0 mov r0, r1 /* r0 = first arg */ @@ -292,8 +292,8 @@ callback2_exn: ldr r10, .Lcaml_apply2 b .Ljump_to_caml - .global callback3_exn -callback3_exn: + .global caml_callback3_exn +caml_callback3_exn: /* Initial 
shuffling of arguments */ /* (r0 = closure, r1 = arg1, r2 = arg2, r3 = arg3) */ mov r10, r0 @@ -316,8 +316,8 @@ caml_array_bound_error: .Lcaml_last_return_address: .word caml_last_return_address .Lcaml_bottom_of_stack: .word caml_bottom_of_stack .Lcaml_gc_regs: .word caml_gc_regs -.Lyoung_ptr: .word young_ptr -.Lyoung_limit: .word young_limit +.Lcaml_young_ptr: .word caml_young_ptr +.Lcaml_young_limit: .word caml_young_limit .Lcaml_exception_pointer: .word caml_exception_pointer .Lcaml_program: .word caml_program .LLtrap_handler: .word .Ltrap_handler diff --git a/asmrun/hppa.S b/asmrun/hppa.S index 59daa91c7..b8e710158 100644 --- a/asmrun/hppa.S +++ b/asmrun/hppa.S @@ -60,8 +60,8 @@ .import caml_apply3, code .import array_bound_error, code -young_limit .comm 8 -young_ptr .comm 8 +caml_young_limit .comm 8 +caml_young_ptr .comm 8 caml_bottom_of_stack .comm 8 caml_last_return_address .comm 8 caml_gc_regs .comm 8 @@ -70,8 +70,8 @@ caml_required_size .comm 8 #endif #ifdef SYS_nextstep - .comm G(young_limit), 8 - .comm G(young_ptr), 8 + .comm G(caml_young_limit), 8 + .comm G(caml_young_ptr), 8 .comm G(caml_bottom_of_stack), 8 .comm G(caml_last_return_address), 8 .comm G(caml_gc_regs), 8 @@ -101,8 +101,8 @@ G(caml_call_gc): LOADHIGH(G(caml_required_size)) stw %r29, LOW(G(caml_required_size))(%r1) ; Save current allocation pointer for debugging purposes - LOADHIGH(G(young_ptr)) - stw %r3, LOW(G(young_ptr))(%r1) + LOADHIGH(G(caml_young_ptr)) + stw %r3, LOW(G(caml_young_ptr))(%r1) ; Record lowest stack address LOADHIGH(G(caml_bottom_of_stack)) stw %r30, LOW(G(caml_bottom_of_stack))(%r1) @@ -236,8 +236,8 @@ L100: ldo -(64 + 4*32)(%r30), %r31 fldds,ma 8(%r1), %fr30 ; Reload the allocation pointer - LOADHIGH(G(young_ptr)) - ldw LOW(G(young_ptr))(%r1), %r3 + LOADHIGH(G(caml_young_ptr)) + ldw LOW(G(caml_young_ptr))(%r1), %r3 ; Allocate space for block LOADHIGH(G(caml_required_size)) ldw LOW(G(caml_required_size))(%r1), %r29 @@ -273,8 +273,8 @@ G(caml_c_call): LOADHIGH(G(caml_exception_pointer)) stw %r5, LOW(G(caml_exception_pointer))(%r1) ; Save the allocation pointer - LOADHIGH(G(young_ptr)) - stw %r3, LOW(G(young_ptr))(%r1) + LOADHIGH(G(caml_young_ptr)) + stw %r3, LOW(G(caml_young_ptr))(%r1) ; Call the C function #ifdef SYS_hpux bl $$dyncall, %r31 @@ -286,10 +286,10 @@ G(caml_c_call): LOADHIGH(G(caml_last_return_address)) ldw LOW(G(caml_last_return_address))(%r1), %r2 ; Reload allocation pointer - LOADHIGH(G(young_ptr)) + LOADHIGH(G(caml_young_ptr)) ; Return to caller bv 0(%r2) - ldw LOW(G(young_ptr))(%r1), %r3 ; in delay slot + ldw LOW(G(caml_young_ptr))(%r1), %r3 ; in delay slot ENDPROC ; Start the Caml program @@ -301,7 +301,7 @@ G(caml_start_program): LOADHIGH(G(caml_program)) ldo LOW(G(caml_program))(%r1), %r22 -; Code shared with callback* +; Code shared with caml_callback* L102: ; Save return address stw %r2,-20(%r30) @@ -365,10 +365,10 @@ L102: stw %r1, -4(%r30) copy %r30, %r5 ; Reload allocation pointers - LOADHIGH(G(young_ptr)) - ldw LOW(G(young_ptr))(%r1), %r3 - LOADHIGH(G(young_limit)) - ldo LOW(G(young_limit))(%r1), %r4 + LOADHIGH(G(caml_young_ptr)) + ldw LOW(G(caml_young_ptr))(%r1), %r3 + LOADHIGH(G(caml_young_limit)) + ldo LOW(G(caml_young_limit))(%r1), %r4 ; Call the Caml code ble 0(4, %r22) copy %r31, %r2 @@ -391,8 +391,8 @@ L105: stw %r31, LOW(G(caml_gc_regs))(%r1) ldo -16(%r30), %r30 ; Save allocation pointer - LOADHIGH(G(young_ptr)) - stw %r3, LOW(G(young_ptr))(%r1) + LOADHIGH(G(caml_young_ptr)) + stw %r3, LOW(G(caml_young_ptr))(%r1) ; Move result where C function expects it copy 
%r26, %r28 ; Reload callee-save registers @@ -472,10 +472,10 @@ G(raise_caml_exception): LOADHIGH(G(caml_exception_pointer)) ldw LOW(G(caml_exception_pointer))(%r1), %r30 ; Reload allocation registers - LOADHIGH(G(young_ptr)) - ldw LOW(G(young_ptr))(%r1), %r3 - LOADHIGH(G(young_limit)) - ldo LOW(G(young_limit))(%r1), %r4 + LOADHIGH(G(caml_young_ptr)) + ldw LOW(G(caml_young_ptr))(%r1), %r3 + LOADHIGH(G(caml_young_limit)) + ldo LOW(G(caml_young_limit))(%r1), %r4 ; Raise the exception ldw -4(%r30), %r1 ldw -8(%r30), %r5 @@ -486,8 +486,8 @@ G(raise_caml_exception): ; Callbacks C -> ML .align CODE_ALIGN - EXPORT_CODE(G(callback_exn)) -G(callback_exn): + EXPORT_CODE(G(caml_callback_exn)) +G(caml_callback_exn): STARTPROC ; Initial shuffling of arguments copy %r26, %r1 ; Closure @@ -498,8 +498,8 @@ G(callback_exn): ENDPROC .align CODE_ALIGN - EXPORT_CODE(G(callback2_exn)) -G(callback2_exn): + EXPORT_CODE(G(caml_callback2_exn)) +G(caml_callback2_exn): STARTPROC copy %r26, %r1 ; Closure copy %r25, %r26 ; First argument @@ -511,8 +511,8 @@ G(callback2_exn): ENDPROC .align CODE_ALIGN - EXPORT_CODE(G(callback3_exn)) -G(callback3_exn): + EXPORT_CODE(G(caml_callback3_exn)) +G(caml_callback3_exn): STARTPROC copy %r26, %r1 ; Closure copy %r25, %r26 ; First argument diff --git a/asmrun/i386.S b/asmrun/i386.S index f89ed3742..fdc4694fd 100644 --- a/asmrun/i386.S +++ b/asmrun/i386.S @@ -105,10 +105,10 @@ LBL(105): .align FUNCTION_ALIGN G(caml_alloc1): PROFILE_CAML - movl G(young_ptr), %eax + movl G(caml_young_ptr), %eax subl $8, %eax - movl %eax, G(young_ptr) - cmpl G(young_limit), %eax + movl %eax, G(caml_young_ptr) + cmpl G(caml_young_limit), %eax jb LBL(100) ret LBL(100): @@ -122,10 +122,10 @@ LBL(100): .align FUNCTION_ALIGN G(caml_alloc2): PROFILE_CAML - movl G(young_ptr), %eax + movl G(caml_young_ptr), %eax subl $12, %eax - movl %eax, G(young_ptr) - cmpl G(young_limit), %eax + movl %eax, G(caml_young_ptr) + cmpl G(caml_young_limit), %eax jb LBL(101) ret LBL(101): @@ -139,10 +139,10 @@ LBL(101): .align FUNCTION_ALIGN G(caml_alloc3): PROFILE_CAML - movl G(young_ptr), %eax + movl G(caml_young_ptr), %eax subl $16, %eax - movl %eax, G(young_ptr) - cmpl G(young_limit), %eax + movl %eax, G(caml_young_ptr) + cmpl G(caml_young_limit), %eax jb LBL(102) ret LBL(102): @@ -156,17 +156,17 @@ LBL(102): .align FUNCTION_ALIGN G(caml_alloc): PROFILE_CAML - subl G(young_ptr), %eax /* eax = size - young_ptr */ - negl %eax /* eax = young_ptr - size */ - cmpl G(young_limit), %eax + subl G(caml_young_ptr), %eax /* eax = size - caml_young_ptr */ + negl %eax /* eax = caml_young_ptr - size */ + cmpl G(caml_young_limit), %eax jb LBL(103) - movl %eax, G(young_ptr) + movl %eax, G(caml_young_ptr) ret LBL(103): - subl G(young_ptr), %eax /* eax = - size */ + subl G(caml_young_ptr), %eax /* eax = - size */ negl %eax /* eax = size */ pushl %eax /* save desired size */ - subl %eax, G(young_ptr) /* must update young_ptr */ + subl %eax, G(caml_young_ptr) /* must update young_ptr */ movl 4(%esp), %eax movl %eax, G(caml_last_return_address) leal 8(%esp), %eax @@ -202,7 +202,7 @@ G(caml_start_program): pushl %ebp /* Initial entry point is caml_program */ movl $ G(caml_program), %esi - /* Common code for caml_start_program and callback* */ + /* Common code for caml_start_program and caml_callback* */ LBL(106): /* Build a callback link */ pushl G(caml_gc_regs) @@ -249,9 +249,9 @@ G(raise_caml_exception): /* Callback from C to Caml */ - .globl G(callback_exn) + .globl G(caml_callback_exn) .align FUNCTION_ALIGN -G(callback_exn): 
+G(caml_callback_exn): PROFILE_C /* Save callee-save registers */ pushl %ebx @@ -264,9 +264,9 @@ G(callback_exn): movl 0(%ebx), %esi /* code pointer */ jmp LBL(106) - .globl G(callback2_exn) + .globl G(caml_callback2_exn) .align FUNCTION_ALIGN -G(callback2_exn): +G(caml_callback2_exn): PROFILE_C /* Save callee-save registers */ pushl %ebx @@ -280,9 +280,9 @@ G(callback2_exn): movl $ G(caml_apply2), %esi /* code pointer */ jmp LBL(106) - .globl G(callback3_exn) + .globl G(caml_callback3_exn) .align FUNCTION_ALIGN -G(callback3_exn): +G(caml_callback3_exn): PROFILE_C /* Save callee-save registers */ pushl %ebx diff --git a/asmrun/ia64.S b/asmrun/ia64.S index 66e1620a9..3377b11a5 100644 --- a/asmrun/ia64.S +++ b/asmrun/ia64.S @@ -126,7 +126,7 @@ caml_call_gc: FSAVE8(f56,f57,f58,f59,f60,f61,f62,f63) ;; /* Save current allocation pointer for debugging purposes */ - STOREGLOBAL(r4, young_ptr#) + STOREGLOBAL(r4, caml_young_ptr#) /* Save trap pointer in case an exception is raised */ STOREGLOBAL(r6, caml_exception_pointer#) @@ -162,8 +162,8 @@ caml_call_gc: FLOAD8(f56,f57,f58,f59,f60,f61,f62,f63) ;; /* Reload new allocation pointer and allocation limit */ - LOADGLOBAL(r4, young_ptr#) - LOADGLOBAL(r5, young_limit#) + LOADGLOBAL(r4, caml_young_ptr#) + LOADGLOBAL(r5, caml_young_limit#) /* Allocate space for the block */ add r3 = 16, sp ;; @@ -208,7 +208,7 @@ caml_c_call: STOREGLOBAL(r14, caml_last_return_address#) /* Make the exception handler and alloc ptr available to the C code */ - STOREGLOBAL(r4, young_ptr#) + STOREGLOBAL(r4, caml_young_ptr#) STOREGLOBAL(r6, caml_exception_pointer#) /* Recover gp from the function pointer in r2 */ @@ -221,8 +221,8 @@ caml_c_call: mov gp = r7 ;; /* Reload alloc ptr and alloc limit */ - LOADGLOBAL(r4, young_ptr#) - LOADGLOBAL(r5, young_limit#) + LOADGLOBAL(r4, caml_young_ptr#) + LOADGLOBAL(r5, caml_young_limit#) /* Reload return address and say that we are back into Caml code */ ADDRGLOBAL(r3, caml_last_return_address#) ;; @@ -245,7 +245,7 @@ caml_start_program: ADDRGLOBAL(r2, caml_program#) ;; mov b6 = r2 - /* Code shared with callback* */ + /* Code shared with caml_callback* */ .L103: /* Allocate 64 "out" registers (for the Caml code) and no locals */ alloc r3 = ar.pfs, 0, 0, 64, 0 @@ -324,8 +324,8 @@ caml_start_program: mov ar.rsc = r14 /* restore original RSE mode */ /* Reload allocation pointers */ - LOADGLOBAL(r4, young_ptr#) - LOADGLOBAL(r5, young_limit#) + LOADGLOBAL(r4, caml_young_ptr#) + LOADGLOBAL(r5, caml_young_limit#) /* We are back into Caml code */ STOREGLOBAL(r0, caml_last_return_address#) @@ -355,7 +355,7 @@ caml_start_program: add sp = 48, sp /* Update allocation pointer */ - STOREGLOBAL(r4, young_ptr#) + STOREGLOBAL(r4, caml_young_ptr#) /* Restore all callee-save registers from stack */ add r2 = 16, sp ;; @@ -436,8 +436,8 @@ raise_caml_exception: mov ar.rsc = r14 ;; /* Restore original RSE mode */ /* Reload allocation pointers and exception pointer */ - LOADGLOBAL(r4, young_ptr#) - LOADGLOBAL(r5, young_limit#) + LOADGLOBAL(r4, caml_young_ptr#) + LOADGLOBAL(r5, caml_young_limit#) LOADGLOBAL(r6, caml_exception_pointer#) /* Say that we're back into Caml */ @@ -458,10 +458,10 @@ raise_caml_exception: /* Callbacks from C to Caml */ - .global callback_exn# - .proc callback_exn# + .global caml_callback_exn# + .proc caml_callback_exn# .align 16 -callback_exn: +caml_callback_exn: /* Initial shuffling of arguments */ ld8 r3 = [r32] /* code pointer */ mov r2 = r32 @@ -470,12 +470,12 @@ callback_exn: mov b6 = r3 br.sptk .L103 ;; - .endp callback_exn# + .endp 
caml_callback_exn# - .global callback2_exn# - .proc callback2_exn# + .global caml_callback2_exn# + .proc caml_callback2_exn# .align 16 -callback2_exn: +caml_callback2_exn: /* Initial shuffling of arguments */ ADDRGLOBAL(r3, caml_apply2) /* code pointer */ mov r2 = r32 @@ -485,12 +485,12 @@ callback2_exn: mov b6 = r3 br.sptk .L103 ;; - .endp callback2_exn# + .endp caml_callback2_exn# - .global callback3_exn# - .proc callback3_exn# + .global caml_callback3_exn# + .proc caml_callback3_exn# .align 16 -callback3_exn: +caml_callback3_exn: /* Initial shuffling of arguments */ ADDRGLOBAL(r3, caml_apply3) /* code pointer */ mov r2 = r32 @@ -501,7 +501,7 @@ callback3_exn: mov b6 = r3 br.sptk .L103 ;; - .endp callback3_exn# + .endp caml_callback3_exn# /* Glue code to call array_bound_error */ diff --git a/asmrun/m68k.S b/asmrun/m68k.S index 85de3fb3d..8d318aa9f 100644 --- a/asmrun/m68k.S +++ b/asmrun/m68k.S @@ -35,7 +35,7 @@ _caml_call_gc: addql #4, d5 movel d5, _caml_bottom_of_stack | Record current allocation pointer (for debugging) - movel d6, _young_ptr + movel d6, _caml_young_ptr | Save all regs used by the code generator movel d4, a7@- movel d3, a7@- @@ -68,14 +68,14 @@ _caml_call_gc: movel a7@+, d3 movel a7@+, d4 | Reload allocation pointer and allocate block - movel _young_ptr, d6 + movel _caml_young_ptr, d6 subl _caml_requested_size, d6 | Return to caller rts _caml_alloc1: subql #8, d6 - cmpl _young_limit, d6 + cmpl _caml_young_limit, d6 bcs L100 rts L100: moveq #8, d5 @@ -83,7 +83,7 @@ L100: moveq #8, d5 _caml_alloc2: subl #12, d6 - cmpl _young_limit, d6 + cmpl _caml_young_limit, d6 bcs L101 rts L101: moveq #12, d5 @@ -91,7 +91,7 @@ L101: moveq #12, d5 _caml_alloc3: subl #16, d6 - cmpl _young_limit, d6 + cmpl _caml_young_limit, d6 bcs L102 rts L102: moveq #16, d5 @@ -99,7 +99,7 @@ L102: moveq #16, d5 _caml_alloc: subl d5, d6 - cmpl _young_limit, d6 + cmpl _caml_young_limit, d6 bcs _caml_call_gc rts @@ -112,12 +112,12 @@ _caml_c_call: movel a7@+, _caml_last_return_address movel a7, _caml_bottom_of_stack | Save allocation pointer and exception pointer - movel d6, _young_ptr + movel d6, _caml_young_ptr movel d7, _caml_exception_pointer | Call the function (address in a0) jbsr a0@ | Reload allocation pointer - movel _young_ptr, d6 + movel _caml_young_ptr, d6 | Return to caller movel _caml_last_return_address, a1 jmp a1@ @@ -133,7 +133,7 @@ _caml_start_program: | Initial code point is caml_program lea _caml_program, a5 -| Code shared between caml_start_program and callback* +| Code shared between caml_start_program and caml_callback* L106: | Build a callback link @@ -145,14 +145,14 @@ L106: movel _caml_exception_pointer, a7@- movel a7, d7 | Load allocation pointer - movel _young_ptr, d6 + movel _caml_young_ptr, d6 | Call the Caml code jbsr a5@ L107: | Move result where C code expects it movel a0, d0 | Save allocation pointer - movel d6, _young_ptr + movel d6, _caml_young_ptr | Pop the exception handler movel a7@+, _caml_exception_pointer addql #4, a7 @@ -170,7 +170,7 @@ L109: L108: | Exception handler | Save allocation pointer and exception pointer - movel d6, _young_ptr + movel d6, _caml_young_ptr movel d7, _caml_exception_pointer | Encode exception bucket as an exception result movel a0, d0 @@ -183,15 +183,15 @@ L108: .globl _raise_caml_exception _raise_caml_exception: movel a7@(4), a0 | exception bucket - movel _young_ptr, d6 + movel _caml_young_ptr, d6 movel _caml_exception_pointer, a7 movel a7@+, d7 rts | Callback from C to Caml - .globl _callback_exn -_callback_exn: + .globl 
_caml_callback_exn +_caml_callback_exn: link a6, #0 | Save callee-save registers moveml a2-a6/d2-d7, a7@- @@ -202,8 +202,8 @@ _callback_exn: movel a1@(0), a5 | code pointer bra L106 - .globl _callback2_exn -_callback2_exn: + .globl _caml_callback2_exn +_caml_callback2_exn: link a6, #0 | Save callee-save registers moveml a2-a6/d2-d7, a7@- @@ -215,8 +215,8 @@ _callback2_exn: lea _caml_apply2, a5 | code pointer bra L106 - .globl _callback3_exn -_callback3_exn: + .globl _caml_callback3_exn +_caml_callback3_exn: link a6, #0 | Save callee-save registers moveml a2-a6/d2-d7, a7@- diff --git a/asmrun/mips.s b/asmrun/mips.s index 26853c0d8..76d89c165 100644 --- a/asmrun/mips.s +++ b/asmrun/mips.s @@ -39,7 +39,7 @@ caml_call_gc: addu $24, $sp, 0x100 sw $24, caml_gc_regs /* Save current allocation pointer for debugging purposes */ - sw $22, young_ptr + sw $22, caml_young_ptr /* Save the exception handler (if e.g. a sighandler raises) */ sw $30, caml_exception_pointer /* Save all regs used by the code generator on the stack */ @@ -152,8 +152,8 @@ caml_call_gc: l.d $f30, 30 * 8($sp) l.d $f31, 31 * 8($sp) /* Reload new allocation pointer and allocation limit */ - lw $22, young_ptr - lw $23, young_limit + lw $22, caml_young_ptr + lw $23, caml_young_limit /* Reload return address */ lw $31, caml_last_return_address /* Say that we are back into Caml code */ @@ -179,19 +179,19 @@ caml_c_call: /* Preload addresses of interesting global variables in callee-save registers */ la $16, caml_last_return_address - la $17, young_ptr + la $17, caml_young_ptr /* Save return address, bottom of stack, alloc ptr, exn ptr */ sw $31, 0($16) /* caml_last_return_address */ sw $sp, caml_bottom_of_stack - sw $22, 0($17) /* young_ptr */ + sw $22, 0($17) /* caml_young_ptr */ sw $30, caml_exception_pointer /* Call C function */ move $25, $24 jal $24 /* Reload return address, alloc ptr, alloc limit */ lw $31, 0($16) /* caml_last_return_address */ - lw $22, 0($17) /* young_ptr */ - lw $23, young_limit /* young_limit */ + lw $22, 0($17) /* caml_young_ptr */ + lw $23, caml_young_limit /* caml_young_limit */ /* Zero caml_last_return_address, indicating we're back in Caml code */ sw $0, 0($16) /* caml_last_return_address */ /* Restore $gp and return */ @@ -211,7 +211,7 @@ caml_start_program: .cpsetup $25, 0x80, caml_start_program /* Load in $24 the code address to call */ la $24, caml_program - /* Code shared with callback* */ + /* Code shared with caml_callback* */ $103: /* Save return address */ sd $31, 0x88($sp) @@ -248,8 +248,8 @@ $103: sw $gp, 8($sp) move $30, $sp /* Reload allocation pointers */ - lw $22, young_ptr - lw $23, young_limit + lw $22, caml_young_ptr + lw $23, caml_young_limit /* Say that we are back into Caml code */ sw $0, caml_last_return_address /* Call the Caml code */ @@ -270,7 +270,7 @@ $106: sw $24, caml_gc_regs addu $sp, $sp, 16 /* Update allocation pointer */ - sw $22, young_ptr + sw $22, caml_young_ptr /* Reload callee-save registers and return */ ld $31, 0x88($sp) ld $16, 0x0($sp) @@ -310,8 +310,8 @@ raise_caml_exception: .cpsetup $25, $24, raise_caml_exception /* Branch to exn handler */ move $2, $4 - lw $22, young_ptr - lw $23, young_limit + lw $22, caml_young_ptr + lw $23, caml_young_limit lw $sp, caml_exception_pointer lw $30, 0($sp) lw $24, 4($sp) @@ -323,23 +323,23 @@ raise_caml_exception: /* Callback from C to Caml */ - .globl callback_exn - .ent callback_exn -callback_exn: + .globl caml_callback_exn + .ent caml_callback_exn +caml_callback_exn: subu $sp, $sp, 0x90 - .cpsetup $25, 0x80, callback_exn + 
.cpsetup $25, 0x80, caml_callback_exn /* Initial shuffling of arguments */ move $9, $4 /* closure */ move $8, $5 /* argument */ lw $24, 0($4) /* code pointer */ b $103 - .end callback_exn + .end caml_callback_exn - .globl callback2_exn - .ent callback2_exn -callback2_exn: + .globl caml_callback2_exn + .ent caml_callback2_exn +caml_callback2_exn: subu $sp, $sp, 0x90 - .cpsetup $25, 0x80, callback2_exn + .cpsetup $25, 0x80, caml_callback2_exn /* Initial shuffling of arguments */ move $10, $4 /* closure */ move $8, $5 /* first argument */ @@ -347,13 +347,13 @@ callback2_exn: la $24, caml_apply2 /* code pointer */ b $103 - .end callback2_exn + .end caml_callback2_exn - .globl callback3_exn - .ent callback3_exn -callback3_exn: + .globl caml_callback3_exn + .ent caml_callback3_exn +caml_callback3_exn: subu $sp, $sp, 0x90 - .cpsetup $25, 0x80, callback3_exn + .cpsetup $25, 0x80, caml_callback3_exn /* Initial shuffling of arguments */ move $11, $4 /* closure */ move $8, $5 /* first argument */ @@ -362,7 +362,7 @@ callback3_exn: la $24, caml_apply3 /* code pointer */ b $103 - .end callback3_exn + .end caml_callback3_exn /* Glue code to call array_bound_error */ diff --git a/asmrun/power-aix.S b/asmrun/power-aix.S index 3baa5f966..c11167414 100644 --- a/asmrun/power-aix.S +++ b/asmrun/power-aix.S @@ -37,7 +37,7 @@ addi 0, 1, 8*32 + 64 stw 0, 0(11) # Save current allocation pointer for debugging purposes - lwz 11, L..young_ptr(2) + lwz 11, L..caml_young_ptr(2) stw 31, 0(11) # Save exception pointer (if e.g. a sighandler raises) lwz 11, L..caml_exception_pointer(2) @@ -103,9 +103,9 @@ bl .garbage_collection or 0, 0, 0 # Reload new allocation pointer and allocation limit - lwz 11, L..young_ptr(2) + lwz 11, L..caml_young_ptr(2) lwz 31, 0(11) - lwz 11, L..young_limit(2) + lwz 11, L..caml_young_limit(2) lwz 30, 0(11) # Restore all regs used by the code generator addi 11, 1, 8*32 + 64 - 4 @@ -192,14 +192,14 @@ stw 1, 0(27) stw 25, 0(24) # Make the exception handler and alloc ptr available to the C code - lwz 27, L..young_ptr(2) + lwz 27, L..caml_young_ptr(2) lwz 26, L..caml_exception_pointer(2) stw 31, 0(27) stw 29, 0(26) # Preserve RTOC and return address in callee-save registers # The C function will preserve them, and the Caml code does not # expect them to be preserved - # Return address is in 25, RTOC is in 26, pointer to young_ptr in 27, + # Return address is in 25, RTOC is in 26, pointer to caml_young_ptr in 27, # pointer to caml_last_return_address is in 24 # Call the function (descriptor in 11) lwz 0, 0(11) @@ -213,7 +213,7 @@ # Restore RTOC mr 2, 26 # Reload allocation pointer - lwz 31, 0(27) # 27 still points to young_ptr + lwz 31, 0(27) # 27 still points to caml_young_ptr # Say we are back into Caml code li 12, 0 stw 12, 0(24) # 24 still points to caml_last_return_address @@ -226,8 +226,8 @@ .raise_caml_exception: # Reload Caml global registers lwz 4, L..caml_exception_pointer(2) - lwz 5, L..young_ptr(2) - lwz 6, L..young_limit(2) + lwz 5, L..caml_young_ptr(2) + lwz 6, L..caml_young_limit(2) lwz 1, 0(4) lwz 31, 0(5) lwz 30, 0(6) @@ -250,7 +250,7 @@ .caml_start_program: lwz 11, L..caml_program(2) -#### Code shared between caml_start_program and callback* +#### Code shared between caml_start_program and caml_callback* L..102: mflr 0 @@ -320,8 +320,8 @@ L..103: stw 2, 20(1) mr 29, 1 # Reload allocation pointers - lwz 9, L..young_ptr(2) - lwz 10, L..young_limit(2) + lwz 9, L..caml_young_ptr(2) + lwz 10, L..caml_young_limit(2) lwz 31, 0(9) lwz 30, 0(10) # Say we are back into Caml code @@ -354,7 
+354,7 @@ L..106: stw 9, 0(12) addi 1, 1, 32 # Update allocation pointer - lwz 11, L..young_ptr(2) + lwz 11, L..caml_young_ptr(2) stw 31, 0(11) # Deallocate stack frame addi 1, 1, 288 @@ -412,8 +412,8 @@ L..104: #### Callback from C to Caml - .globl .callback_exn -.callback_exn: + .globl .caml_callback_exn +.caml_callback_exn: # Initial shuffling of arguments mr 0, 3 # Closure mr 3, 4 # Argument @@ -421,8 +421,8 @@ L..104: lwz 11, 0(4) # Code pointer b L..102 - .globl .callback2_exn -.callback2_exn: + .globl .caml_callback2_exn +.caml_callback2_exn: mr 0, 3 # Closure mr 3, 4 # First argument mr 4, 5 # Second argument @@ -430,8 +430,8 @@ L..104: lwz 11, L..caml_apply2(2) b L..102 - .globl .callback3_exn -.callback3_exn: + .globl .caml_callback3_exn +.caml_callback3_exn: mr 0, 3 # Closure mr 3, 4 # First argument mr 4, 5 # Second argument @@ -453,10 +453,10 @@ system__frametable: #### TOC entries .toc -L..young_limit: - .tc young_limit[TC], young_limit -L..young_ptr: - .tc young_ptr[TC], young_ptr +L..caml_young_limit: + .tc caml_young_limit[TC], caml_young_limit +L..caml_young_ptr: + .tc caml_young_ptr[TC], caml_young_ptr L..caml_bottom_of_stack: .tc caml_bottom_of_stack[TC], caml_bottom_of_stack L..caml_last_return_address: @@ -497,17 +497,17 @@ raise_caml_exception: caml_start_program: .long .caml_start_program, TOC[tc0], 0 - .globl callback_exn - .csect callback_exn[DS] -callback_exn: - .long .callback_exn, TOC[tc0], 0 + .globl caml_callback_exn + .csect caml_callback_exn[DS] +caml_callback_exn: + .long .caml_callback_exn, TOC[tc0], 0 - .globl callback2_exn - .csect callback2_exn[DS] -callback2_exn: - .long .callback2_exn, TOC[tc0], 0 + .globl caml_callback2_exn + .csect caml_callback2_exn[DS] +caml_callback2_exn: + .long .caml_callback2_exn, TOC[tc0], 0 - .globl callback3_exn - .csect callback3_exn[DS] -callback3_exn: - .long .callback3_exn, TOC[tc0], 0 + .globl caml_callback3_exn + .csect caml_callback3_exn[DS] +caml_callback3_exn: + .long .caml_callback3_exn, TOC[tc0], 0 diff --git a/asmrun/power-elf.S b/asmrun/power-elf.S index 52f3441cd..a29e6c29e 100644 --- a/asmrun/power-elf.S +++ b/asmrun/power-elf.S @@ -43,7 +43,7 @@ caml_call_gc: addi 0, 1, 8*32 + 32 Storeglobal(0, caml_gc_regs, 11) /* Save current allocation pointer for debugging purposes */ - Storeglobal(31, young_ptr, 11) + Storeglobal(31, caml_young_ptr, 11) /* Save exception pointer (if e.g. 
a sighandler raises) */ Storeglobal(29, caml_exception_pointer, 11) /* Save all registers used by the code generator */ @@ -106,8 +106,8 @@ caml_call_gc: /* Call the GC */ bl garbage_collection /* Reload new allocation pointer and allocation limit */ - Loadglobal(31, young_ptr, 11) - Loadglobal(30, young_limit, 11) + Loadglobal(31, caml_young_ptr, 11) + Loadglobal(30, caml_young_limit, 11) /* Restore all regs used by the code generator */ addi 11, 1, 8*32 + 32 - 4 lwzu 3, 4(11) @@ -190,15 +190,15 @@ caml_c_call: Storeglobal(1, caml_bottom_of_stack, 12) Storeglobal(25, caml_last_return_address, 12) /* Make the exception handler and alloc ptr available to the C code */ - Storeglobal(31, young_ptr, 11) + Storeglobal(31, caml_young_ptr, 11) Storeglobal(29, caml_exception_pointer, 11) /* Call the function (address in link register) */ blrl /* Restore return address (in 25, preserved by the C function) */ mtlr 25 /* Reload allocation pointer and allocation limit*/ - Loadglobal(31, young_ptr, 11) - Loadglobal(30, young_limit, 11) + Loadglobal(31, caml_young_ptr, 11) + Loadglobal(30, caml_young_limit, 11) /* Say we are back into Caml code */ li 12, 0 Storeglobal(12, caml_last_return_address, 11) @@ -212,8 +212,8 @@ caml_c_call: raise_caml_exception: /* Reload Caml global registers */ Loadglobal(1, caml_exception_pointer, 11) - Loadglobal(31, young_ptr, 11) - Loadglobal(30, young_limit, 11) + Loadglobal(31, caml_young_ptr, 11) + Loadglobal(30, caml_young_limit, 11) /* Say we are back into Caml code */ li 0, 0 Storeglobal(0, caml_last_return_address, 11) @@ -232,7 +232,7 @@ raise_caml_exception: caml_start_program: Addrglobal(12, caml_program) -/* Code shared between caml_start_program and callback */ +/* Code shared between caml_start_program and caml_callback */ .L102: /* Allocate and link stack frame */ stwu 1, -256(1) @@ -298,8 +298,8 @@ caml_start_program: stw 11, 4(1) mr 29, 1 /* Reload allocation pointers */ - Loadglobal(31, young_ptr, 11) - Loadglobal(30, young_limit, 11) + Loadglobal(31, caml_young_ptr, 11) + Loadglobal(30, caml_young_limit, 11) /* Say we are back into Caml code */ li 0, 0 Storeglobal(0, caml_last_return_address, 11) @@ -321,7 +321,7 @@ caml_start_program: Storeglobal(11, caml_gc_regs, 12) addi 1, 1, 16 /* Update allocation pointer */ - Storeglobal(31, young_ptr, 11) + Storeglobal(31, caml_young_ptr, 11) /* Restore callee-save registers */ addi 11, 1, 16-4 lwzu 14, 4(11) @@ -377,9 +377,9 @@ caml_start_program: /* Callback from C to Caml */ - .globl callback_exn - .type callback_exn, @function -callback_exn: + .globl caml_callback_exn + .type caml_callback_exn, @function +caml_callback_exn: /* Initial shuffling of arguments */ mr 0, 3 /* Closure */ mr 3, 4 /* Argument */ @@ -387,9 +387,9 @@ callback_exn: lwz 12, 0(4) /* Code pointer */ b .L102 - .globl callback2_exn - .type callback2_exn, @function -callback2_exn: + .globl caml_callback2_exn + .type caml_callback2_exn, @function +caml_callback2_exn: mr 0, 3 /* Closure */ mr 3, 4 /* First argument */ mr 4, 5 /* Second argument */ @@ -397,9 +397,9 @@ callback2_exn: Addrglobal(12, caml_apply2) b .L102 - .globl callback3_exn - .type callback3_exn, @function -callback3_exn: + .globl caml_callback3_exn + .type caml_callback3_exn, @function +caml_callback3_exn: mr 0, 3 /* Closure */ mr 3, 4 /* First argument */ mr 4, 5 /* Second argument */ diff --git a/asmrun/power-rhapsody.S b/asmrun/power-rhapsody.S index 0d56983df..528205bd8 100644 --- a/asmrun/power-rhapsody.S +++ b/asmrun/power-rhapsody.S @@ -45,7 +45,7 @@ _caml_call_gc: addi 
r0, r1, 8*32 + 32 Storeglobal r0, _caml_gc_regs, r11 /* Save current allocation pointer for debugging purposes */ - Storeglobal r31, _young_ptr, r11 + Storeglobal r31, _caml_young_ptr, r11 /* Save exception pointer (if e.g. a sighandler raises) */ Storeglobal r29, _caml_exception_pointer, r11 /* Save all registers used by the code generator */ @@ -108,8 +108,8 @@ _caml_call_gc: /* Call the GC */ bl _garbage_collection /* Reload new allocation pointer and allocation limit */ - Loadglobal r31, _young_ptr, r11 - Loadglobal r30, _young_limit, r11 + Loadglobal r31, _caml_young_ptr, r11 + Loadglobal r30, _caml_young_limit, r11 /* Restore all regs used by the code generator */ addi r11, r1, 8*32 + 32 - 4 lwzu r3, 4(r11) @@ -191,15 +191,15 @@ _caml_c_call: Storeglobal r1, _caml_bottom_of_stack, r12 Storeglobal r25, _caml_last_return_address, r12 /* Make the exception handler and alloc ptr available to the C code */ - Storeglobal r31, _young_ptr, r11 + Storeglobal r31, _caml_young_ptr, r11 Storeglobal r29, _caml_exception_pointer, r11 /* Call the function (address in link register) */ blrl /* Restore return address (in 25, preserved by the C function) */ mtlr r25 /* Reload allocation pointer and allocation limit*/ - Loadglobal r31, _young_ptr, r11 - Loadglobal r30, _young_limit, r11 + Loadglobal r31, _caml_young_ptr, r11 + Loadglobal r30, _caml_young_limit, r11 /* Say we are back into Caml code */ li r12, 0 Storeglobal r12, _caml_last_return_address, r11 @@ -212,8 +212,8 @@ _caml_c_call: _raise_caml_exception: /* Reload Caml global registers */ Loadglobal r1, _caml_exception_pointer, r11 - Loadglobal r31, _young_ptr, r11 - Loadglobal r30, _young_limit, r11 + Loadglobal r31, _caml_young_ptr, r11 + Loadglobal r30, _caml_young_limit, r11 /* Say we are back into Caml code */ li r0, 0 Storeglobal r0, _caml_last_return_address, r11 @@ -231,7 +231,7 @@ _raise_caml_exception: _caml_start_program: Addrglobal r12, _caml_program -/* Code shared between caml_start_program and callback */ +/* Code shared between caml_start_program and caml_callback */ L102: /* Allocate and link stack frame */ stwu r1, -256(r1) @@ -297,8 +297,8 @@ L103: stw r11, 4(r1) mr r29, r1 /* Reload allocation pointers */ - Loadglobal r31, _young_ptr, r11 - Loadglobal r30, _young_limit, r11 + Loadglobal r31, _caml_young_ptr, r11 + Loadglobal r30, _caml_young_limit, r11 /* Say we are back into Caml code */ li r0, 0 Storeglobal r0, _caml_last_return_address, r11 @@ -320,7 +320,7 @@ L106: Storeglobal r11, _caml_gc_regs, r12 addi r1, r1, 16 /* Update allocation pointer */ - Storeglobal r31, _young_ptr, r11 + Storeglobal r31, _caml_young_ptr, r11 /* Restore callee-save registers */ addi r11, r1, 16-4 lwzu r14, 4(r11) @@ -376,8 +376,8 @@ L104: /* Callback from C to Caml */ - .globl _callback_exn -_callback_exn: + .globl _caml_callback_exn +_caml_callback_exn: /* Initial shuffling of arguments */ mr r0, r3 /* Closure */ mr r3, r4 /* Argument */ @@ -385,8 +385,8 @@ _callback_exn: lwz r12, 0(r4) /* Code pointer */ b L102 - .globl _callback2_exn -_callback2_exn: + .globl _caml_callback2_exn +_caml_callback2_exn: mr r0, r3 /* Closure */ mr r3, r4 /* First argument */ mr r4, r5 /* Second argument */ @@ -394,8 +394,8 @@ _callback2_exn: Addrglobal r12, _caml_apply2 b L102 - .globl _callback3_exn -_callback3_exn: + .globl _caml_callback3_exn +_caml_callback3_exn: mr r0, r3 /* Closure */ mr r3, r4 /* First argument */ mr r4, r5 /* Second argument */ diff --git a/asmrun/roots.c b/asmrun/roots.c index f56ebf150..69ff4740f 100644 --- a/asmrun/roots.c +++ 
b/asmrun/roots.c @@ -65,7 +65,7 @@ static void init_frame_descriptors(void) /* Allocate the hash table */ frame_descriptors = - (frame_descr **) stat_alloc(tblsize * sizeof(frame_descr *)); + (frame_descr **) caml_stat_alloc(tblsize * sizeof(frame_descr *)); for (i = 0; i < tblsize; i++) frame_descriptors[i] = NULL; frame_descriptors_mask = tblsize - 1; @@ -97,7 +97,7 @@ value * caml_gc_regs; long caml_globals_inited = 0; static long caml_globals_scanned = 0; -/* Call [oldify_one] on (at least) all the roots that point to the minor +/* Call [caml_oldify_one] on (at least) all the roots that point to the minor heap. */ void oldify_local_roots (void) { @@ -188,16 +188,16 @@ void oldify_local_roots (void) Oldify (gr->root); } /* Finalised values */ - final_do_young_roots (&oldify_one); + final_do_young_roots (&caml_oldify_one); /* Hook */ - if (scan_roots_hook != NULL) (*scan_roots_hook)(oldify_one); + if (scan_roots_hook != NULL) (*scan_roots_hook)(caml_oldify_one); } /* Call [darken] on all roots */ void darken_all_roots (void) { - do_roots (darken); + do_roots (caml_darken); } void do_roots (scanning_action f) diff --git a/asmrun/signals.c b/asmrun/signals.c index cdd4f7c1c..8bfec82e1 100644 --- a/asmrun/signals.c +++ b/asmrun/signals.c @@ -143,8 +143,8 @@ void execute_signal(int signal_number, int in_signal_handler) sigaddset(&sigs, signal_number); sigprocmask(SIG_BLOCK, &sigs, &sigs); #endif - res = callback_exn(Field(signal_handlers, signal_number), - Val_int(rev_convert_signal_number(signal_number))); + res = caml_callback_exn(Field(signal_handlers, signal_number), + Val_int(rev_convert_signal_number(signal_number))); #ifdef POSIX_SIGNALS if (! in_signal_handler) { /* Restore the original signal mask */ @@ -171,12 +171,14 @@ void garbage_collection(void) { int sig; - if (young_ptr < young_start || force_major_slice) minor_collection(); + if (caml_young_ptr < caml_young_start || force_major_slice){ + caml_minor_collection(); + } /* If a signal arrives between the following two instructions, it will be lost. */ sig = pending_signal; pending_signal = 0; - young_limit = young_start; + caml_young_limit = caml_young_start; if (sig) execute_signal(sig, 0); } @@ -185,11 +187,11 @@ void garbage_collection(void) void urge_major_slice (void) { force_major_slice = 1; - young_limit = young_end; - /* This is only moderately effective on ports that cache young_limit - in a register, since modify() is called directly, not through - caml_c_call, so it may take a while before the register is reloaded - from young_limit. */ + caml_young_limit = caml_young_end; + /* This is only moderately effective on ports that cache [caml_young_limit] + in a register, since [caml_modify] is called directly, not through + [caml_c_call], so it may take a while before the register is reloaded + from [caml_young_limit]. */ } void enter_blocking_section(void) @@ -202,7 +204,7 @@ void enter_blocking_section(void) it will be lost. */ sig = pending_signal; pending_signal = 0; - young_limit = young_start; + caml_young_limit = caml_young_start; if (sig) execute_signal(sig, 0); async_signal_mode = 1; if (!pending_signal) break; @@ -266,44 +268,44 @@ void handle_signal(int sig) Instead, we remember the signal and play with the allocation limit so that the next allocation will trigger a garbage collection. */ pending_signal = sig; - young_limit = young_end; - /* Some ports cache young_limit in a register. + caml_young_limit = caml_young_end; + /* Some ports cache [caml_young_limit] in a register. 
Use the signal context to modify that register too, but only if we are inside Caml code (not inside C code). */ #if defined(TARGET_alpha) if (In_code_area(context->sc_pc)) { /* Cached in register $14 */ - context->sc_regs[14] = (long) young_limit; + context->sc_regs[14] = (long) caml_young_limit; } #endif #if defined(TARGET_mips) if (In_code_area(context->sc_pc)) { /* Cached in register $23 */ - context->sc_regs[23] = (int) young_limit; + context->sc_regs[23] = (int) caml_young_limit; } #endif #if defined(TARGET_power) && defined(SYS_aix) if (caml_last_return_address == 0) { /* Cached in register 30 */ - CONTEXT_GPR(context, 30) = (ulong_t) young_limit; + CONTEXT_GPR(context, 30) = (ulong_t) caml_young_limit; } #endif #if defined(TARGET_power) && defined(SYS_elf) if (caml_last_return_address == 0) { /* Cached in register 30 */ - context->regs->gpr[30] = (unsigned long) young_limit; + context->regs->gpr[30] = (unsigned long) caml_young_limit; } #endif #if defined(TARGET_power) && defined(SYS_rhapsody) if (In_code_area(CONTEXT_PC(context))) { /* Cached in register 30 */ - CONTEXT_GPR(context, 30) = (unsigned long) young_limit; + CONTEXT_GPR(context, 30) = (unsigned long) caml_young_limit; } #endif #if defined(TARGET_power) && defined(SYS_bsd) if (caml_last_return_address == 0) { /* Cached in register 30 */ - context->sc_frame.fixreg[30] = (unsigned long) young_limit; + context->sc_frame.fixreg[30] = (unsigned long) caml_young_limit; } #endif #if defined(TARGET_sparc) && defined(SYS_solaris) @@ -311,7 +313,7 @@ void handle_signal(int sig) if (In_code_area(gregs[REG_PC])) { /* Cached in register l7, which is saved on the stack 7 words after the stack pointer. */ - ((long *)(gregs[REG_SP]))[7] = (long) young_limit; + ((long *)(gregs[REG_SP]))[7] = (long) caml_young_limit; } } #endif @@ -459,7 +461,7 @@ value install_signal_handler(value signal_number, value action) /* ML */ signal_handlers = caml_alloc(NSIG, 0); register_global_root(&signal_handlers); } - modify(&Field(signal_handlers, sig), Field(action, 0)); + caml_modify(&Field(signal_handlers, sig), Field(action, 0)); } CAMLreturn (res); } @@ -480,10 +482,11 @@ static void trap_handler(int sig, int code, fprintf(stderr, "Fatal error: illegal instruction, code 0x%x\n", code); exit(100); } - /* Recover young_ptr and caml_exception_pointer from the %l5 and %l6 regs */ + /* Recover [caml_young_ptr] and [caml_exception_pointer] + from the %l5 and %l6 regs */ sp = (int *) context->sc_sp; caml_exception_pointer = (char *) sp[5]; - young_ptr = (char *) sp[6]; + caml_young_ptr = (char *) sp[6]; array_bound_error(); } #endif @@ -498,10 +501,11 @@ static void trap_handler(int sig, siginfo_t * info, void * context) info->si_code); exit(100); } - /* Recover young_ptr and caml_exception_pointer from the %l5 and %l6 regs */ + /* Recover [caml_young_ptr] and [caml_exception_pointer] + from the %l5 and %l6 regs */ sp = (long *) (((ucontext_t *)context)->uc_mcontext.gregs[REG_SP]); caml_exception_pointer = (char *) sp[5]; - young_ptr = (char *) sp[6]; + caml_young_ptr = (char *) sp[6]; array_bound_error(); } #endif @@ -522,9 +526,10 @@ static void trap_handler(int sig, int code, STRUCT_SIGCONTEXT * context) sigemptyset(&mask); sigaddset(&mask, SIGTRAP); sigprocmask(SIG_UNBLOCK, &mask, NULL); - /* Recover young_ptr and caml_exception_pointer from registers 31 and 29 */ + /* Recover [caml_young_ptr] and [caml_exception_pointer] + from registers 31 and 29 */ caml_exception_pointer = (char *) CONTEXT_GPR(context, 29); - young_ptr = (char *) CONTEXT_GPR(context, 
31); + caml_young_ptr = (char *) CONTEXT_GPR(context, 31); array_bound_error(); } #endif @@ -532,9 +537,10 @@ static void trap_handler(int sig, int code, STRUCT_SIGCONTEXT * context) #if defined(TARGET_power) && defined(SYS_elf) static void trap_handler(int sig, struct sigcontext * context) { - /* Recover young_ptr and caml_exception_pointer from registers 31 and 29 */ + /* Recover [caml_young_ptr] and [caml_exception_pointer] + from registers 31 and 29 */ caml_exception_pointer = (char *) context->regs->gpr[29]; - young_ptr = (char *) context->regs->gpr[31]; + caml_young_ptr = (char *) context->regs->gpr[31]; array_bound_error(); } #endif @@ -547,9 +553,10 @@ static void trap_handler(int sig, int code, STRUCT_SIGCONTEXT * context) sigemptyset(&mask); sigaddset(&mask, SIGTRAP); sigprocmask(SIG_UNBLOCK, &mask, NULL); - /* Recover young_ptr and caml_exception_pointer from registers 31 and 29 */ + /* Recover [caml_young_ptr] and [caml_exception_pointer] + from registers 31 and 29 */ caml_exception_pointer = (char *) CONTEXT_GPR(context, 29); - young_ptr = (char *) CONTEXT_GPR(context, 31); + caml_young_ptr = (char *) CONTEXT_GPR(context, 31); array_bound_error(); } #endif @@ -557,9 +564,10 @@ static void trap_handler(int sig, int code, STRUCT_SIGCONTEXT * context) #if defined(TARGET_power) && defined(SYS_bsd) static void trap_handler(int sig, int code, struct sigcontext * context) { - /* Recover young_ptr and caml_exception_pointer from registers 31 and 29 */ + /* Recover [caml_young_ptr] and [caml_exception_pointer] + from registers 31 and 29 */ caml_exception_pointer = (char *) context->sc_frame.fixreg[29]; - young_ptr = (char *) context->sc_frame.fixreg[31]; + caml_young_ptr = (char *) context->sc_frame.fixreg[31]; array_bound_error(); } #endif diff --git a/asmrun/sparc.S b/asmrun/sparc.S index 1ac331d96..d93cf3836 100644 --- a/asmrun/sparc.S +++ b/asmrun/sparc.S @@ -22,8 +22,8 @@ .common _caml_required_size, 4, "bss" -#define Young_limit _young_limit -#define Young_ptr _young_ptr +#define Caml_young_limit _caml_young_limit +#define Caml_young_ptr _caml_young_ptr #define Caml_bottom_of_stack _caml_bottom_of_stack #define Caml_last_return_address _caml_last_return_address #define Caml_gc_regs _caml_gc_regs @@ -36,9 +36,9 @@ #define Caml_start_program _caml_start_program #define Caml_program _caml_program #define Raise_caml_exception _raise_caml_exception -#define Callback_exn _callback_exn -#define Callback2_exn _callback2_exn -#define Callback3_exn _callback3_exn +#define Caml_callback_exn _caml_callback_exn +#define Caml_callback2_exn _caml_callback2_exn +#define Caml_callback3_exn _caml_callback3_exn #define Caml_apply2 _caml_apply2 #define Caml_apply3 _caml_apply3 #define Mlraise _mlraise @@ -62,9 +62,9 @@ #define Caml_start_program caml_start_program #define Caml_program caml_program #define Raise_caml_exception raise_caml_exception -#define Callback_exn callback_exn -#define Callback2_exn callback2_exn -#define Callback3_exn callback3_exn +#define Caml_callback_exn caml_callback_exn +#define Caml_callback2_exn caml_callback2_exn +#define Caml_callback3_exn caml_callback3_exn #define Caml_apply2 caml_apply2 #define Caml_apply3 caml_apply3 #define Mlraise mlraise @@ -113,7 +113,7 @@ Caml_call_gc: /* Save exception pointer if GC raises */ Store(Exn_ptr, Caml_exception_pointer) /* Save current allocation pointer for debugging purposes */ - Store(Alloc_ptr, Young_ptr) + Store(Alloc_ptr, Caml_young_ptr) /* Record lowest stack address */ Store(%sp, Caml_bottom_of_stack) /* Record last return 
address */ @@ -200,7 +200,7 @@ L100: add %sp, 96 + 15*8, %g2 ldd [%g1 + 0x68], %f26 ldd [%g1 + 0x70], %f28 /* Reload alloc ptr */ - Load(Young_ptr, Alloc_ptr) + Load(Caml_young_ptr, Alloc_ptr) /* Allocate space for block */ Load(Caml_required_size, %g2) #ifdef INDIRECT_LIMIT @@ -208,7 +208,7 @@ L100: add %sp, 96 + 15*8, %g2 sub Alloc_ptr, %g2, Alloc_ptr cmp Alloc_ptr, %g1 /* Check that we have enough free space */ #else - Load(Young_limit,Alloc_limit) + Load(Caml_young_limit,Alloc_limit) sub Alloc_ptr, %g2, Alloc_ptr cmp Alloc_ptr, Alloc_limit #endif @@ -229,17 +229,17 @@ Caml_c_call: Store(%o7, Caml_last_return_address) /* Save the exception handler and alloc pointer */ Store(Exn_ptr, Caml_exception_pointer) - sethi %hi(Young_ptr), %g1 + sethi %hi(Caml_young_ptr), %g1 /* Call the C function */ call %g2 - st Alloc_ptr, [%g1 + %lo(Young_ptr)] /* in delay slot */ + st Alloc_ptr, [%g1 + %lo(Caml_young_ptr)] /* in delay slot */ /* Reload return address */ Load(Caml_last_return_address, %o7) /* Reload alloc pointer */ - sethi %hi(Young_ptr), %g1 + sethi %hi(Caml_young_ptr), %g1 /* Return to caller */ retl - ld [%g1 + %lo(Young_ptr)], Alloc_ptr /* in delay slot */ + ld [%g1 + %lo(Caml_young_ptr)], Alloc_ptr /* in delay slot */ /* Start the Caml program */ @@ -250,7 +250,7 @@ Caml_start_program: /* Address of code to call */ Address(Caml_program, %l2) - /* Code shared with callback* */ + /* Code shared with caml_callback* */ L108: /* Set up a callback link on the stack. */ sub %sp, 16, %sp @@ -270,11 +270,11 @@ L111: sub %sp, 8, %sp st Exn_ptr, [%sp + 100] mov %sp, Exn_ptr /* Reload allocation pointers */ - Load(Young_ptr, Alloc_ptr) + Load(Caml_young_ptr, Alloc_ptr) #ifdef INDIRECT_LIMIT - Address(Young_limit, Alloc_limit) + Address(Caml_young_limit, Alloc_limit) #else - Load(Young_limit, Alloc_limit) + Load(Caml_young_limit, Alloc_limit) #endif /* Call the Caml code */ L109: call %l2 @@ -292,7 +292,7 @@ L112: ld [%sp + 96], %l0 Store(%l2, Caml_gc_regs) add %sp, 16, %sp /* Save allocation pointer */ - Store(Alloc_ptr, Young_ptr) + Store(Alloc_ptr, Caml_young_ptr) /* Reload callee-save registers and return */ ret restore %o0, 0, %o0 /* copy %o0 in this window to caller's %o0 */ @@ -321,11 +321,11 @@ L106: restore nop L107: /* Reload allocation registers */ - Load(Young_ptr, Alloc_ptr) + Load(Caml_young_ptr, Alloc_ptr) #ifdef INDIRECT_LIMIT - Address(Young_limit, Alloc_limit) + Address(Caml_young_limit, Alloc_limit) #else - Load(Young_limit, Alloc_limit) + Load(Caml_young_limit, Alloc_limit) #endif /* Branch to exception handler */ mov %g3, %sp diff --git a/asmrun/startup.c b/asmrun/startup.c index 68c20d3cf..3b1851d0a 100644 --- a/asmrun/startup.c +++ b/asmrun/startup.c @@ -32,7 +32,7 @@ #endif extern int parser_trace; -header_t atom_table[256]; +header_t caml_atom_table[256]; char * static_data_start, * static_data_end; char * code_area_start, * code_area_end; @@ -56,7 +56,7 @@ static void init_atoms(void) int i; extern struct segment caml_data_segments[], caml_code_segments[]; - for (i = 0; i < 256; i++) atom_table[i] = Make_header(0, i, Caml_white); + for (i = 0; i < 256; i++) caml_atom_table[i] = Make_header(0, i, Caml_white); minmax_table(caml_data_segments, &static_data_start, &static_data_end); minmax_table(caml_code_segments, &code_area_start, &code_area_end); } diff --git a/boot/ocamlc b/boot/ocamlc Binary files differindex a993af2d6..2335f8480 100755 --- a/boot/ocamlc +++ b/boot/ocamlc diff --git a/boot/ocamllex b/boot/ocamllex Binary files differindex 34329c9be..e6a5ec840 100755 --- 
a/boot/ocamllex +++ b/boot/ocamllex diff --git a/bytecomp/bytegen.ml b/bytecomp/bytegen.ml index 980bd262f..b91efc105 100644 --- a/bytecomp/bytegen.ml +++ b/bytecomp/bytegen.ml @@ -310,17 +310,17 @@ let comp_primitive p args = | Pstringrefu -> Kgetstringchar | Pstringsetu -> Ksetstringchar | Parraylength kind -> Kvectlength - | Parrayrefs Pgenarray -> Kccall("array_get", 2) - | Parrayrefs Pfloatarray -> Kccall("array_get_float", 2) - | Parrayrefs _ -> Kccall("array_get_addr", 2) - | Parraysets Pgenarray -> Kccall("array_set", 3) - | Parraysets Pfloatarray -> Kccall("array_set_float", 3) - | Parraysets _ -> Kccall("array_set_addr", 3) - | Parrayrefu Pgenarray -> Kccall("array_unsafe_get", 2) - | Parrayrefu Pfloatarray -> Kccall("array_unsafe_get_float", 2) + | Parrayrefs Pgenarray -> Kccall("caml_array_get", 2) + | Parrayrefs Pfloatarray -> Kccall("caml_array_get_float", 2) + | Parrayrefs _ -> Kccall("caml_array_get_addr", 2) + | Parraysets Pgenarray -> Kccall("caml_array_set", 3) + | Parraysets Pfloatarray -> Kccall("caml_array_set_float", 3) + | Parraysets _ -> Kccall("caml_array_set_addr", 3) + | Parrayrefu Pgenarray -> Kccall("caml_array_unsafe_get", 2) + | Parrayrefu Pfloatarray -> Kccall("caml_array_unsafe_get_float", 2) | Parrayrefu _ -> Kgetvectitem - | Parraysetu Pgenarray -> Kccall("array_unsafe_set", 3) - | Parraysetu Pfloatarray -> Kccall("array_unsafe_set_float", 3) + | Parraysetu Pgenarray -> Kccall("caml_array_unsafe_set", 3) + | Parraysetu Pfloatarray -> Kccall("caml_array_unsafe_set_float", 3) | Parraysetu _ -> Ksetvectitem | Pisint -> Kisint | Pisout -> Kisout @@ -345,12 +345,12 @@ let comp_primitive p args = | Plslbint bi -> comp_bint_primitive bi "shift_left" args | Plsrbint bi -> comp_bint_primitive bi "shift_right_unsigned" args | Pasrbint bi -> comp_bint_primitive bi "shift_right" args - | Pbintcomp(bi, Ceq) -> Kccall("equal", 2) - | Pbintcomp(bi, Cneq) -> Kccall("notequal", 2) - | Pbintcomp(bi, Clt) -> Kccall("lessthan", 2) - | Pbintcomp(bi, Cgt) -> Kccall("greaterthan", 2) - | Pbintcomp(bi, Cle) -> Kccall("lessequal", 2) - | Pbintcomp(bi, Cge) -> Kccall("greaterequal", 2) + | Pbintcomp(bi, Ceq) -> Kccall("caml_equal", 2) + | Pbintcomp(bi, Cneq) -> Kccall("caml_notequal", 2) + | Pbintcomp(bi, Clt) -> Kccall("caml_lessthan", 2) + | Pbintcomp(bi, Cgt) -> Kccall("caml_greaterthan", 2) + | Pbintcomp(bi, Cle) -> Kccall("caml_lessequal", 2) + | Pbintcomp(bi, Cge) -> Kccall("caml_greaterequal", 2) | Pbigarrayref(n, _, _) -> Kccall("bigarray_get_" ^ string_of_int n, n + 1) | Pbigarrayset(n, _, _) -> Kccall("bigarray_set_" ^ string_of_int n, n + 2) | _ -> fatal_error "Bytegen.comp_primitive" @@ -554,7 +554,7 @@ let rec comp_expr env exp sz cont = then Kmakeblock(0, 0) :: cont else comp_args env args sz (Kmakeblock(List.length args, 0) :: - Kccall("make_array", 1) :: cont) + Kccall("caml_make_array", 1) :: cont) end (* Integer first for enabling futher optimization (cf. 
emitcode.ml) *) | Lprim (Pintcomp c, [arg ; (Lconst _ as k)]) -> @@ -779,7 +779,7 @@ let comp_block env exp sz cont = (* +1 because comp_expr may have pushed one more word *) if !max_stack_used + 1 > Config.stack_threshold then Kconst(Const_base(Const_int(!max_stack_used + 1))) :: - Kccall("ensure_stack_capacity", 1) :: + Kccall("caml_ensure_stack_capacity", 1) :: code else code diff --git a/bytecomp/meta.ml b/bytecomp/meta.ml index e8ae4608a..10fe28d34 100644 --- a/bytecomp/meta.ml +++ b/bytecomp/meta.ml @@ -12,12 +12,12 @@ (* $Id$ *) -external global_data : unit -> Obj.t array = "get_global_data" -external realloc_global_data : int -> unit = "realloc_global" +external global_data : unit -> Obj.t array = "caml_get_global_data" +external realloc_global_data : int -> unit = "caml_realloc_global" external static_alloc : int -> string = "static_alloc" external static_free : string -> unit = "static_free" external static_resize : string -> int -> string = "static_resize" type closure = unit -> Obj.t -external reify_bytecode : string -> int -> closure = "reify_bytecode" +external reify_bytecode : string -> int -> closure = "caml_reify_bytecode" external invoke_traced_function : Obj.t -> Obj.t -> Obj.t -> Obj.t - = "invoke_traced_function" + = "caml_invoke_traced_function" diff --git a/bytecomp/meta.mli b/bytecomp/meta.mli index e76a56532..e59a4a77b 100644 --- a/bytecomp/meta.mli +++ b/bytecomp/meta.mli @@ -14,12 +14,12 @@ (* To control the runtime system and bytecode interpreter *) -external global_data : unit -> Obj.t array = "get_global_data" -external realloc_global_data : int -> unit = "realloc_global" +external global_data : unit -> Obj.t array = "caml_get_global_data" +external realloc_global_data : int -> unit = "caml_realloc_global" external static_alloc : int -> string = "static_alloc" external static_free : string -> unit = "static_free" external static_resize : string -> int -> string = "static_resize" type closure = unit -> Obj.t -external reify_bytecode : string -> int -> closure = "reify_bytecode" +external reify_bytecode : string -> int -> closure = "caml_reify_bytecode" external invoke_traced_function : Obj.t -> Obj.t -> Obj.t -> Obj.t - = "invoke_traced_function" + = "caml_invoke_traced_function" diff --git a/bytecomp/translcore.ml b/bytecomp/translcore.ml index 53e122f60..93e31ea05 100644 --- a/bytecomp/translcore.ml +++ b/bytecomp/translcore.ml @@ -44,7 +44,7 @@ let transl_object = let comparisons_table = create_hashtable 11 [ "%equal", - (Pccall{prim_name = "equal"; prim_arity = 2; prim_alloc = true; + (Pccall{prim_name = "caml_equal"; prim_arity = 2; prim_alloc = true; prim_native_name = ""; prim_native_float = false}, Pintcomp Ceq, Pfloatcomp Ceq, @@ -55,7 +55,7 @@ let comparisons_table = create_hashtable 11 [ Pbintcomp(Pint32, Ceq), Pbintcomp(Pint64, Ceq)); "%notequal", - (Pccall{prim_name = "notequal"; prim_arity = 2; prim_alloc = true; + (Pccall{prim_name = "caml_notequal"; prim_arity = 2; prim_alloc = true; prim_native_name = ""; prim_native_float = false}, Pintcomp Cneq, Pfloatcomp Cneq, @@ -66,7 +66,7 @@ let comparisons_table = create_hashtable 11 [ Pbintcomp(Pint32, Cneq), Pbintcomp(Pint64, Cneq)); "%lessthan", - (Pccall{prim_name = "lessthan"; prim_arity = 2; prim_alloc = true; + (Pccall{prim_name = "caml_lessthan"; prim_arity = 2; prim_alloc = true; prim_native_name = ""; prim_native_float = false}, Pintcomp Clt, Pfloatcomp Clt, @@ -77,7 +77,7 @@ let comparisons_table = create_hashtable 11 [ Pbintcomp(Pint32, Clt), Pbintcomp(Pint64, Clt)); "%greaterthan", - 
(Pccall{prim_name = "greaterthan"; prim_arity = 2; prim_alloc = true; + (Pccall{prim_name = "caml_greaterthan"; prim_arity = 2; prim_alloc = true; prim_native_name = ""; prim_native_float = false}, Pintcomp Cgt, Pfloatcomp Cgt, @@ -88,7 +88,7 @@ let comparisons_table = create_hashtable 11 [ Pbintcomp(Pint32, Cgt), Pbintcomp(Pint64, Cgt)); "%lessequal", - (Pccall{prim_name = "lessequal"; prim_arity = 2; prim_alloc = true; + (Pccall{prim_name = "caml_lessequal"; prim_arity = 2; prim_alloc = true; prim_native_name = ""; prim_native_float = false}, Pintcomp Cle, Pfloatcomp Cle, @@ -99,7 +99,8 @@ let comparisons_table = create_hashtable 11 [ Pbintcomp(Pint32, Cle), Pbintcomp(Pint64, Cle)); "%greaterequal", - (Pccall{prim_name = "greaterequal"; prim_arity = 2; prim_alloc = true; + (Pccall{prim_name = "caml_greaterequal"; prim_arity = 2; + prim_alloc = true; prim_native_name = ""; prim_native_float = false}, Pintcomp Cge, Pfloatcomp Cge, @@ -110,7 +111,7 @@ let comparisons_table = create_hashtable 11 [ Pbintcomp(Pint32, Cge), Pbintcomp(Pint64, Cge)); "%compare", - (Pccall{prim_name = "compare"; prim_arity = 2; prim_alloc = true; + (Pccall{prim_name = "caml_compare"; prim_arity = 2; prim_alloc = true; prim_native_name = ""; prim_native_float = false}, Pccall{prim_name = "int_compare"; prim_arity = 2; prim_alloc = false; prim_native_name = ""; @@ -251,7 +252,7 @@ let primitives_table = create_hashtable 57 [ ] let prim_makearray = - { prim_name = "make_vect"; prim_arity = 2; prim_alloc = true; + { prim_name = "caml_make_vect"; prim_arity = 2; prim_alloc = true; prim_native_name = ""; prim_native_float = false } let prim_obj_dup = diff --git a/byterun/alloc.c b/byterun/alloc.c index 3784b1391..c25e53b4f 100644 --- a/byterun/alloc.c +++ b/byterun/alloc.c @@ -44,9 +44,9 @@ CAMLexport value caml_alloc (mlsize_t wosize, tag_t tag) for (i = 0; i < wosize; i++) Field (result, i) = 0; } }else{ - result = alloc_shr (wosize, tag); + result = caml_alloc_shr (wosize, tag); if (tag < No_scan_tag) memset (Bp_val (result), 0, Bsize_wsize (wosize)); - result = check_urgent_gc (result); + result = caml_check_urgent_gc (result); } return result; } @@ -76,8 +76,8 @@ CAMLexport value caml_alloc_string (mlsize_t len) if (wosize <= Max_young_wosize) { Alloc_small (result, wosize, String_tag); }else{ - result = alloc_shr (wosize, String_tag); - result = check_urgent_gc (result); + result = caml_alloc_shr (wosize, String_tag); + result = caml_check_urgent_gc (result); } Field (result, wosize - 1) = 0; offset_index = Bsize_wsize (wosize) - 1; @@ -121,7 +121,7 @@ CAMLexport value caml_alloc_array(value (*funct)(char const *), order (don't take the address &Field(result, n) before calling funct, which may cause a GC and move result). 
*/ v = funct(arr[n]); - modify(&Field(result, n), v); + caml_modify(&Field(result, n), v); } CAMLreturn (result); } @@ -160,6 +160,6 @@ CAMLprim value caml_update_dummy(value dummy, value newval) Assert (size == Wosize_val(dummy)); Tag_val(dummy) = Tag_val(newval); for (i = 0; i < size; i++) - modify(&Field(dummy, i), Field(newval, i)); + caml_modify(&Field(dummy, i), Field(newval, i)); return Val_unit; } diff --git a/byterun/array.c b/byterun/array.c index 3a56d8e42..097607777 100644 --- a/byterun/array.c +++ b/byterun/array.c @@ -23,14 +23,14 @@ #ifndef NATIVE_CODE -CAMLprim value array_get_addr(value array, value index) +CAMLprim value caml_array_get_addr(value array, value index) { long idx = Long_val(index); if (idx < 0 || idx >= Wosize_val(array)) array_bound_error(); return Field(array, idx); } -CAMLprim value array_get_float(value array, value index) +CAMLprim value caml_array_get_float(value array, value index) { long idx = Long_val(index); double d; @@ -48,15 +48,15 @@ CAMLprim value array_get_float(value array, value index) return res; } -CAMLprim value array_get(value array, value index) +CAMLprim value caml_array_get(value array, value index) { if (Tag_val(array) == Double_array_tag) - return array_get_float(array, index); + return caml_array_get_float(array, index); else - return array_get_addr(array, index); + return caml_array_get_addr(array, index); } -CAMLprim value array_set_addr(value array, value index, value newval) +CAMLprim value caml_array_set_addr(value array, value index, value newval) { long idx = Long_val(index); if (idx < 0 || idx >= Wosize_val(array)) array_bound_error(); @@ -64,7 +64,7 @@ CAMLprim value array_set_addr(value array, value index, value newval) return Val_unit; } -CAMLprim value array_set_float(value array, value index, value newval) +CAMLprim value caml_array_set_float(value array, value index, value newval) { long idx = Long_val(index); if (idx < 0 || idx >= Wosize_val(array) / Double_wosize) @@ -73,15 +73,15 @@ CAMLprim value array_set_float(value array, value index, value newval) return Val_unit; } -CAMLprim value array_set(value array, value index, value newval) +CAMLprim value caml_array_set(value array, value index, value newval) { if (Tag_val(array) == Double_array_tag) - return array_set_float(array, index, newval); + return caml_array_set_float(array, index, newval); else - return array_set_addr(array, index, newval); + return caml_array_set_addr(array, index, newval); } -CAMLprim value array_unsafe_get_float(value array, value index) +CAMLprim value caml_array_unsafe_get_float(value array, value index) { double d; value res; @@ -96,38 +96,38 @@ CAMLprim value array_unsafe_get_float(value array, value index) return res; } -CAMLprim value array_unsafe_get(value array, value index) +CAMLprim value caml_array_unsafe_get(value array, value index) { if (Tag_val(array) == Double_array_tag) - return array_unsafe_get_float(array, index); + return caml_array_unsafe_get_float(array, index); else return Field(array, Long_val(index)); } -CAMLprim value array_unsafe_set_addr(value array, value index, value newval) +CAMLprim value caml_array_unsafe_set_addr(value array, value index,value newval) { long idx = Long_val(index); Modify(&Field(array, idx), newval); return Val_unit; } -CAMLprim value array_unsafe_set_float(value array, value index, value newval) +CAMLprim value caml_array_unsafe_set_float(value array,value index,value newval) { Store_double_field(array, Long_val(index), Double_val(newval)); return Val_unit; } -CAMLprim value 
array_unsafe_set(value array, value index, value newval) +CAMLprim value caml_array_unsafe_set(value array, value index, value newval) { if (Tag_val(array) == Double_array_tag) - return array_unsafe_set_float(array, index, newval); + return caml_array_unsafe_set_float(array, index, newval); else - return array_unsafe_set_addr(array, index, newval); + return caml_array_unsafe_set_addr(array, index, newval); } #endif -CAMLprim value make_vect(value len, value init) +CAMLprim value caml_make_vect(value len, value init) { CAMLparam2 (len, init); CAMLlocal1 (res); @@ -155,21 +155,21 @@ CAMLprim value make_vect(value len, value init) for (i = 0; i < size; i++) Field(res, i) = init; } else if (Is_block(init) && Is_young(init)) { - minor_collection(); - res = alloc_shr(size, 0); + caml_minor_collection(); + res = caml_alloc_shr(size, 0); for (i = 0; i < size; i++) Field(res, i) = init; - res = check_urgent_gc (res); + res = caml_check_urgent_gc (res); } else { - res = alloc_shr(size, 0); - for (i = 0; i < size; i++) initialize(&Field(res, i), init); - res = check_urgent_gc (res); + res = caml_alloc_shr(size, 0); + for (i = 0; i < size; i++) caml_initialize(&Field(res, i), init); + res = caml_check_urgent_gc (res); } } CAMLreturn (res); } -CAMLprim value make_array(value init) +CAMLprim value caml_make_array(value init) { CAMLparam1 (init); mlsize_t wsize, size, i; diff --git a/byterun/backtrace.c b/byterun/backtrace.c index deda286de..5ac64f090 100644 --- a/byterun/backtrace.c +++ b/byterun/backtrace.c @@ -34,10 +34,10 @@ #include "sys.h" #include "backtrace.h" -CAMLexport int backtrace_active = 0; -CAMLexport int backtrace_pos = 0; -CAMLexport code_t * backtrace_buffer = NULL; -CAMLexport value backtrace_last_exn = Val_unit; +CAMLexport int caml_backtrace_active = 0; +CAMLexport int caml_backtrace_pos = 0; +CAMLexport code_t * caml_backtrace_buffer = NULL; +CAMLexport value caml_backtrace_last_exn = Val_unit; #define BACKTRACE_BUFFER_SIZE 1024 /* Location of fields in the Instruct.debug_event record */ @@ -56,38 +56,38 @@ enum { /* Initialize the backtrace machinery */ -void init_backtrace(void) +void caml_init_backtrace(void) { - backtrace_active = 1; - register_global_root(&backtrace_last_exn); - /* Note: lazy initialization of backtrace_buffer in stash_backtrace + caml_backtrace_active = 1; + register_global_root(&caml_backtrace_last_exn); + /* Note: lazy initialization of caml_backtrace_buffer in caml_stash_backtrace to simplify the interface with the thread libraries */ } /* Store the return addresses contained in the given stack fragment into the backtrace array */ -void stash_backtrace(value exn, code_t pc, value * sp) +void caml_stash_backtrace(value exn, code_t pc, value * sp) { code_t end_code = (code_t) ((char *) start_code + code_size); if (pc != NULL) pc = pc - 1; - if (exn != backtrace_last_exn) { - backtrace_pos = 0; - backtrace_last_exn = exn; + if (exn != caml_backtrace_last_exn) { + caml_backtrace_pos = 0; + caml_backtrace_last_exn = exn; } - if (backtrace_buffer == NULL) { - backtrace_buffer = malloc(BACKTRACE_BUFFER_SIZE * sizeof(code_t)); - if (backtrace_buffer == NULL) return; + if (caml_backtrace_buffer == NULL) { + caml_backtrace_buffer = malloc(BACKTRACE_BUFFER_SIZE * sizeof(code_t)); + if (caml_backtrace_buffer == NULL) return; } - if (backtrace_pos >= BACKTRACE_BUFFER_SIZE) return; + if (caml_backtrace_pos >= BACKTRACE_BUFFER_SIZE) return; if (pc >= start_code && pc < end_code){ - backtrace_buffer[backtrace_pos++] = pc; + caml_backtrace_buffer[caml_backtrace_pos++] = 
pc; } - for (/*nothing*/; sp < trapsp; sp++) { + for (/*nothing*/; sp < caml_trapsp; sp++) { code_t p = (code_t) *sp; if (p >= start_code && p < end_code) { - if (backtrace_pos >= BACKTRACE_BUFFER_SIZE) break; - backtrace_buffer[backtrace_pos++] = p; + if (caml_backtrace_pos >= BACKTRACE_BUFFER_SIZE) break; + caml_backtrace_buffer[caml_backtrace_pos++] = p; } } } @@ -112,10 +112,10 @@ static value read_debug_info(void) value evl, l; exec_name = caml_exe_name; - fd = attempt_open(&exec_name, &trail, 1); + fd = caml_attempt_open(&exec_name, &trail, 1); if (fd < 0) CAMLreturn(Val_false); - read_section_descriptors(fd, &trail); - if (seek_optional_section(fd, &trail, "DBUG") == -1) { + caml_read_section_descriptors(fd, &trail); + if (caml_seek_optional_section(fd, &trail, "DBUG") == -1) { close(fd); CAMLreturn(Val_false); } @@ -162,7 +162,7 @@ static value event_for_location(value events, code_t pc) static void print_location(value events, int index) { - code_t pc = backtrace_buffer[index]; + code_t pc = caml_backtrace_buffer[index]; char * info; value ev; @@ -196,7 +196,7 @@ static void print_location(value events, int index) /* Print a backtrace */ -CAMLexport void print_exception_backtrace(void) +CAMLexport void caml_print_exception_backtrace(void) { value events; int i; @@ -207,6 +207,6 @@ CAMLexport void print_exception_backtrace(void) "(Program not linked with -g, cannot print stack backtrace)\n"); return; } - for (i = 0; i < backtrace_pos; i++) + for (i = 0; i < caml_backtrace_pos; i++) print_location(events, i); } diff --git a/byterun/backtrace.h b/byterun/backtrace.h index 7977d974d..be8c48289 100644 --- a/byterun/backtrace.h +++ b/byterun/backtrace.h @@ -18,13 +18,13 @@ #include "mlvalues.h" -CAMLextern int backtrace_active; -CAMLextern int backtrace_pos; -CAMLextern code_t * backtrace_buffer; -CAMLextern value backtrace_last_exn; +CAMLextern int caml_backtrace_active; +CAMLextern int caml_backtrace_pos; +CAMLextern code_t * caml_backtrace_buffer; +CAMLextern value caml_backtrace_last_exn; -extern void init_backtrace(void); -extern void stash_backtrace(value exn, code_t pc, value * sp); -CAMLextern void print_exception_backtrace(void); +extern void caml_init_backtrace(void); +extern void caml_stash_backtrace(value exn, code_t pc, value * sp); +CAMLextern void caml_print_exception_backtrace(void); #endif /* CAML_BACKTRACE_H */ diff --git a/byterun/callback.c b/byterun/callback.c index 2d25b4f7b..d62ef608f 100644 --- a/byterun/callback.c +++ b/byterun/callback.c @@ -30,7 +30,7 @@ #include "fix_code.h" #include "stacks.h" -int callback_depth = 0; +int caml_callback_depth = 0; static opcode_t callback_code[] = { ACC, 0, APPLY, 0, POP, 1, STOP }; @@ -52,56 +52,56 @@ static void thread_callback(void) #endif -CAMLexport value callbackN_exn(value closure, int narg, value args[]) +CAMLexport value caml_callbackN_exn(value closure, int narg, value args[]) { int i; value res; Assert(narg + 4 <= 256); Init_callback(); - extern_sp -= narg + 4; - for (i = 0; i < narg; i++) extern_sp[i] = args[i]; /* arguments */ - extern_sp[narg] = (value) (callback_code + 4); /* return address */ - extern_sp[narg + 1] = Val_unit; /* environment */ - extern_sp[narg + 2] = Val_long(0); /* extra args */ - extern_sp[narg + 3] = closure; + caml_extern_sp -= narg + 4; + for (i = 0; i < narg; i++) caml_extern_sp[i] = args[i]; /* arguments */ + caml_extern_sp[narg] = (value) (callback_code + 4); /* return address */ + caml_extern_sp[narg + 1] = Val_unit; /* environment */ + caml_extern_sp[narg + 2] = Val_long(0); /* 
extra args */ + caml_extern_sp[narg + 3] = closure; callback_code[1] = narg + 3; callback_code[3] = narg; res = interprete(callback_code, sizeof(callback_code)); - if (Is_exception_result(res)) extern_sp += narg + 4; /* PR#1228 */ + if (Is_exception_result(res)) caml_extern_sp += narg + 4; /* PR#1228 */ return res; } -CAMLexport value callback_exn(value closure, value arg1) +CAMLexport value caml_callback_exn(value closure, value arg1) { value arg[1]; arg[0] = arg1; - return callbackN_exn(closure, 1, arg); + return caml_callbackN_exn(closure, 1, arg); } -CAMLexport value callback2_exn(value closure, value arg1, value arg2) +CAMLexport value caml_callback2_exn(value closure, value arg1, value arg2) { value arg[2]; arg[0] = arg1; arg[1] = arg2; - return callbackN_exn(closure, 2, arg); + return caml_callbackN_exn(closure, 2, arg); } -CAMLexport value callback3_exn(value closure, +CAMLexport value caml_callback3_exn(value closure, value arg1, value arg2, value arg3) { value arg[3]; arg[0] = arg1; arg[1] = arg2; arg[2] = arg3; - return callbackN_exn(closure, 3, arg); + return caml_callbackN_exn(closure, 3, arg); } #else -/* Native-code callbacks. callback[123]_exn are implemented in asm. */ +/* Native-code callbacks. caml_callback[123]_exn are implemented in asm. */ -CAMLexport value callbackN_exn(value closure, int narg, value args[]) +CAMLexport value caml_callbackN_exn(value closure, int narg, value args[]) { CAMLparam1 (closure); CAMLxparamN (args, narg); @@ -113,17 +113,17 @@ CAMLexport value callbackN_exn(value closure, int narg, value args[]) /* Pass as many arguments as possible */ switch (narg - i) { case 1: - res = callback_exn(res, args[i]); + res = caml_callback_exn(res, args[i]); if (Is_exception_result(res)) CAMLreturn (res); i += 1; break; case 2: - res = callback2_exn(res, args[i], args[i + 1]); + res = caml_callback2_exn(res, args[i], args[i + 1]); if (Is_exception_result(res)) CAMLreturn (res); i += 2; break; default: - res = callback3_exn(res, args[i], args[i + 1], args[i + 2]); + res = caml_callback3_exn(res, args[i], args[i + 1], args[i + 2]); if (Is_exception_result(res)) CAMLreturn (res); i += 3; break; @@ -136,30 +136,31 @@ CAMLexport value callbackN_exn(value closure, int narg, value args[]) /* Exception-propagating variants of the above */ -CAMLexport value callback (value closure, value arg) +CAMLexport value caml_callback (value closure, value arg) { - value res = callback_exn(closure, arg); + value res = caml_callback_exn(closure, arg); if (Is_exception_result(res)) mlraise(Extract_exception(res)); return res; } -CAMLexport value callback2 (value closure, value arg1, value arg2) +CAMLexport value caml_callback2 (value closure, value arg1, value arg2) { - value res = callback2_exn(closure, arg1, arg2); + value res = caml_callback2_exn(closure, arg1, arg2); if (Is_exception_result(res)) mlraise(Extract_exception(res)); return res; } -CAMLexport value callback3 (value closure, value arg1, value arg2, value arg3) +CAMLexport value caml_callback3 (value closure, value arg1, value arg2, + value arg3) { - value res = callback3_exn(closure, arg1, arg2, arg3); + value res = caml_callback3_exn(closure, arg1, arg2, arg3); if (Is_exception_result(res)) mlraise(Extract_exception(res)); return res; } -CAMLexport value callbackN (value closure, int narg, value args[]) +CAMLexport value caml_callbackN (value closure, int narg, value args[]) { - value res = callbackN_exn(closure, narg, args); + value res = caml_callbackN_exn(closure, narg, args); if (Is_exception_result(res)) 
mlraise(Extract_exception(res)); return res; } @@ -183,14 +184,14 @@ static unsigned int hash_value_name(char *name) return h % Named_value_size; } -CAMLprim value register_named_value(value vname, value val) +CAMLprim value caml_register_named_value(value vname, value val) { struct named_value * nv; char * name = String_val(vname); unsigned int h = hash_value_name(name); nv = (struct named_value *) - stat_alloc(sizeof(struct named_value) + strlen(name)); + caml_stat_alloc(sizeof(struct named_value) + strlen(name)); strcpy(nv->name, name); nv->val = val; nv->next = named_value_table[h]; diff --git a/byterun/callback.h b/byterun/callback.h index 492c09611..7f9ae4a5f 100644 --- a/byterun/callback.h +++ b/byterun/callback.h @@ -23,16 +23,17 @@ #endif #include "mlvalues.h" -CAMLextern value callback (value closure, value arg); -CAMLextern value callback2 (value closure, value arg1, value arg2); -CAMLextern value callback3 (value closure, value arg1, value arg2, value arg3); -CAMLextern value callbackN (value closure, int narg, value args[]); - -CAMLextern value callback_exn (value closure, value arg); -CAMLextern value callback2_exn (value closure, value arg1, value arg2); -CAMLextern value callback3_exn (value closure, - value arg1, value arg2, value arg3); -CAMLextern value callbackN_exn (value closure, int narg, value args[]); +CAMLextern value caml_callback (value closure, value arg); +CAMLextern value caml_callback2 (value closure, value arg1, value arg2); +CAMLextern value caml_callback3 (value closure, value arg1, value arg2, + value arg3); +CAMLextern value caml_callbackN (value closure, int narg, value args[]); + +CAMLextern value caml_callback_exn (value closure, value arg); +CAMLextern value caml_callback2_exn (value closure, value arg1, value arg2); +CAMLextern value caml_callback3_exn (value closure, + value arg1, value arg2, value arg3); +CAMLextern value caml_callbackN_exn (value closure, int narg, value args[]); #define Make_exception_result(v) ((v) | 2) #define Is_exception_result(v) (((v) & 3) == 2) @@ -45,6 +46,6 @@ CAMLextern value * caml_named_value (char * name); CAMLextern void caml_main (char ** argv); CAMLextern void caml_startup (char ** argv); -CAMLextern int callback_depth; +CAMLextern int caml_callback_depth; #endif diff --git a/byterun/compact.c b/byterun/compact.c index 1d15425a5..8a9da2121 100644 --- a/byterun/compact.c +++ b/byterun/compact.c @@ -26,8 +26,8 @@ #include "roots.h" #include "weak.h" -extern unsigned long percent_free; /* major_gc.c */ -extern void shrink_heap (char *); /* memory.c */ +extern unsigned long caml_percent_free; /* major_gc.c */ +extern void caml_shrink_heap (char *); /* memory.c */ /* Encoded headers: the color is stored in the 2 least significant bits. (For pointer inversion, we need to distinguish headers from pointers.) @@ -116,12 +116,12 @@ static char *compact_fl; static void init_compact_allocate (void) { - char *ch = heap_start; + char *ch = caml_heap_start; while (ch != NULL){ Chunk_alloc (ch) = 0; ch = Chunk_next (ch); } - compact_fl = heap_start; + compact_fl = caml_heap_start; } static char *compact_allocate (mlsize_t size) @@ -144,10 +144,10 @@ static char *compact_allocate (mlsize_t size) return adr; } -void compact_heap (void) +void caml_compact_heap (void) { char *ch, *chend; - Assert (gc_phase == Phase_idle); + Assert (caml_gc_phase == Phase_idle); caml_gc_message (0x10, "Compacting heap...\n", 0); #ifdef DEBUG @@ -156,7 +156,7 @@ void compact_heap (void) /* First pass: encode all noninfix headers. 
*/ { - ch = heap_start; + ch = caml_heap_start; while (ch != NULL){ header_t *p = (header_t *) ch; @@ -189,7 +189,7 @@ void compact_heap (void) do_roots (invert_root); final_do_weak_roots (invert_root); - ch = heap_start; + ch = caml_heap_start; while (ch != NULL){ word *p = (word *) ch; chend = ch + Chunk_size (ch); @@ -249,7 +249,7 @@ void compact_heap (void) Rebuild infix headers. */ { init_compact_allocate (); - ch = heap_start; + ch = caml_heap_start; while (ch != NULL){ word *p = (word *) ch; @@ -304,7 +304,7 @@ void compact_heap (void) } p += sz; }else{ Assert (Ecolor (q) == 3); - /* This is guaranteed only if compact_heap was called after a + /* This is guaranteed only if caml_compact_heap was called after a nonincremental major GC: Assert (Tag_ehd (q) == String_tag); */ /* No pointers to the header and no infix header: @@ -322,7 +322,7 @@ void compact_heap (void) Use the exact same allocation algorithm as pass 3. */ { init_compact_allocate (); - ch = heap_start; + ch = caml_heap_start; while (ch != NULL){ word *p = (word *) ch; @@ -350,7 +350,7 @@ void compact_heap (void) asize_t free = 0; asize_t wanted; - ch = heap_start; + ch = caml_heap_start; while (ch != NULL){ if (Chunk_alloc (ch) != 0){ live += Wsize_bsize (Chunk_alloc (ch)); @@ -361,8 +361,8 @@ void compact_heap (void) /* Add up the empty chunks until there are enough, then remove the other empty chunks. */ - wanted = percent_free * (live / 100 + 1); - ch = heap_start; + wanted = caml_percent_free * (live / 100 + 1); + ch = caml_heap_start; while (ch != NULL){ char *next_chunk = Chunk_next (ch); /* Chunk_next (ch) will be erased */ @@ -370,7 +370,7 @@ void compact_heap (void) if (free < wanted){ free += Wsize_bsize (Chunk_size (ch)); }else{ - shrink_heap (ch); + caml_shrink_heap (ch); } } ch = next_chunk; @@ -379,7 +379,7 @@ void compact_heap (void) /* Rebuild the free list. 
*/ { - ch = heap_start; + ch = caml_heap_start; fl_reset (); while (ch != NULL){ if (Chunk_size (ch) > Chunk_alloc (ch)){ @@ -393,23 +393,23 @@ void compact_heap (void) caml_gc_message (0x10, "done.\n", 0); } -unsigned long percent_max; +unsigned long caml_percent_max; /* used in gc_ctrl.c */ -void compact_heap_maybe (void) +void caml_compact_heap_maybe (void) { /* Estimated free words in the heap: FW = fl_size_at_change + 3 * (fl_cur_size - fl_size_at_change) FW = 3 * fl_cur_size - 2 * fl_size_at_change Estimated live words: LW = stat_heap_size - FW Estimated free percentage: FP = 100 * FW / LW - We compact the heap if FP > percent_max + We compact the heap if FP > caml_percent_max */ float fw, fp; - Assert (gc_phase == Phase_idle); - if (percent_max >= 1000000) return; + Assert (caml_gc_phase == Phase_idle); + if (caml_percent_max >= 1000000) return; if (stat_major_collections < 5 || stat_heap_chunks < 5) return; - fw = 3.0 * fl_cur_size - 2.0 * fl_size_at_phase_change; + fw = 3.0 * fl_cur_size - 2.0 * caml_fl_size_at_phase_change; if (fw < 0) fw = fl_cur_size; if (fw >= Wsize_bsize (stat_heap_size)){ @@ -419,17 +419,17 @@ void compact_heap_maybe (void) if (fp > 1000000.0) fp = 1000000.0; } caml_gc_message (0x200, "FL size at phase change = %lu\n", - (unsigned long) fl_size_at_phase_change); + (unsigned long) caml_fl_size_at_phase_change); caml_gc_message (0x200, "Estimated overhead = %lu%%\n", (unsigned long) fp); - if (fp >= percent_max){ + if (fp >= caml_percent_max){ caml_gc_message (0x200, "Automatic compaction triggered.\n", 0); - finish_major_cycle (); + caml_finish_major_cycle (); /* We just did a complete GC, so we can measure the overhead exactly. */ fw = fl_cur_size; fp = 100.0 * fw / (Wsize_bsize (stat_heap_size) - fw); caml_gc_message (0x200, "Measured overhead: %lu%%\n", (unsigned long) fp); - compact_heap (); + caml_compact_heap (); } } diff --git a/byterun/compact.h b/byterun/compact.h index 60c631464..a4ef4cb18 100644 --- a/byterun/compact.h +++ b/byterun/compact.h @@ -20,8 +20,8 @@ #include "config.h" #include "misc.h" -extern void compact_heap (void); -extern void compact_heap_maybe (void); +extern void caml_compact_heap (void); +extern void caml_compact_heap_maybe (void); #endif /* CAML_COMPACT_H */ diff --git a/byterun/compare.c b/byterun/compare.c index bf5b6e4dd..62c9a868e 100644 --- a/byterun/compare.c +++ b/byterun/compare.c @@ -34,7 +34,7 @@ static struct compare_item * compare_stack = compare_stack_init; static struct compare_item * compare_stack_limit = compare_stack_init + COMPARE_STACK_INIT_SIZE; -CAMLexport int compare_unordered; +CAMLexport int caml_compare_unordered; /* Free the compare stack if needed */ static void compare_free_stack(void) @@ -199,9 +199,9 @@ static long compare_val(value v1, value v2, int total) int res; int (*compare)(value v1, value v2) = Custom_ops_val(v1)->compare; if (compare == NULL) invalid_argument("equal: abstract value"); - compare_unordered = 0; + caml_compare_unordered = 0; res = Custom_ops_val(v1)->compare(v1, v2); - if (compare_unordered && !total) return UNORDERED; + if (caml_compare_unordered && !total) return UNORDERED; if (res != 0) return res; break; } @@ -234,7 +234,7 @@ static long compare_val(value v1, value v2, int total) } } -CAMLprim value compare(value v1, value v2) +CAMLprim value caml_compare(value v1, value v2) { long res = compare_val(v1, v2, 1); /* Free stack if needed */ @@ -247,42 +247,42 @@ CAMLprim value compare(value v1, value v2) return Val_int(EQUAL); } -CAMLprim value equal(value v1, value v2) 
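For custom blocks, compare_val (above) clears caml_compare_unordered before invoking the block's compare function; setting the flag is how a custom comparison reports "unordered": compare_val then returns UNORDERED to caml_equal, caml_lessthan, etc., while the total caml_compare ignores the flag and uses the returned integer. A sketch of such a client, not part of the patch (the block layout and the name nanbox_compare are invented for the example):

#include <caml/mlvalues.h>
#include <caml/custom.h>

extern int caml_compare_unordered;   /* CAMLextern in byterun/compare.h above */

/* Hypothetical compare function for a custom block boxing one double. */
static int nanbox_compare(value v1, value v2)
{
  double d1 = *(double *) Data_custom_val(v1);
  double d2 = *(double *) Data_custom_val(v2);
  if (d1 != d1 || d2 != d2) {        /* a NaN: the values are not ordered */
    caml_compare_unordered = 1;      /* caml_equal & co. will see UNORDERED;
                                        caml_compare (total) still uses the
                                        value returned below */
    return 1;
  }
  if (d1 < d2) return -1;
  if (d1 > d2) return 1;
  return 0;
}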
+CAMLprim value caml_equal(value v1, value v2) { long res = compare_val(v1, v2, 0); if (compare_stack != compare_stack_init) compare_free_stack(); return Val_int(res == 0); } -CAMLprim value notequal(value v1, value v2) +CAMLprim value caml_notequal(value v1, value v2) { long res = compare_val(v1, v2, 0); if (compare_stack != compare_stack_init) compare_free_stack(); return Val_int(res != 0); } -CAMLprim value lessthan(value v1, value v2) +CAMLprim value caml_lessthan(value v1, value v2) { long res = compare_val(v1, v2, 0); if (compare_stack != compare_stack_init) compare_free_stack(); return Val_int(res - 1 < -1); } -CAMLprim value lessequal(value v1, value v2) +CAMLprim value caml_lessequal(value v1, value v2) { long res = compare_val(v1, v2, 0); if (compare_stack != compare_stack_init) compare_free_stack(); return Val_int(res - 1 <= -1); } -CAMLprim value greaterthan(value v1, value v2) +CAMLprim value caml_greaterthan(value v1, value v2) { long res = compare_val(v1, v2, 0); if (compare_stack != compare_stack_init) compare_free_stack(); return Val_int(res > 0); } -CAMLprim value greaterequal(value v1, value v2) +CAMLprim value caml_greaterequal(value v1, value v2) { long res = compare_val(v1, v2, 0); if (compare_stack != compare_stack_init) compare_free_stack(); diff --git a/byterun/compare.h b/byterun/compare.h index 159ecf9f1..dc392ff3d 100644 --- a/byterun/compare.h +++ b/byterun/compare.h @@ -16,6 +16,6 @@ #ifndef CAML_COMPARE_H #define CAML_COMPARE_H -CAMLextern int compare_unordered; +CAMLextern int caml_compare_unordered; #endif /* CAML_COMPARE_H */ diff --git a/byterun/compatibility.h b/byterun/compatibility.h index 1137f1f10..4419e50c8 100644 --- a/byterun/compatibility.h +++ b/byterun/compatibility.h @@ -20,6 +20,12 @@ #ifndef CAML_NAME_SPACE +/* codage: + #define --> CAMLextern (CAMLexport ou CAMLprim) + (rien) --> CAMLprim + g --> ident global C +*/ + /* **** alloc.c */ #define alloc caml_alloc #define alloc_small caml_alloc_small @@ -34,10 +40,56 @@ /* update_dummy -> caml_update_dummy */ /* **** array.c */ +/* array_get_addr -> caml_array_get_addr */ +/* array_get_float -> caml_array_get_float */ +/* array_get -> caml_array_get */ +/* array_set_addr -> caml_array_set_addr */ +/* array_set_float -> caml_array_set_float */ +/* array_set -> caml_array_set */ +/* array_unsafe_get_float -> caml_array_unsafe_get_float */ +/* array_unsafe_get -> caml_array_unsafe_get */ +/* array_unsafe_set_addr -> caml_array_unsafe_set_addr */ +/* array_unsafe_set_float -> caml_array_unsafe_set_float */ +/* array_unsafe_set -> caml_array_unsafe_set */ +/* make_vect -> caml_make_vect */ +/* make_array -> caml_make_array */ + /* **** backtrace.c */ +#define backtrace_active caml_backtrace_active +#define backtrace_pos caml_backtrace_pos +#define backtrace_buffer caml_backtrace_buffer +#define backtrace_last_exn caml_backtrace_last_exn +/* g init_backtrace -> caml_init_backtrace */ +/* g stash_backtrace -> caml_stash_backtrace */ +#define print_exception_backtrace caml_print_exception_backtrace + /* **** callback.c */ +#define callback_depth caml_callback_depth /*FIXME CAMLextern sans CAMLexport */ +#define callbackN_exn caml_callbackN_exn +#define callback_exn caml_callback_exn +#define callback2_exn caml_callback2_exn +#define callback3_exn caml_callback3_exn +#define callback caml_callback +#define callback2 caml_callback2 +#define callback3 caml_callback3 +#define callbackN caml_callbackN +/* register_named_value -> caml_register_named_value */ + /* **** compact.c */ +/* g compact_heap -> 
caml_compact_heap */ +/* g percent_max -> caml_percent_max */ +/* g compact_heap_maybe -> caml_compact_heap_maybe */ + /* **** compare.c */ +#define compare_unordered caml_compare_unordered +/* compare -> caml_compare */ +/* equal -> caml_equal */ +/* notequal -> caml_notequal */ +/* lessthan -> caml_lessthan */ +/* lessequal -> caml_lessequal */ +/* greaterthan -> caml_greaterthan */ +/* greaterequal -> caml_greaterequal */ + /* **** custom.c */ /* **** debugger.c */ /* **** dynlink.c */ @@ -113,13 +165,77 @@ /* #define File_offset_val caml_File_offset_val *** done in io.h */ /* **** lexing.c */ -/* **** macintosh.c (a supprimer) */ +/* lex_engine -> caml_lex_engine */ +/* new_lex_engine -> caml_new_lex_engine */ + /* **** main.c */ +/* no change */ + /* **** major_gc.c */ +/* g percent_free -> caml_percent_free */ +/* g major_heap_increment -> caml_major_heap_increment */ +#define heap_start caml_heap_start /* FIXME CAMLextern sans CAMLexport */ +#define heap_end caml_heap_end /* FIXME CAMLextern sans CAMLexport */ +#define page_table caml_page_table /* FIXME CAMLextern sans CAMLexport */ +/* g page_low -> caml_page_low */ +/* g page_high -> caml_page_high */ +/* g gc_sweep_hp -> caml_gc_sweep_hp */ +/* g gc_phase -> caml_gc_phase */ +/* g allocated_words -> caml_allocated_words */ +/* g extra_heap_memory -> caml_extra_heap_memory */ +/* g fl_size_at_phase_change -> caml_fl_size_at_phase_change */ +/* g darken -> caml_darken */ +/* g major_collection_slice -> caml_major_collection_slice */ +/* g finish_major_cycle -> caml_finish_major_cycle */ +/* g round_heap_chunk_size -> caml_round_heap_chunk_size */ +/* g init_major_heap -> caml_init_major_heap */ + /* **** md5.c */ +#define md5_string caml_md5_string +#define md5_chan caml_md5_chan +#define MD5Init caml_MD5Init +#define MD5Update caml_MD5Update +#define MD5Final caml_MD5Final +#define MD5Transform caml_MD5Transform + /* **** memory.c */ +/* g alloc_for_heap -> caml_alloc_for_heap */ +/* g free_for_heap -> caml_free_for_heap */ +/* g add_to_heap -> caml_add_to_heap */ +/* g shrink_heap -> caml_shrink_heap */ +/* g allocation_color -> caml_allocation_color */ +#define alloc_shr caml_alloc_shr /* FIXME CAMLextern sans CAMLexport */ +/* g adjust_gc_speed -> caml_adjust_gc_speed FIXME pas CAMLextern ? */ +#define initialize caml_initialize +#define modify caml_modify +#define stat_alloc caml_stat_alloc /* FIXME CAMLextern sans CAMLexport */ +#define stat_free caml_stat_free /* FIXME CAMLextern sans CAMLexport */ +#define stat_resize caml_stat_resize /* FIXME CAMLextern sans CAMLexport */ + /* **** meta.c */ +/* get_global_data -> caml_get_global_data */ +/* reify_bytecode -> caml_reify_bytecode FIXME missing in native code ? */ +/* realloc_global -> caml_realloc_global */ +/* g available_primitives -> caml_available_primitives FIXME useless ? 
*/ +/* get_current_environment -> caml_get_current_environment */ +/* invoke_traced_function -> caml_invoke_traced_function */ + /* **** minor_gc.c */ +/* g minor_heap_size -> caml_minor_heap_size */ +#define young_start caml_young_start /* FIXME CAMLextern sans CAMLexport */ +#define young_end caml_young_end /* FIXME CAMLextern sans CAMLexport */ +#define young_ptr caml_young_ptr /* FIXME CAMLextern sans CAMLexport */ +#define young_limit caml_young_limit /* FIXME CAMLextern sans CAMLexport */ +#define ref_table_ptr caml_ref_table_ptr /* FIXME CAMLextern sans CAMLexport */ +#define ref_table_limit caml_ref_table_limit /* FIXME CAMLextern sans CAMLexpo*/ +/* g in_minor_collection -> caml_in_minor_collection */ +/* g set_minor_heap_size -> caml_set_minor_heap_size */ +/* g oldify_one -> caml_oldify_one */ +/* g oldify_mopup -> caml_oldify_mopup */ +/* g empty_minor_heap -> caml_empty_minor_heap */ +#define minor_collection caml_minor_collection /*FIXME CAMLextern sans CAMLexp*/ +#define check_urgent_gc caml_check_urgent_gc /*FIXME CAMLextern sans CAMLexpor*/ +/* g realloc_ref_table -> caml_realloc_ref_table */ /* **** misc.c */ /* g verb_gc -> caml_verb_gc */ @@ -132,16 +248,33 @@ /* g ext_table_add -> caml_ext_table_add */ /* g ext_table_free -> caml_ext_table_free */ -/* **** mpwtool.c (a supprimer) */ /* **** obj.c */ /* **** parsing.c */ -/* **** prims.c */ +/* **** prims.c (changer Makefile) */ /* **** printexc.c */ /* **** roots.c check asmrun */ -/* **** rotatecursor.c (a supprimer) */ /* **** signals.c check asmrun */ + /* **** stacks.c */ -/* **** startup.c check asmrun */ +#define stack_low caml_stack_low +#define stack_high caml_stack_high +#define stack_threshold caml_stack_threshold +#define extern_sp caml_extern_sp +#define trapsp caml_trapsp +#define trap_barrier caml_trap_barrier +/* g global_data -> caml_global_data */ +/* g max_stack_size -> caml_max_stack_size */ +/* g init_stack -> caml_init_stack */ +/* g realloc_stack -> caml_realloc_stack */ +/* ensure_stack_capacity -> caml_ensure_stack_capacity */ +/* g change_max_stack_size -> caml_change_max_stack_size */ + +/* **** startup.c */ +#define atom_table caml_atom_table +/* g attempt_open -> caml_attempt_open */ +/* g read_section_descriptors -> caml_read_section_descriptors */ +/* g seek_optional_section -> caml_seek_optional_section */ +/* g seek_section -> caml_seek_section */ /* **** str.c */ #define string_length caml_string_length @@ -185,6 +318,12 @@ /* **** win32.c */ +/* a supprimer (support Mac OS 9): */ +/* **** macintosh.c */ +/* **** mpwtool.c */ +/* **** rotatecursor.c */ + + #endif /* CAML_NAME_SPACE */ #endif /* CAML_COMPATIBILITY_H */ diff --git a/byterun/custom.c b/byterun/custom.c index 9f9321e37..de10d9314 100644 --- a/byterun/custom.c +++ b/byterun/custom.c @@ -34,10 +34,10 @@ CAMLextern value alloc_custom(struct custom_operations * ops, result = caml_alloc_small(wosize, Custom_tag); Custom_ops_val(result) = ops; } else { - result = alloc_shr(wosize, Custom_tag); + result = caml_alloc_shr(wosize, Custom_tag); Custom_ops_val(result) = ops; - adjust_gc_speed(mem, max); - result = check_urgent_gc(result); + caml_adjust_gc_speed(mem, max); + result = caml_check_urgent_gc(result); } return result; } @@ -52,7 +52,7 @@ static struct custom_operations_list * custom_ops_table = NULL; CAMLextern void register_custom_operations(struct custom_operations * ops) { struct custom_operations_list * l = - stat_alloc(sizeof(struct custom_operations_list)); + caml_stat_alloc(sizeof(struct custom_operations_list)); 
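The compatibility.h block above is the backward-compatibility half of the renaming. Its French legend ("codage") says that a #define alias is emitted for CAMLextern names (whether CAMLexport or CAMLprim in the .c file), that "(rien)" — nothing, i.e. a bare comment — is used for plain CAMLprim primitives, and that "g" marks ordinary global C identifiers; "a supprimer" means "to be removed", "FIXME CAMLextern sans CAMLexport" means "CAMLextern without CAMLexport", and "changer Makefile" means "update the Makefile". As an illustration (not part of the patch; the stub name is invented, and it is assumed the compatibility header is reached through the public caml/ headers, which is what the #ifndef CAML_NAME_SPACE guard is for), an existing stub keeps compiling unchanged:

#include <caml/mlvalues.h>
#include <caml/memory.h>

/* Hypothetical pre-renaming stub: [modify] still compiles because
   compatibility.h defines it to caml_modify.  Building with
   -DCAML_NAME_SPACE drops the aliases, and the call must then be
   written caml_modify(&Field(block, 0), v). */
CAMLprim value old_set_first_field(value block, value v)
{
  modify(&Field(block, 0), v);
  return Val_unit;
}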
Assert(ops->identifier != NULL); Assert(ops->deserialize != NULL); l->ops = ops; @@ -76,14 +76,14 @@ struct custom_operations * final_custom_operations(final_fun fn) struct custom_operations * ops; for (l = custom_ops_final_table; l != NULL; l = l->next) if (l->ops->finalize == fn) return l->ops; - ops = stat_alloc(sizeof(struct custom_operations)); + ops = caml_stat_alloc(sizeof(struct custom_operations)); ops->identifier = "_final"; ops->finalize = fn; ops->compare = custom_compare_default; ops->hash = custom_hash_default; ops->serialize = custom_serialize_default; ops->deserialize = custom_deserialize_default; - l = stat_alloc(sizeof(struct custom_operations_list)); + l = caml_stat_alloc(sizeof(struct custom_operations_list)); l->ops = ops; l->next = custom_ops_final_table; custom_ops_final_table = l; diff --git a/byterun/debugger.c b/byterun/debugger.c index 31d1315fb..ad6a13260 100644 --- a/byterun/debugger.c +++ b/byterun/debugger.c @@ -129,7 +129,7 @@ void debugger_init(void) } open_connection(); debugger_in_use = 1; - trap_barrier = stack_high; + caml_trap_barrier = caml_stack_high; } static value getval(struct channel *chan) @@ -177,7 +177,7 @@ void debugger(enum event_kind event) /* Reset current frame */ frame_number = 0; - frame = extern_sp + 1; + frame = caml_extern_sp + 1; /* Report the event to the debugger */ switch(event) { @@ -201,7 +201,7 @@ void debugger(enum event_kind event) } caml_putword(dbg_out, event_count); if (event == EVENT_COUNT || event == BREAKPOINT) { - caml_putword(dbg_out, stack_high - frame); + caml_putword(dbg_out, caml_stack_high - frame); caml_putword(dbg_out, (Pc(frame) - start_code) * sizeof(opcode_t)); } else { /* No PC and no stack frame associated with other events */ @@ -254,11 +254,11 @@ void debugger(enum event_kind event) wait(NULL); break; case REQ_INITIAL_FRAME: - frame = extern_sp + 1; + frame = caml_extern_sp + 1; /* Fall through */ case REQ_GET_FRAME: - caml_putword(dbg_out, stack_high - frame); - if (frame < stack_high){ + caml_putword(dbg_out, caml_stack_high - frame); + if (frame < caml_stack_high){ caml_putword(dbg_out, (Pc(frame) - start_code) * sizeof(opcode_t)); }else{ caml_putword (dbg_out, 0); @@ -267,22 +267,22 @@ void debugger(enum event_kind event) break; case REQ_SET_FRAME: i = caml_getword(dbg_in); - frame = stack_high - i; + frame = caml_stack_high - i; break; case REQ_UP_FRAME: i = caml_getword(dbg_in); - if (frame + Extra_args(frame) + i + 3 >= stack_high) { + if (frame + Extra_args(frame) + i + 3 >= caml_stack_high) { caml_putword(dbg_out, -1); } else { frame += Extra_args(frame) + i + 3; - caml_putword(dbg_out, stack_high - frame); + caml_putword(dbg_out, caml_stack_high - frame); caml_putword(dbg_out, (Pc(frame) - start_code) * sizeof(opcode_t)); } caml_flush(dbg_out); break; case REQ_SET_TRAP_BARRIER: i = caml_getword(dbg_in); - trap_barrier = stack_high - i; + caml_trap_barrier = caml_stack_high - i; break; case REQ_GET_LOCAL: i = caml_getword(dbg_in); @@ -296,11 +296,11 @@ void debugger(enum event_kind event) break; case REQ_GET_GLOBAL: i = caml_getword(dbg_in); - putval(dbg_out, Field(global_data, i)); + putval(dbg_out, Field(caml_global_data, i)); caml_flush(dbg_out); break; case REQ_GET_ACCU: - putval(dbg_out, *extern_sp); + putval(dbg_out, *caml_extern_sp); caml_flush(dbg_out); break; case REQ_GET_HEADER: diff --git a/byterun/dynlink.c b/byterun/dynlink.c index 1d4459afe..3a53d0d4c 100644 --- a/byterun/dynlink.c +++ b/byterun/dynlink.c @@ -81,18 +81,18 @@ static char * parse_ld_conf(void) stdlib = 
getenv("OCAMLLIB"); if (stdlib == NULL) stdlib = getenv("CAMLLIB"); if (stdlib == NULL) stdlib = OCAML_STDLIB_DIR; - ldconfname = stat_alloc(strlen(stdlib) + 2 + sizeof(LD_CONF_NAME)); + ldconfname = caml_stat_alloc(strlen(stdlib) + 2 + sizeof(LD_CONF_NAME)); strcpy(ldconfname, stdlib); strcat(ldconfname, "/" LD_CONF_NAME); if (stat(ldconfname, &st) == -1) { - stat_free(ldconfname); + caml_stat_free(ldconfname); return NULL; } ldconf = open(ldconfname, O_RDONLY, 0); if (ldconf == -1) caml_fatal_error_arg("Fatal error: cannot read loader config file %s\n", ldconfname); - config = stat_alloc(st.st_size + 1); + config = caml_stat_alloc(st.st_size + 1); nread = read(ldconf, config, st.st_size); if (nread == -1) caml_fatal_error_arg @@ -109,7 +109,7 @@ static char * parse_ld_conf(void) } if (q < p) caml_ext_table_add(&shared_libs_path, q); close(ldconf); - stat_free(ldconfname); + caml_stat_free(ldconfname); return config; } @@ -128,7 +128,7 @@ static void open_shared_lib(char * name) caml_fatal_error_arg2("Fatal error: cannot load shared library %s\n", name, "Reason: %s\n", caml_dlerror()); caml_ext_table_add(&shared_libs, handle); - stat_free(realname); + caml_stat_free(realname); } /* Build the table of primitives, given a search path and a list @@ -171,12 +171,12 @@ void build_primitive_table(char * lib_path, #endif } /* Clean up */ - stat_free(tofree1); - stat_free(tofree2); + caml_stat_free(tofree1); + caml_stat_free(tofree2); caml_ext_table_free(&shared_libs_path, 0); } -#endif +#endif /* NATIVE_CODE */ /** dlopen interface for the bytecode linker **/ @@ -250,4 +250,4 @@ value dynlink_get_current_libs(value unit) return Val_unit; /* not reached */ } -#endif +#endif /* NATIVE_CODE */ diff --git a/byterun/extern.c b/byterun/extern.c index 1bdd1c64e..22baf8c0a 100644 --- a/byterun/extern.c +++ b/byterun/extern.c @@ -62,7 +62,7 @@ static void alloc_extern_table(void) { asize_t i; extern_table = (struct extern_obj *) - stat_alloc(extern_table_size * sizeof(struct extern_obj)); + caml_stat_alloc(extern_table_size * sizeof(struct extern_obj)); for (i = 0; i < extern_table_size; i++) extern_table[i].ofs = 0; } @@ -91,7 +91,7 @@ static void resize_extern_table(void) extern_table[h].obj = obj; } } - stat_free(oldtable); + caml_stat_free(oldtable); } /* Free the extern table. 
We keep it around for next call if @@ -101,7 +101,7 @@ static void free_extern_table(void) { if (extern_table_size > INITIAL_EXTERN_TABLE_SIZE || initial_ofs >= INITIAL_OFFSET_MAX) { - stat_free(extern_table); + caml_stat_free(extern_table); extern_table = NULL; } } @@ -113,7 +113,7 @@ static int extern_block_malloced; static void alloc_extern_block(void) { - extern_block = stat_alloc(INITIAL_EXTERN_BLOCK_SIZE); + extern_block = caml_stat_alloc(INITIAL_EXTERN_BLOCK_SIZE); extern_limit = extern_block + INITIAL_EXTERN_BLOCK_SIZE; extern_ptr = extern_block; extern_block_malloced = 1; @@ -132,7 +132,7 @@ static void resize_extern_block(int required) size = extern_limit - extern_block; reqd_size = curr_pos + required; while (size <= reqd_size) size *= 2; - extern_block = stat_resize(extern_block, size); + extern_block = caml_stat_resize(extern_block, size); extern_limit = extern_block + size; extern_ptr = extern_block + curr_pos; } @@ -216,7 +216,7 @@ static int extern_closures; /* Flag to allow externing code pointers */ static void extern_invalid_argument(char *msg) { - if (extern_block_malloced) stat_free(extern_block); + if (extern_block_malloced) caml_stat_free(extern_block); initial_ofs += obj_counter; free_extern_table(); invalid_argument(msg); @@ -453,7 +453,7 @@ void output_val(struct channel *chan, value v, value flags) and extern_block may change. So, save the pointer in a local variable. */ block = extern_block; caml_really_putblock(chan, extern_block, len); - stat_free(block); + caml_stat_free(block); } CAMLprim value output_value(value vchan, value v, value flags) @@ -475,7 +475,7 @@ CAMLprim value output_value_to_string(value v, value flags) len = extern_value(v, flags); res = caml_alloc_string(len); memmove(String_val(res), extern_block, len); - stat_free(extern_block); + caml_stat_free(extern_block); return res; } diff --git a/byterun/fail.c b/byterun/fail.c index b8faa8ee6..eea5c2b15 100644 --- a/byterun/fail.c +++ b/byterun/fail.c @@ -73,12 +73,12 @@ CAMLexport void raise_with_string(value tag, char *msg) CAMLexport void failwith (char *msg) { - raise_with_string(Field(global_data, FAILURE_EXN), msg); + raise_with_string(Field(caml_global_data, FAILURE_EXN), msg); } CAMLexport void invalid_argument (char *msg) { - raise_with_string(Field(global_data, INVALID_EXN), msg); + raise_with_string(Field(caml_global_data, INVALID_EXN), msg); } CAMLexport void array_bound_error(void) @@ -105,32 +105,32 @@ CAMLexport void raise_out_of_memory(void) CAMLexport void raise_stack_overflow(void) { - raise_constant(Field(global_data, STACK_OVERFLOW_EXN)); + raise_constant(Field(caml_global_data, STACK_OVERFLOW_EXN)); } CAMLexport void raise_sys_error(value msg) { - raise_with_arg(Field(global_data, SYS_ERROR_EXN), msg); + raise_with_arg(Field(caml_global_data, SYS_ERROR_EXN), msg); } CAMLexport void raise_end_of_file(void) { - raise_constant(Field(global_data, END_OF_FILE_EXN)); + raise_constant(Field(caml_global_data, END_OF_FILE_EXN)); } CAMLexport void raise_zero_divide(void) { - raise_constant(Field(global_data, ZERO_DIVIDE_EXN)); + raise_constant(Field(caml_global_data, ZERO_DIVIDE_EXN)); } CAMLexport void raise_not_found(void) { - raise_constant(Field(global_data, NOT_FOUND_EXN)); + raise_constant(Field(caml_global_data, NOT_FOUND_EXN)); } CAMLexport void raise_sys_blocked_io(void) { - raise_constant(Field(global_data, SYS_BLOCKED_IO)); + raise_constant(Field(caml_global_data, SYS_BLOCKED_IO)); } /* Initialization of statically-allocated exception buckets */ @@ -138,6 +138,6 @@ 
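The raise_* entry points above keep their short names in this patch; only the exception buckets they fetch move to caml_global_data. For illustration (not part of the patch; the primitive name checked_byte is invented), a typical C primitive raising one of these predefined exceptions:

#include <caml/mlvalues.h>
#include <caml/fail.h>

/* Hypothetical bounds-checked byte access: on a bad index it raises
   Invalid_argument through invalid_argument(), which does not return. */
CAMLprim value checked_byte(value s, value idx)
{
  long i = Long_val(idx);
  if (i < 0 || (mlsize_t) i >= caml_string_length(s))
    invalid_argument("checked_byte");
  return Val_int(Byte_u(s, i));
}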
CAMLexport void raise_sys_blocked_io(void) void init_exceptions(void) { out_of_memory_bucket.hdr = Make_header(1, 0, Caml_white); - out_of_memory_bucket.exn = Field(global_data, OUT_OF_MEMORY_EXN); + out_of_memory_bucket.exn = Field(caml_global_data, OUT_OF_MEMORY_EXN); register_global_root(&out_of_memory_bucket.exn); } diff --git a/byterun/finalise.c b/byterun/finalise.c index 659031f97..94a1d525c 100644 --- a/byterun/finalise.c +++ b/byterun/finalise.c @@ -72,7 +72,7 @@ void final_update (void) } } young = old; - for (i = active; i < oldactive; i++) darken (final_table[i].val, NULL); + for (i = active; i < oldactive; i++) caml_darken (final_table[i].val, NULL); } /* Call the finalisation functions for the finalising set. @@ -87,7 +87,7 @@ void final_do_calls (void) caml_gc_message (0x80, "Calling finalisation functions.\n", 0); while (active < size){ f = final_table[active++]; - callback (f.fun, f.val); + caml_callback (f.fun, f.val); } caml_gc_message (0x80, "Done calling finalisation functions.\n", 0); } @@ -164,14 +164,15 @@ CAMLprim value final_register (value f, value v) if (young >= active){ if (final_table == NULL){ unsigned long new_size = 30; - final_table = stat_alloc (new_size * sizeof (struct final)); + final_table = caml_stat_alloc (new_size * sizeof (struct final)); Assert (old == 0); Assert (young == 0); active = size = new_size; }else{ unsigned long new_size = size * 2; unsigned long i; - final_table = stat_resize (final_table, new_size * sizeof (struct final)); + final_table = caml_stat_resize (final_table, + new_size * sizeof (struct final)); for (i = size-1; i >= active; i--){ final_table[i + new_size - size] = final_table[i]; } diff --git a/byterun/fix_code.c b/byterun/fix_code.c index d0969e3c3..81fef5731 100644 --- a/byterun/fix_code.c +++ b/byterun/fix_code.c @@ -43,18 +43,18 @@ void load_code(int fd, asize_t len) struct MD5Context ctx; code_size = len; - start_code = (code_t) stat_alloc(code_size); + start_code = (code_t) caml_stat_alloc(code_size); if (read(fd, (char *) start_code, code_size) != code_size) caml_fatal_error("Fatal error: truncated bytecode file.\n"); - MD5Init(&ctx); - MD5Update(&ctx, (unsigned char *) start_code, code_size); - MD5Final(code_md5, &ctx); + caml_MD5Init(&ctx); + caml_MD5Update(&ctx, (unsigned char *) start_code, code_size); + caml_MD5Final(code_md5, &ctx); #ifdef ARCH_BIG_ENDIAN fixup_endianness(start_code, code_size); #endif if (debugger_in_use) { len /= sizeof(opcode_t); - saved_code = (unsigned char *) stat_alloc(len); + saved_code = (unsigned char *) caml_stat_alloc(len); for (i = 0; i < len; i++) saved_code[i] = start_code[i]; } #ifdef THREADED_CODE diff --git a/byterun/floats.c b/byterun/floats.c index 87fd28a48..0ec56ffcd 100644 --- a/byterun/floats.c +++ b/byterun/floats.c @@ -96,12 +96,12 @@ CAMLprim value format_float(value fmt, value arg) if (prec < sizeof(format_buffer)) { dest = format_buffer; } else { - dest = stat_alloc(prec); + dest = caml_stat_alloc(prec); } sprintf(dest, String_val(fmt), Double_val(arg)); res = caml_copy_string(dest); if (dest != format_buffer) { - stat_free(dest); + caml_stat_free(dest); } return res; } @@ -114,7 +114,7 @@ CAMLprim value float_of_string(value vs) double d; len = caml_string_length(vs); - buf = len < sizeof(parse_buffer) ? parse_buffer : stat_alloc(len + 1); + buf = len < sizeof(parse_buffer) ? 
parse_buffer : caml_stat_alloc(len + 1); src = String_val(vs); dst = buf; while (len--) { @@ -124,7 +124,7 @@ CAMLprim value float_of_string(value vs) *dst = 0; if (dst == buf) failwith("float_of_string"); d = strtod((const char *) buf, &end); - if (buf != parse_buffer) stat_free(buf); + if (buf != parse_buffer) caml_stat_free(buf); if (end != dst) failwith("float_of_string"); return copy_double(d); } diff --git a/byterun/freelist.c b/byterun/freelist.c index 9b43f9ae4..d359db591 100644 --- a/byterun/freelist.c +++ b/byterun/freelist.c @@ -157,7 +157,7 @@ void fl_init_merge (void) #endif } -/* This is called by compact_heap. */ +/* This is called by caml_compact_heap. */ void fl_reset (void) { Next (Fl_head) = 0; @@ -284,10 +284,10 @@ void fl_add_block (char *bp) Assert (cur > bp || cur == NULL); Next (bp) = cur; Next (prev) = bp; - /* When inserting a block between fl_merge and gc_sweep_hp, we must + /* When inserting a block between fl_merge and caml_gc_sweep_hp, we must advance fl_merge to the new block, so that fl_merge is always the - last free-list block before gc_sweep_hp. */ - if (prev == fl_merge && bp <= gc_sweep_hp) fl_merge = bp; + last free-list block before caml_gc_sweep_hp. */ + if (prev == fl_merge && bp <= caml_gc_sweep_hp) fl_merge = bp; } } diff --git a/byterun/gc_ctrl.c b/byterun/gc_ctrl.c index eadd9b084..88a377ff6 100644 --- a/byterun/gc_ctrl.c +++ b/byterun/gc_ctrl.c @@ -26,7 +26,7 @@ #include "stacks.h" #ifndef NATIVE_CODE -extern unsigned long max_stack_size; /* defined in stacks.c */ +extern unsigned long caml_max_stack_size; /* defined in stacks.c */ #endif double stat_minor_words = 0.0, @@ -40,9 +40,9 @@ long stat_minor_collections = 0, stat_compactions = 0, stat_heap_chunks = 0; -extern asize_t major_heap_increment; /* bytes; see major_gc.c */ -extern unsigned long percent_free; /* see major_gc.c */ -extern unsigned long percent_max; /* see compact.c */ +extern asize_t caml_major_heap_increment; /* bytes; see major_gc.c */ +extern unsigned long caml_percent_free; /* see major_gc.c */ +extern unsigned long caml_percent_max; /* see compact.c */ #define Next(hp) ((hp) + Bhsize_hp (hp)) @@ -85,7 +85,7 @@ static void check_block (char *hp) switch (Tag_hp (hp)){ case Abstract_tag: break; case String_tag: - /* not true when check_urgent_gc is called by caml_alloc + /* not true when [caml_check_urgent_gc] is called by [caml_alloc] or caml_alloc_string: lastbyte = Bosize_val (v) - 1; i = Byte (v, lastbyte); @@ -129,7 +129,7 @@ static value heap_stats (int returnstats) long live_words = 0, live_blocks = 0, free_words = 0, free_blocks = 0, largest_free = 0, fragments = 0, heap_chunks = 0; - char *chunk = heap_start, *chunk_end; + char *chunk = caml_heap_start, *chunk_end; char *cur_hp, *prev_hp; header_t cur_hd; @@ -151,9 +151,9 @@ static value heap_stats (int returnstats) ++ fragments; Assert (prev_hp == NULL || Color_hp (prev_hp) != Caml_blue - || cur_hp == gc_sweep_hp); + || cur_hp == caml_gc_sweep_hp); }else{ - if (gc_phase == Phase_sweep && cur_hp >= gc_sweep_hp){ + if (caml_gc_phase == Phase_sweep && cur_hp >= caml_gc_sweep_hp){ ++ free_blocks; free_words += Whsize_hd (cur_hd); if (Whsize_hd (cur_hd) > largest_free){ @@ -186,12 +186,12 @@ static value heap_stats (int returnstats) /* not true any more with big heap chunks Assert (prev_hp == NULL || (Color_hp (prev_hp) != Caml_blue && Wosize_hp (prev_hp) > 0) - || cur_hp == gc_sweep_hp); + || cur_hp == caml_gc_sweep_hp); Assert (Next (cur_hp) == chunk_end || (Color_hp (Next (cur_hp)) != Caml_blue && Wosize_hp (Next 
(cur_hp)) > 0) || (Whsize_hd (cur_hd) + Wosize_hp (Next (cur_hp)) > Max_wosize) - || Next (cur_hp) == gc_sweep_hp); + || Next (cur_hp) == caml_gc_sweep_hp); */ break; } @@ -209,9 +209,9 @@ static value heap_stats (int returnstats) /* get a copy of these before allocating anything... */ double minwords = stat_minor_words - + (double) Wsize_bsize (young_end - young_ptr); + + (double) Wsize_bsize (caml_young_end - caml_young_ptr); double prowords = stat_promoted_words; - double majwords = stat_major_words + (double) allocated_words; + double majwords = stat_major_words + (double) caml_allocated_words; long mincoll = stat_minor_collections; long majcoll = stat_major_collections; long heap_words = Wsize_bsize (stat_heap_size); @@ -260,9 +260,9 @@ CAMLprim value gc_counters(value v) /* get a copy of these before allocating anything... */ double minwords = stat_minor_words - + (double) Wsize_bsize (young_end - young_ptr); + + (double) Wsize_bsize (caml_young_end - caml_young_ptr); double prowords = stat_promoted_words; - double majwords = stat_major_words + (double) allocated_words; + double majwords = stat_major_words + (double) caml_allocated_words; res = caml_alloc_tuple (3); Store_field (res, 0, copy_double (minwords)); @@ -277,13 +277,13 @@ CAMLprim value gc_get(value v) CAMLlocal1 (res); res = caml_alloc_tuple (6); - Store_field (res, 0, Val_long (Wsize_bsize (minor_heap_size))); /* s */ - Store_field (res, 1, Val_long (Wsize_bsize (major_heap_increment))); /* i */ - Store_field (res, 2, Val_long (percent_free)); /* o */ + Store_field (res, 0, Val_long (Wsize_bsize (caml_minor_heap_size))); /* s */ + Store_field (res, 1,Val_long(Wsize_bsize(caml_major_heap_increment)));/* i */ + Store_field (res, 2, Val_long (caml_percent_free)); /* o */ Store_field (res, 3, Val_long (caml_verb_gc)); /* v */ - Store_field (res, 4, Val_long (percent_max)); /* O */ + Store_field (res, 4, Val_long (caml_percent_max)); /* O */ #ifndef NATIVE_CODE - Store_field (res, 5, Val_long (max_stack_size)); /* l */ + Store_field (res, 5, Val_long (caml_max_stack_size)); /* l */ #else Store_field (res, 5, Val_long (0)); #endif @@ -326,60 +326,60 @@ CAMLprim value gc_set(value v) caml_verb_gc = Long_val (Field (v, 3)); #ifndef NATIVE_CODE - change_max_stack_size (Long_val (Field (v, 5))); + caml_change_max_stack_size (Long_val (Field (v, 5))); #endif newpf = norm_pfree (Long_val (Field (v, 2))); - if (newpf != percent_free){ - percent_free = newpf; - caml_gc_message (0x20, "New space overhead: %d%%\n", percent_free); + if (newpf != caml_percent_free){ + caml_percent_free = newpf; + caml_gc_message (0x20, "New space overhead: %d%%\n", caml_percent_free); } newpm = norm_pmax (Long_val (Field (v, 4))); - if (newpm != percent_max){ - percent_max = newpm; - caml_gc_message (0x20, "New max overhead: %d%%\n", percent_max); + if (newpm != caml_percent_max){ + caml_percent_max = newpm; + caml_gc_message (0x20, "New max overhead: %d%%\n", caml_percent_max); } newheapincr = Bsize_wsize (norm_heapincr (Long_val (Field (v, 1)))); - if (newheapincr != major_heap_increment){ - major_heap_increment = newheapincr; + if (newheapincr != caml_major_heap_increment){ + caml_major_heap_increment = newheapincr; caml_gc_message (0x20, "New heap increment size: %luk bytes\n", - major_heap_increment/1024); + caml_major_heap_increment/1024); } /* Minor heap size comes last because it will trigger a minor collection (thus invalidating [v]) and it can raise [Out_of_memory]. 
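The gc_counters and gc_get hunks above show how these primitives hand a block of statistics back to Caml: allocate a tuple, then fill it with Store_field (which the memory.h hunk further down redefines in terms of caml_modify). A minimal sketch of the same pattern for a hypothetical primitive returning three counters; example_counters and the static counter variables are illustrative only, not part of this patch:

    /* Sketch only: return three counters to Caml as a tuple, following the
       caml_alloc_tuple / Store_field pattern of gc_get and gc_counters. */
    #include "mlvalues.h"
    #include "alloc.h"
    #include "memory.h"

    static long example_minor = 0, example_major = 0, example_compactions = 0;

    CAMLprim value example_counters(value unit)
    {
      CAMLparam1 (unit);
      CAMLlocal1 (res);
      res = caml_alloc_tuple (3);                        /* may trigger a minor GC */
      Store_field (res, 0, Val_long (example_minor));    /* expands to caml_modify */
      Store_field (res, 1, Val_long (example_major));
      Store_field (res, 2, Val_long (example_compactions));
      CAMLreturn (res);
    }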
*/ newminsize = norm_minsize (Bsize_wsize (Long_val (Field (v, 0)))); - if (newminsize != minor_heap_size){ + if (newminsize != caml_minor_heap_size){ caml_gc_message (0x20, "New minor heap size: %luk bytes\n", newminsize/1024); - set_minor_heap_size (newminsize); + caml_set_minor_heap_size (newminsize); } return Val_unit; } CAMLprim value gc_minor(value v) { Assert (v == Val_unit); - minor_collection (); + caml_minor_collection (); return Val_unit; } CAMLprim value gc_major(value v) { Assert (v == Val_unit); - empty_minor_heap (); - finish_major_cycle (); + caml_empty_minor_heap (); + caml_finish_major_cycle (); final_do_calls (); return Val_unit; } CAMLprim value gc_full_major(value v) { Assert (v == Val_unit); - empty_minor_heap (); - finish_major_cycle (); + caml_empty_minor_heap (); + caml_finish_major_cycle (); final_do_calls (); - empty_minor_heap (); - finish_major_cycle (); + caml_empty_minor_heap (); + caml_finish_major_cycle (); final_do_calls (); return Val_unit; } @@ -387,16 +387,16 @@ CAMLprim value gc_full_major(value v) CAMLprim value gc_major_slice (value v) { Assert (Is_long (v)); - empty_minor_heap (); - return Val_long (major_collection_slice (Long_val (v))); + caml_empty_minor_heap (); + return Val_long (caml_major_collection_slice (Long_val (v))); } CAMLprim value gc_compaction(value v) { Assert (v == Val_unit); - empty_minor_heap (); - finish_major_cycle (); - finish_major_cycle (); - compact_heap (); + caml_empty_minor_heap (); + caml_finish_major_cycle (); + caml_finish_major_cycle (); + caml_compact_heap (); return Val_unit; } @@ -414,17 +414,17 @@ void init_gc (unsigned long minor_size, unsigned long major_size, "###\n", 0); #endif - set_minor_heap_size (Bsize_wsize (norm_minsize (minor_size))); - major_heap_increment = Bsize_wsize (norm_heapincr (major_incr)); - percent_free = norm_pfree (percent_fr); - percent_max = norm_pmax (percent_m); - init_major_heap (major_heap_size); + caml_set_minor_heap_size (Bsize_wsize (norm_minsize (minor_size))); + caml_major_heap_increment = Bsize_wsize (norm_heapincr (major_incr)); + caml_percent_free = norm_pfree (percent_fr); + caml_percent_max = norm_pmax (percent_m); + caml_init_major_heap (major_heap_size); caml_gc_message (0x20, "Initial minor heap size: %luk bytes\n", - minor_heap_size / 1024); + caml_minor_heap_size / 1024); caml_gc_message (0x20, "Initial major heap size: %luk bytes\n", major_heap_size / 1024); - caml_gc_message (0x20, "Initial space overhead: %lu%%\n", percent_free); - caml_gc_message (0x20, "Initial max overhead: %lu%%\n", percent_max); + caml_gc_message (0x20, "Initial space overhead: %lu%%\n", caml_percent_free); + caml_gc_message (0x20, "Initial max overhead: %lu%%\n", caml_percent_max); caml_gc_message (0x20, "Initial heap increment: %luk bytes\n", - major_heap_increment / 1024); + caml_major_heap_increment / 1024); } diff --git a/byterun/globroots.c b/byterun/globroots.c index 2dc55c317..154cabef0 100644 --- a/byterun/globroots.c +++ b/byterun/globroots.c @@ -84,8 +84,8 @@ void register_global_root(value *r) update[i] = (struct global_root *) &caml_global_roots; caml_global_roots.level = new_level; } - e = stat_alloc(sizeof(struct global_root) + - new_level * sizeof(struct global_root *)); + e = caml_stat_alloc(sizeof(struct global_root) + + new_level * sizeof(struct global_root *)); e->root = r; for (i = 0; i <= new_level; i++) { e->forward[i] = update[i]->forward[i]; @@ -121,7 +121,7 @@ void remove_global_root(value *r) update[i]->forward[i] = e->forward[i]; } /* Reclaim list element */ - 
stat_free(e); + caml_stat_free(e); /* Down-correct list level */ while (caml_global_roots.level > 0 && caml_global_roots.forward[caml_global_roots.level] == NULL) diff --git a/byterun/intern.c b/byterun/intern.c index c05d797c8..026cc9c8b 100644 --- a/byterun/intern.c +++ b/byterun/intern.c @@ -37,14 +37,14 @@ static unsigned char * intern_input; Meaningful only if intern_input_malloced = 1. */ static int intern_input_malloced; -/* 1 if intern_input was allocated by stat_alloc() - and needs stat_free() on error, 0 otherwise. */ +/* 1 if intern_input was allocated by caml_stat_alloc() + and needs caml_stat_free() on error, 0 otherwise. */ static header_t * intern_dest; /* Writing pointer in destination block */ static char * intern_extra_block; -/* If non-NULL, point to new heap chunk allocated with alloc_for_heap. */ +/* If non-NULL, point to new heap chunk allocated with caml_alloc_for_heap. */ static asize_t obj_counter; /* Count how many objects seen so far */ @@ -100,11 +100,11 @@ static long read64s(void) static void intern_cleanup(void) { - if (intern_input_malloced) stat_free(intern_input); - if (intern_obj_table != NULL) stat_free(intern_obj_table); + if (intern_input_malloced) caml_stat_free(intern_input); + if (intern_obj_table != NULL) caml_stat_free(intern_obj_table); if (intern_extra_block != NULL) { /* free newly allocated heap chunk */ - free_for_heap(intern_extra_block); + caml_free_for_heap(intern_extra_block); } else if (intern_block != 0) { /* restore original header for heap block, otherwise GC is confused */ Hd_val(intern_block) = intern_header; @@ -339,9 +339,9 @@ static void intern_alloc(mlsize_t whsize, mlsize_t num_objects) /* Round desired size up to next page */ asize_t request = ((Bsize_wsize(whsize) + Page_size - 1) >> Page_log) << Page_log; - intern_extra_block = alloc_for_heap(request); + intern_extra_block = caml_alloc_for_heap(request); if (intern_extra_block == NULL) raise_out_of_memory(); - intern_color = allocation_color(intern_extra_block); + intern_color = caml_allocation_color(intern_extra_block); intern_dest = (header_t *) intern_extra_block; } else { /* this is a specialised version of caml_alloc from alloc.c */ @@ -350,7 +350,7 @@ static void intern_alloc(mlsize_t whsize, mlsize_t num_objects) }else if (wosize <= Max_young_wosize){ intern_block = caml_alloc_small (wosize, String_tag); }else{ - intern_block = alloc_shr (wosize, String_tag); + intern_block = caml_alloc_shr (wosize, String_tag); /* do not do the urgent_gc check here because it might darken intern_block into gray and break the Assert 3 lines down */ } @@ -362,7 +362,7 @@ static void intern_alloc(mlsize_t whsize, mlsize_t num_objects) } obj_counter = 0; if (num_objects > 0) - intern_obj_table = (value *) stat_alloc(num_objects * sizeof(value)); + intern_obj_table = (value *) caml_stat_alloc(num_objects * sizeof(value)); else intern_obj_table = NULL; } @@ -381,7 +381,7 @@ static void intern_add_to_heap(mlsize_t whsize) make_free_blocks ((value *) intern_dest, end_extra_block - intern_dest, 0); } - add_to_heap(intern_extra_block); + caml_add_to_heap(intern_extra_block); } } @@ -401,13 +401,13 @@ value input_val(struct channel *chan) size_32 = caml_getword(chan); size_64 = caml_getword(chan); /* Read block from channel */ - block = stat_alloc(block_len); + block = caml_stat_alloc(block_len); /* During caml_really_getblock, concurrent input_val operations can take place (via signal handlers or context switching in systhreads), and intern_input may change. 
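The globroots.c hunks above move the skip-list bookkeeping to caml_stat_alloc/caml_stat_free, while the entry points register_global_root and remove_global_root keep their unprefixed names in this patch. For reference, a minimal sketch of how C code uses them to keep a Caml value (say, a closure to invoke later with caml_callback) alive across calls; saved_handler and the example_* functions are hypothetical:

    /* Sketch: a registered global root is scanned and updated by the GC,
       so the cell can be stored into with a plain assignment. */
    #include "mlvalues.h"
    #include "callback.h"

    extern void register_global_root(value *);   /* provided by globroots.c */
    extern void remove_global_root(value *);

    static value saved_handler;                  /* holds a Caml closure */

    void example_init(void)
    {
      saved_handler = Val_unit;
      register_global_root(&saved_handler);
    }

    void example_set_handler(value handler)
    {
      saved_handler = handler;                   /* plain store: the cell is a root */
    }

    void example_fire(value arg)
    {
      if (saved_handler != Val_unit) caml_callback(saved_handler, arg);
    }

    void example_shutdown(void)
    {
      remove_global_root(&saved_handler);
    }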
So, wait until caml_really_getblock is over before using intern_input and the other global vars. */ if (caml_really_getblock(chan, block, block_len) == 0) { - stat_free(block); + caml_stat_free(block); failwith("input_value: truncated object"); } intern_input = (unsigned char *) block; @@ -424,8 +424,8 @@ value input_val(struct channel *chan) intern_rec(&res); intern_add_to_heap(whsize); /* Free everything */ - stat_free(intern_input); - if (intern_obj_table != NULL) stat_free(intern_obj_table); + caml_stat_free(intern_input); + if (intern_obj_table != NULL) caml_stat_free(intern_obj_table); return res; } @@ -464,7 +464,7 @@ CAMLexport value input_val_from_string(value str, long int ofs) intern_rec(&obj); intern_add_to_heap(whsize); /* Free everything */ - if (intern_obj_table != NULL) stat_free(intern_obj_table); + if (intern_obj_table != NULL) caml_stat_free(intern_obj_table); CAMLreturn (obj); } @@ -492,7 +492,7 @@ static value input_val_from_block(void) intern_rec(&obj); intern_add_to_heap(whsize); /* Free internal data structures */ - if (intern_obj_table != NULL) stat_free(intern_obj_table); + if (intern_obj_table != NULL) caml_stat_free(intern_obj_table); return obj; } @@ -511,7 +511,7 @@ CAMLexport value input_value_from_malloc(char * data, long ofs) block_len = read32u(); obj = input_val_from_block(); /* Free the input */ - stat_free(intern_input); + caml_stat_free(intern_input); return obj; } @@ -560,11 +560,11 @@ unsigned char * code_checksum() if (! checksum_computed) { struct MD5Context ctx; - MD5Init(&ctx); - MD5Update(&ctx, - (unsigned char *) code_area_start, - code_area_end - code_area_start); - MD5Final(checksum, &ctx); + caml_MD5Init(&ctx); + caml_MD5Update(&ctx, + (unsigned char *) code_area_start, + code_area_end - code_area_start); + caml_MD5Final(checksum, &ctx); checksum_computed = 1; } return checksum; diff --git a/byterun/interp.c b/byterun/interp.c index c15bb59f8..0da6b1d76 100644 --- a/byterun/interp.c +++ b/byterun/interp.c @@ -37,10 +37,10 @@ sp the stack pointer (grows downward) accu the accumulator env heap-allocated environment - trapsp pointer to the current trap frame + caml_trapsp pointer to the current trap frame extra_args number of extra arguments provided by the caller -sp is a local copy of the global variable extern_sp. */ +sp is a local copy of the global variable caml_extern_sp. */ /* Instruction decoding */ @@ -68,10 +68,11 @@ sp is a local copy of the global variable extern_sp. */ /* GC interface */ -#define Setup_for_gc { sp -= 2; sp[0] = accu; sp[1] = env; extern_sp = sp; } +#define Setup_for_gc \ + { sp -= 2; sp[0] = accu; sp[1] = env; caml_extern_sp = sp; } #define Restore_after_gc { accu = sp[0]; env = sp[1]; sp += 2; } -#define Setup_for_c_call { saved_pc = pc; *--sp = env; extern_sp = sp; } -#define Restore_after_c_call { sp = extern_sp; env = *sp++; } +#define Setup_for_c_call { saved_pc = pc; *--sp = env; caml_extern_sp = sp; } +#define Restore_after_c_call { sp = caml_extern_sp; env = *sp++; } /* An event frame must look like accu + a C_CALL frame + a RETURN 1 frame */ #define Setup_for_event \ @@ -82,9 +83,9 @@ sp is a local copy of the global variable extern_sp. 
*/ sp[3] = (value) pc; /* RETURN frame: saved return address */ \ sp[4] = env; /* RETURN frame: saved environment */ \ sp[5] = Val_long(extra_args); /* RETURN frame: saved extra args */ \ - extern_sp = sp; } + caml_extern_sp = sp; } #define Restore_after_event \ - { sp = extern_sp; accu = sp[0]; \ + { sp = caml_extern_sp; accu = sp[0]; \ pc = (code_t) sp[3]; env = sp[4]; extra_args = Long_val(sp[5]); \ sp += 6; } @@ -94,7 +95,7 @@ sp is a local copy of the global variable extern_sp. */ { sp -= 4; \ sp[0] = accu; sp[1] = (value)(pc - 1); \ sp[2] = env; sp[3] = Val_long(extra_args); \ - extern_sp = sp; } + caml_extern_sp = sp; } #define Restore_after_debugger { sp += 4; } #ifdef THREADED_CODE @@ -234,21 +235,21 @@ value interprete(code_t prog, asize_t prog_size) jumptbl_base = Jumptbl_base; #endif initial_local_roots = local_roots; - initial_sp_offset = (char *) stack_high - (char *) extern_sp; + initial_sp_offset = (char *) caml_stack_high - (char *) caml_extern_sp; initial_external_raise = external_raise; - callback_depth++; + caml_callback_depth++; saved_pc = NULL; if (sigsetjmp(raise_buf.buf, 0)) { local_roots = initial_local_roots; - sp = extern_sp; + sp = caml_extern_sp; accu = exn_bucket; pc = saved_pc + 2; /* +2 adjustement for the sole purpose of backtraces */ goto raise_exception; } external_raise = &raise_buf; - sp = extern_sp; + sp = caml_extern_sp; pc = prog; extra_args = 0; env = Atom(0); @@ -258,8 +259,8 @@ value interprete(code_t prog, asize_t prog_size) #ifdef DEBUG next_instr: if (icount-- == 0) stop_here (); - Assert(sp >= stack_low); - Assert(sp <= stack_high); + Assert(sp >= caml_stack_low); + Assert(sp <= caml_stack_high); #endif goto *(void *)(jumptbl_base + *pc++); /* Jump to the first instruction */ #else @@ -267,8 +268,8 @@ value interprete(code_t prog, asize_t prog_size) #ifdef DEBUG if (icount-- == 0) stop_here (); if (trace_flag) disasm_instr(pc); - Assert(sp >= stack_low); - Assert(sp <= stack_high); + Assert(sp >= caml_stack_low); + Assert(sp <= caml_stack_high); #endif curr_instr = *pc++; @@ -570,7 +571,7 @@ value interprete(code_t prog, asize_t prog_size) *--sp = accu; /* Fallthrough */ Instruct(GETGLOBAL): - accu = Field(global_data, *pc); + accu = Field(caml_global_data, *pc); pc++; Next; @@ -578,7 +579,7 @@ value interprete(code_t prog, asize_t prog_size) *--sp = accu; /* Fallthrough */ Instruct(GETGLOBALFIELD): { - accu = Field(global_data, *pc); + accu = Field(caml_global_data, *pc); pc++; accu = Field(accu, *pc); pc++; @@ -586,7 +587,7 @@ value interprete(code_t prog, asize_t prog_size) } Instruct(SETGLOBAL): - modify(&Field(global_data, *pc), accu); + caml_modify(&Field(caml_global_data, *pc), accu); accu = Val_unit; pc++; Next; @@ -615,9 +616,9 @@ value interprete(code_t prog, asize_t prog_size) Field(block, 0) = accu; for (i = 1; i < wosize; i++) Field(block, i) = *sp++; } else { - block = alloc_shr(wosize, tag); - initialize(&Field(block, 0), accu); - for (i = 1; i < wosize; i++) initialize(&Field(block, i), *sp++); + block = caml_alloc_shr(wosize, tag); + caml_initialize(&Field(block, 0), accu); + for (i = 1; i < wosize; i++) caml_initialize(&Field(block, i), *sp++); } accu = block; Next; @@ -658,7 +659,7 @@ value interprete(code_t prog, asize_t prog_size) if (size <= Max_young_wosize / Double_wosize) { Alloc_small(block, size * Double_wosize, Double_array_tag); } else { - block = alloc_shr(size * Double_wosize, Double_array_tag); + block = caml_alloc_shr(size * Double_wosize, Double_array_tag); } Store_double_field(block, 0, Double_val(accu)); for 
(i = 1; i < size; i++){ @@ -784,10 +785,10 @@ value interprete(code_t prog, asize_t prog_size) Instruct(PUSHTRAP): sp -= 4; Trap_pc(sp) = pc + *pc; - Trap_link(sp) = trapsp; + Trap_link(sp) = caml_trapsp; sp[2] = env; sp[3] = Val_long(extra_args); - trapsp = sp; + caml_trapsp = sp; pc++; Next; @@ -799,23 +800,25 @@ value interprete(code_t prog, asize_t prog_size) pc--; /* restart the POPTRAP after processing the signal */ goto process_signal; } - trapsp = Trap_link(sp); + caml_trapsp = Trap_link(sp); sp += 4; Next; Instruct(RAISE): raise_exception: - if (trapsp >= trap_barrier) debugger(TRAP_BARRIER); - if (backtrace_active) stash_backtrace(accu, pc, sp); - if ((char *) trapsp >= (char *) stack_high - initial_sp_offset) { + if (caml_trapsp >= caml_trap_barrier) debugger(TRAP_BARRIER); + if (caml_backtrace_active) caml_stash_backtrace(accu, pc, sp); + if ((char *) caml_trapsp + >= (char *) caml_stack_high - initial_sp_offset) { external_raise = initial_external_raise; - extern_sp = (value *) ((char *) stack_high - initial_sp_offset); - callback_depth--; + caml_extern_sp = (value *) ((char *) caml_stack_high + - initial_sp_offset); + caml_callback_depth--; return Make_exception_result(accu); } - sp = trapsp; + sp = caml_trapsp; pc = Trap_pc(sp); - trapsp = Trap_link(sp); + caml_trapsp = Trap_link(sp); env = sp[2]; extra_args = Long_val(sp[3]); sp += 4; @@ -824,10 +827,10 @@ value interprete(code_t prog, asize_t prog_size) /* Stack checks */ check_stacks: - if (sp < stack_threshold) { - extern_sp = sp; - realloc_stack(Stack_threshold / sizeof(value)); - sp = extern_sp; + if (sp < caml_stack_threshold) { + caml_extern_sp = sp; + caml_realloc_stack(Stack_threshold / sizeof(value)); + sp = caml_extern_sp; } /* Fall through CHECK_SIGNALS */ @@ -1021,8 +1024,8 @@ value interprete(code_t prog, asize_t prog_size) Instruct(STOP): external_raise = initial_external_raise; - extern_sp = sp; - callback_depth--; + caml_extern_sp = sp; + caml_callback_depth--; return accu; Instruct(EVENT): diff --git a/byterun/ints.c b/byterun/ints.c index edda1b736..345765364 100644 --- a/byterun/ints.c +++ b/byterun/ints.c @@ -163,7 +163,7 @@ static char * parse_format(value fmt, if (prec < FORMAT_BUFFER_SIZE) return default_format_buffer; else - return stat_alloc(prec + 1); + return caml_stat_alloc(prec + 1); } CAMLprim value format_int(value fmt, value arg) @@ -184,7 +184,7 @@ CAMLprim value format_int(value fmt, value arg) break; } res = caml_copy_string(buffer); - if (buffer != default_format_buffer) stat_free(buffer); + if (buffer != default_format_buffer) caml_stat_free(buffer); return res; } @@ -314,7 +314,7 @@ CAMLprim value int32_format(value fmt, value arg) buffer = parse_format(fmt, "", format_string, default_format_buffer, &conv); sprintf(buffer, format_string, (long) Int32_val(arg)); res = caml_copy_string(buffer); - if (buffer != default_format_buffer) stat_free(buffer); + if (buffer != default_format_buffer) caml_stat_free(buffer); return res; } @@ -509,7 +509,7 @@ CAMLprim value int64_format(value fmt, value arg) format_string, default_format_buffer, &conv); I64_format(buffer, format_string, Int64_val(arg)); res = caml_copy_string(buffer); - if (buffer != default_format_buffer) stat_free(buffer); + if (buffer != default_format_buffer) caml_stat_free(buffer); return res; } @@ -715,7 +715,7 @@ CAMLprim value nativeint_format(value fmt, value arg) buffer = parse_format(fmt, "l", format_string, default_format_buffer, &conv); sprintf(buffer, format_string, (long) Nativeint_val(arg)); res = 
caml_copy_string(buffer); - if (buffer != default_format_buffer) stat_free(buffer); + if (buffer != default_format_buffer) caml_stat_free(buffer); return res; } diff --git a/byterun/io.c b/byterun/io.c index 54d1a146e..0eba8c51b 100644 --- a/byterun/io.c +++ b/byterun/io.c @@ -65,7 +65,7 @@ CAMLexport struct channel * caml_open_descriptor_in(int fd) { struct channel * channel; - channel = (struct channel *) stat_alloc(sizeof(struct channel)); + channel = (struct channel *) caml_stat_alloc(sizeof(struct channel)); channel->fd = fd; channel->offset = lseek (fd, 0, SEEK_CUR); channel->curr = channel->max = channel->buff; @@ -104,7 +104,7 @@ CAMLexport void caml_close_channel(struct channel *channel) if (channel->refcount > 0) return; if (caml_channel_mutex_free != NULL) (*caml_channel_mutex_free)(channel); unlink_channel(channel); - stat_free(channel); + caml_stat_free(channel); } CAMLexport file_offset caml_channel_size(struct channel *channel) @@ -410,7 +410,7 @@ CAMLexport void caml_finalize_channel(value vchan) if (--chan->refcount > 0) return; if (caml_channel_mutex_free != NULL) (*caml_channel_mutex_free)(chan); unlink_channel(chan); - stat_free(chan); + caml_stat_free(chan); } static int compare_channel(value vchan1, value vchan2) diff --git a/byterun/lexing.c b/byterun/lexing.c index a48c44628..e7afb9106 100644 --- a/byterun/lexing.c +++ b/byterun/lexing.c @@ -56,8 +56,8 @@ struct lexing_table { #define Short(tbl,n) (((short *)(tbl))[(n)]) #endif -CAMLprim value lex_engine(struct lexing_table *tbl, value start_state, - struct lexer_buffer *lexbuf) +CAMLprim value caml_lex_engine(struct lexing_table *tbl, value start_state, + struct lexer_buffer *lexbuf) { int state, base, backtrk, c; @@ -154,8 +154,8 @@ static void run_tag(char *pc, value mem) { } } -CAMLprim value new_lex_engine(struct lexing_table *tbl, value start_state, - struct lexer_buffer *lexbuf) +CAMLprim value caml_new_lex_engine(struct lexing_table *tbl, value start_state, + struct lexer_buffer *lexbuf) { int state, base, backtrk, c, pstate ; state = Int_val(start_state); diff --git a/byterun/major_gc.c b/byterun/major_gc.c index 22b82963c..272b672b3 100644 --- a/byterun/major_gc.c +++ b/byterun/major_gc.c @@ -29,21 +29,21 @@ #include "roots.h" #include "weak.h" -unsigned long percent_free; -long major_heap_increment; -char *heap_start, *heap_end; -page_table_entry *page_table; -asize_t page_low, page_high; -char *gc_sweep_hp; -int gc_phase; /* always Phase_mark, Phase_sweep, or Phase_idle */ +unsigned long caml_percent_free; +long caml_major_heap_increment; +char *caml_heap_start, *caml_heap_end; +page_table_entry *caml_page_table; +asize_t caml_page_low, caml_page_high; +char *caml_gc_sweep_hp; +int caml_gc_phase; /* always Phase_mark, Phase_sweep, or Phase_idle */ static value *gray_vals; -value *gray_vals_cur, *gray_vals_end; +static value *gray_vals_cur, *gray_vals_end; static asize_t gray_vals_size; static int heap_is_pure; /* The heap is pure if the only gray objects below [markhp] are also in [gray_vals]. */ -unsigned long allocated_words; -double extra_heap_memory; -unsigned long fl_size_at_phase_change = 0; +unsigned long caml_allocated_words; +double caml_extra_heap_memory; +unsigned long caml_fl_size_at_phase_change = 0; extern char *fl_merge; /* Defined in freelist.c. 
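Several primitives above (format_int, int32_format, int64_format, nativeint_format, and format_float earlier) share one buffer discipline: use a small fixed buffer when the requested size fits, otherwise fall back to caml_stat_alloc, and release with caml_stat_free once the result has been copied into the Caml heap. A self-contained sketch of that discipline; example_copy_prefix and the 64-byte threshold are illustrative only:

    /* Sketch of the fixed-buffer-with-fallback pattern used above. */
    #include <string.h>
    #include "mlvalues.h"
    #include "alloc.h"
    #include "memory.h"

    CAMLprim value example_copy_prefix(value str, value len)
    {
      char local[64];
      long n = Long_val(len);      /* assumed: 0 <= n <= caml_string_length(str) */
      char *buf = (n + 1 <= (long) sizeof(local)) ? local : caml_stat_alloc(n + 1);
      value res;

      memcpy(buf, String_val(str), n);
      buf[n] = 0;
      res = caml_copy_string(buf);               /* allocates in the Caml heap */
      if (buf != local) caml_stat_free(buf);
      return res;
    }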
*/ @@ -81,7 +81,7 @@ static void realloc_gray_vals (void) } } -void darken (value v, value *p /* not used */) +void caml_darken (value v, value *p /* not used */) { if (Is_block (v) && Is_in_heap (v)) { if (Tag_val(v) == Infix_tag) v -= Infix_offset_val(v); @@ -95,11 +95,11 @@ void darken (value v, value *p /* not used */) static void start_cycle (void) { - Assert (gc_phase == Phase_idle); + Assert (caml_gc_phase == Phase_idle); Assert (gray_vals_cur == gray_vals); caml_gc_message (0x01, "Starting new major GC cycle\n", 0); darken_all_roots(); - gc_phase = Phase_mark; + caml_gc_phase = Phase_mark; gc_subphase = Subphase_main; markhp = NULL; #ifdef DEBUG @@ -173,7 +173,7 @@ static void mark_slice (long work) } }else if (!heap_is_pure){ heap_is_pure = 1; - chunk = heap_start; + chunk = caml_heap_start; markhp = chunk; limit = chunk + Chunk_size (chunk); }else if (gc_subphase == Subphase_main){ @@ -230,14 +230,14 @@ static void mark_slice (long work) Assert (gc_subphase == Subphase_final); /* Initialise the sweep phase. */ gray_vals_cur = gray_vals_ptr; - gc_sweep_hp = heap_start; + caml_gc_sweep_hp = caml_heap_start; fl_init_merge (); - gc_phase = Phase_sweep; - chunk = heap_start; - gc_sweep_hp = chunk; + caml_gc_phase = Phase_sweep; + chunk = caml_heap_start; + caml_gc_sweep_hp = chunk; limit = chunk + Chunk_size (chunk); work = 0; - fl_size_at_phase_change = fl_cur_size; + caml_fl_size_at_phase_change = fl_cur_size; } } gray_vals_cur = gray_vals_ptr; @@ -250,18 +250,18 @@ static void sweep_slice (long work) caml_gc_message (0x40, "Sweeping %ld words\n", work); while (work > 0){ - if (gc_sweep_hp < limit){ - hp = gc_sweep_hp; + if (caml_gc_sweep_hp < limit){ + hp = caml_gc_sweep_hp; hd = Hd_hp (hp); work -= Whsize_hd (hd); - gc_sweep_hp += Bhsize_hd (hd); + caml_gc_sweep_hp += Bhsize_hd (hd); switch (Color_hd (hd)){ case Caml_white: if (Tag_hd (hd) == Custom_tag){ void (*final_fun)(value) = Custom_ops_val(Val_hp(hp))->finalize; if (final_fun != NULL) final_fun(Val_hp(hp)); } - gc_sweep_hp = fl_merge_block (Bp_hp (hp)); + caml_gc_sweep_hp = fl_merge_block (Bp_hp (hp)); break; case Caml_blue: /* Only the blocks of the free-list are blue. See [freelist.c]. */ @@ -272,16 +272,16 @@ static void sweep_slice (long work) Hd_hp (hp) = Whitehd_hd (hd); break; } - Assert (gc_sweep_hp <= limit); + Assert (caml_gc_sweep_hp <= limit); }else{ chunk = Chunk_next (chunk); if (chunk == NULL){ /* Sweeping is done. */ ++ stat_major_collections; work = 0; - gc_phase = Phase_idle; + caml_gc_phase = Phase_idle; }else{ - gc_sweep_hp = chunk; + caml_gc_sweep_hp = chunk; limit = chunk + Chunk_size (chunk); } } @@ -292,13 +292,14 @@ static void sweep_slice (long work) [howmuch] is the amount of work to do, 0 to let the GC compute it. Return the computed amount of work to do. */ -long major_collection_slice (long howmuch) +long caml_major_collection_slice (long howmuch) { double p; long computed_work; /* Free memory at the start of the GC cycle (garbage + free list) (assumed): - FM = stat_heap_size * percent_free / (100 + percent_free) + FM = stat_heap_size * caml_percent_free + / (100 + caml_percent_free) Assuming steady state and enforcing a constant allocation rate, then FM is divided in 2/3 for garbage and 1/3 for free list. @@ -307,80 +308,80 @@ long major_collection_slice (long howmuch) (still assuming steady state). 
Proportion of G consumed since the previous slice: - PH = allocated_words / G - = allocated_words * 3 * (100 + percent_free) - / (2 * stat_heap_size * percent_free) + PH = caml_allocated_words / G + = caml_allocated_words * 3 * (100 + caml_percent_free) + / (2 * stat_heap_size * caml_percent_free) Proportion of extra-heap memory consumed since the previous slice: - PE = extra_heap_memory + PE = caml_extra_heap_memory Proportion of total work to do in this slice: P = max (PH, PE) Amount of marking work for the GC cycle: - MW = stat_heap_size * 100 / (100 + percent_free) + MW = stat_heap_size * 100 / (100 + caml_percent_free) Amount of sweeping work for the GC cycle: SW = stat_heap_size Amount of marking work for this slice: MS = P * MW - MS = P * stat_heap_size * 100 / (100 + percent_free) + MS = P * stat_heap_size * 100 / (100 + caml_percent_free) Amount of sweeping work for this slice: SS = P * SW SS = P * stat_heap_size This slice will either mark 2*MS words or sweep 2*SS words. */ - if (gc_phase == Phase_idle) start_cycle (); + if (caml_gc_phase == Phase_idle) start_cycle (); - p = (double) allocated_words * 3.0 * (100 + percent_free) - / Wsize_bsize (stat_heap_size) / percent_free / 2.0; - if (p < extra_heap_memory) p = extra_heap_memory; + p = (double) caml_allocated_words * 3.0 * (100 + caml_percent_free) + / Wsize_bsize (stat_heap_size) / caml_percent_free / 2.0; + if (p < caml_extra_heap_memory) p = caml_extra_heap_memory; - caml_gc_message (0x40, "allocated_words = %lu\n", allocated_words); + caml_gc_message (0x40, "allocated_words = %lu\n", caml_allocated_words); caml_gc_message (0x40, "extra_heap_memory = %luu\n", - (unsigned long) (extra_heap_memory * 1000000)); + (unsigned long) (caml_extra_heap_memory * 1000000)); caml_gc_message (0x40, "amount of work to do = %luu\n", (unsigned long) (p * 1000000)); - if (gc_phase == Phase_mark){ + if (caml_gc_phase == Phase_mark){ computed_work = 2 * (long) (p * Wsize_bsize (stat_heap_size) * 100 - / (100+percent_free)); + / (100 + caml_percent_free)); }else{ computed_work = 2 * (long) (p * Wsize_bsize (stat_heap_size)); } caml_gc_message (0x40, "ordered work = %ld words\n", howmuch); caml_gc_message (0x40, "computed work = %ld words\n", computed_work); if (howmuch == 0) howmuch = computed_work; - if (gc_phase == Phase_mark){ + if (caml_gc_phase == Phase_mark){ mark_slice (howmuch); caml_gc_message (0x02, "!", 0); }else{ - Assert (gc_phase == Phase_sweep); + Assert (caml_gc_phase == Phase_sweep); sweep_slice (howmuch); caml_gc_message (0x02, "$", 0); } - if (gc_phase == Phase_idle) compact_heap_maybe (); + if (caml_gc_phase == Phase_idle) caml_compact_heap_maybe (); - stat_major_words += allocated_words; - allocated_words = 0; - extra_heap_memory = 0.0; + stat_major_words += caml_allocated_words; + caml_allocated_words = 0; + caml_extra_heap_memory = 0.0; return computed_work; } /* The minor heap must be empty when this function is called; the minor heap is empty when this function returns. */ -/* This does not call compact_heap_maybe because the estimations of +/* This does not call caml_compact_heap_maybe because the estimations of free and live memory are only valid for a cycle done incrementally. - Besides, this function is called by compact_heap_maybe. + Besides, this function is called by caml_compact_heap_maybe. 
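The long comment in caml_major_collection_slice above sizes each slice from p, the fraction of the cycle's budget consumed since the previous slice. A rough worked instance with made-up numbers (caml_percent_free = 80, a 1,000,000-word major heap, caml_allocated_words = 100,000): p = 100000 * 3 * 180 / (2 * 1000000 * 80) = 0.3375, so a mark slice is asked for about 2 * p * 1000000 * 100/180 = 375,000 words of marking, and a sweep slice for about 2 * p * 1000000 = 675,000 words. The same arithmetic, pulled out as a checkable helper (the function and its parameters are illustrative, not part of the patch):

    /* Sketch: the mark-slice sizing from the comment above, with the globals
       passed as parameters.  mark_slice_words(100000., 1000000., 80.) == 375000. */
    static long mark_slice_words(double allocated_words, double heap_words,
                                 double percent_free)
    {
      double p = allocated_words * 3.0 * (100 + percent_free)
                 / heap_words / percent_free / 2.0;
      return 2 * (long) (p * heap_words * 100 / (100 + percent_free));
    }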
*/ -void finish_major_cycle (void) +void caml_finish_major_cycle (void) { - if (gc_phase == Phase_idle) start_cycle (); - while (gc_phase == Phase_mark) mark_slice (LONG_MAX); - Assert (gc_phase == Phase_sweep); - while (gc_phase == Phase_sweep) sweep_slice (LONG_MAX); - Assert (gc_phase == Phase_idle); - stat_major_words += allocated_words; - allocated_words = 0; + if (caml_gc_phase == Phase_idle) start_cycle (); + while (caml_gc_phase == Phase_mark) mark_slice (LONG_MAX); + Assert (caml_gc_phase == Phase_sweep); + while (caml_gc_phase == Phase_sweep) sweep_slice (LONG_MAX); + Assert (caml_gc_phase == Phase_idle); + stat_major_words += caml_allocated_words; + caml_allocated_words = 0; } /* Make sure the request is at least Heap_chunk_min and round it up @@ -394,15 +395,15 @@ static asize_t clip_heap_chunk_size (asize_t request) return ((request + Page_size - 1) >> Page_log) << Page_log; } -/* Make sure the request is >= major_heap_increment, then call +/* Make sure the request is >= caml_major_heap_increment, then call clip_heap_chunk_size, then make sure the result is >= request. */ -asize_t round_heap_chunk_size (asize_t request) +asize_t caml_round_heap_chunk_size (asize_t request) { asize_t result = request; - if (result < major_heap_increment){ - result = major_heap_increment; + if (result < caml_major_heap_increment){ + result = caml_major_heap_increment; } result = clip_heap_chunk_size (result); @@ -413,7 +414,7 @@ asize_t round_heap_chunk_size (asize_t request) return result; } -void init_major_heap (asize_t heap_size) +void caml_init_major_heap (asize_t heap_size) { asize_t i; void *block; @@ -423,32 +424,32 @@ void init_major_heap (asize_t heap_size) stat_heap_size = clip_heap_chunk_size (heap_size); stat_top_heap_size = stat_heap_size; Assert (stat_heap_size % Page_size == 0); - heap_start = (char *) alloc_for_heap (stat_heap_size); - if (heap_start == NULL) + caml_heap_start = (char *) caml_alloc_for_heap (stat_heap_size); + if (caml_heap_start == NULL) caml_fatal_error ("Fatal error: not enough memory for the initial heap.\n"); - Chunk_next (heap_start) = NULL; - heap_end = heap_start + stat_heap_size; - Assert ((unsigned long) heap_end % Page_size == 0); + Chunk_next (caml_heap_start) = NULL; + caml_heap_end = caml_heap_start + stat_heap_size; + Assert ((unsigned long) caml_heap_end % Page_size == 0); stat_heap_chunks = 1; - page_low = Page (heap_start); - page_high = Page (heap_end); + caml_page_low = Page (caml_heap_start); + caml_page_high = Page (caml_heap_end); - page_table_size = page_high - page_low; + page_table_size = caml_page_high - caml_page_low; page_table_block = (page_table_entry *) malloc (page_table_size * sizeof (page_table_entry)); if (page_table_block == NULL){ caml_fatal_error ("Fatal error: not enough memory for the initial heap.\n"); } - page_table = page_table_block - page_low; - for (i = Page (heap_start); i < Page (heap_end); i++){ - page_table [i] = In_heap; + caml_page_table = page_table_block - caml_page_low; + for (i = Page (caml_heap_start); i < Page (caml_heap_end); i++){ + caml_page_table [i] = In_heap; } fl_init_merge (); - make_free_blocks ((value *) heap_start, Wsize_bsize (stat_heap_size), 1); - gc_phase = Phase_idle; + make_free_blocks ((value *) caml_heap_start, Wsize_bsize (stat_heap_size), 1); + caml_gc_phase = Phase_idle; gray_vals_size = 2048; gray_vals = (value *) malloc (gray_vals_size * sizeof (value)); if (gray_vals == NULL) @@ -456,6 +457,6 @@ void init_major_heap (asize_t heap_size) gray_vals_cur = gray_vals; gray_vals_end = 
gray_vals + gray_vals_size; heap_is_pure = 1; - allocated_words = 0; - extra_heap_memory = 0.0; + caml_allocated_words = 0; + caml_extra_heap_memory = 0.0; } diff --git a/byterun/major_gc.h b/byterun/major_gc.h index 4a225663c..0c3eb14ec 100644 --- a/byterun/major_gc.h +++ b/byterun/major_gc.h @@ -32,10 +32,10 @@ typedef struct { #define Chunk_next(c) (((heap_chunk_head *) (c)) [-1]).next #define Chunk_block(c) (((heap_chunk_head *) (c)) [-1]).block -extern int gc_phase; -extern unsigned long allocated_words; -extern double extra_heap_memory; -extern unsigned long fl_size_at_phase_change; +extern int caml_gc_phase; +extern unsigned long caml_allocated_words; +extern double caml_extra_heap_memory; +extern unsigned long caml_fl_size_at_phase_change; #define Phase_mark 0 #define Phase_sweep 1 @@ -47,27 +47,27 @@ typedef int page_table_entry; typedef char page_table_entry; #endif -CAMLextern char *heap_start; -CAMLextern char *heap_end; +CAMLextern char *caml_heap_start; +CAMLextern char *caml_heap_end; extern unsigned long total_heap_size; -CAMLextern page_table_entry *page_table; -extern asize_t page_low, page_high; -extern char *gc_sweep_hp; +CAMLextern page_table_entry *caml_page_table; +extern asize_t caml_page_low, caml_page_high; +extern char *caml_gc_sweep_hp; #define In_heap 1 #define Not_in_heap 0 #define Page(p) ((unsigned long) (p) >> Page_log) #define Is_in_heap(p) \ (Assert (Is_block ((value) (p))), \ - (addr)(p) >= (addr)heap_start && (addr)(p) < (addr)heap_end \ - && page_table [Page (p)]) + (addr)(p) >= (addr)caml_heap_start && (addr)(p) < (addr)caml_heap_end \ + && caml_page_table [Page (p)]) -void init_major_heap (asize_t); /* size in bytes */ -asize_t round_heap_chunk_size (asize_t); /* size in bytes */ -void darken (value, value *); -long major_collection_slice (long); +void caml_init_major_heap (asize_t); /* size in bytes */ +asize_t caml_round_heap_chunk_size (asize_t); /* size in bytes */ +void caml_darken (value, value *); +long caml_major_collection_slice (long); void major_collection (void); -void finish_major_cycle (void); +void caml_finish_major_cycle (void); #endif /* CAML_MAJOR_GC_H */ diff --git a/byterun/md5.c b/byterun/md5.c index b9dc43c43..c686c3687 100644 --- a/byterun/md5.c +++ b/byterun/md5.c @@ -23,18 +23,18 @@ /* MD5 message digest */ -CAMLprim value md5_string(value str, value ofs, value len) +CAMLprim value caml_md5_string(value str, value ofs, value len) { struct MD5Context ctx; value res; - MD5Init(&ctx); - MD5Update(&ctx, &Byte_u(str, Long_val(ofs)), Long_val(len)); + caml_MD5Init(&ctx); + caml_MD5Update(&ctx, &Byte_u(str, Long_val(ofs)), Long_val(len)); res = caml_alloc_string(16); - MD5Final(&Byte_u(res, 0), &ctx); + caml_MD5Final(&Byte_u(res, 0), &ctx); return res; } -CAMLprim value md5_chan(value vchan, value len) +CAMLprim value caml_md5_chan(value vchan, value len) { struct channel * chan = Channel(vchan); struct MD5Context ctx; @@ -43,25 +43,25 @@ CAMLprim value md5_chan(value vchan, value len) char buffer[4096]; Lock(chan); - MD5Init(&ctx); + caml_MD5Init(&ctx); toread = Long_val(len); if (toread < 0){ while (1){ read = caml_getblock (chan, buffer, sizeof(buffer)); if (read == 0) break; - MD5Update (&ctx, (unsigned char *) buffer, read); + caml_MD5Update (&ctx, (unsigned char *) buffer, read); } }else{ while (toread > 0) { read = caml_getblock(chan, buffer, toread > sizeof(buffer) ? 
sizeof(buffer) : toread); if (read == 0) raise_end_of_file(); - MD5Update(&ctx, (unsigned char *) buffer, read); + caml_MD5Update(&ctx, (unsigned char *) buffer, read); toread -= read; } } res = caml_alloc_string(16); - MD5Final(&Byte_u(res, 0), &ctx); + caml_MD5Final(&Byte_u(res, 0), &ctx); Unlock(chan); return res; } @@ -78,8 +78,8 @@ CAMLprim value md5_chan(value vchan, value len) * with every copy. * * To compute the message digest of a chunk of bytes, declare an - * MD5Context structure, pass it to MD5Init, call MD5Update as - * needed on buffers full of bytes, and then call MD5Final, which + * MD5Context structure, pass it to caml_MD5Init, call caml_MD5Update as + * needed on buffers full of bytes, and then call caml_MD5Final, which * will fill a supplied 16-byte array with the digest. */ @@ -102,7 +102,7 @@ static void byteReverse(unsigned char * buf, unsigned longs) * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious * initialization constants. */ -CAMLexport void MD5Init(struct MD5Context *ctx) +CAMLexport void caml_MD5Init(struct MD5Context *ctx) { ctx->buf[0] = 0x67452301; ctx->buf[1] = 0xefcdab89; @@ -117,8 +117,8 @@ CAMLexport void MD5Init(struct MD5Context *ctx) * Update context to reflect the concatenation of another buffer full * of bytes. */ -CAMLexport void MD5Update(struct MD5Context *ctx, unsigned char *buf, - unsigned long len) +CAMLexport void caml_MD5Update(struct MD5Context *ctx, unsigned char *buf, + unsigned long len) { uint32 t; @@ -143,7 +143,7 @@ CAMLexport void MD5Update(struct MD5Context *ctx, unsigned char *buf, } memcpy(p, buf, t); byteReverse(ctx->in, 16); - MD5Transform(ctx->buf, (uint32 *) ctx->in); + caml_MD5Transform(ctx->buf, (uint32 *) ctx->in); buf += t; len -= t; } @@ -152,7 +152,7 @@ CAMLexport void MD5Update(struct MD5Context *ctx, unsigned char *buf, while (len >= 64) { memcpy(ctx->in, buf, 64); byteReverse(ctx->in, 16); - MD5Transform(ctx->buf, (uint32 *) ctx->in); + caml_MD5Transform(ctx->buf, (uint32 *) ctx->in); buf += 64; len -= 64; } @@ -166,7 +166,7 @@ CAMLexport void MD5Update(struct MD5Context *ctx, unsigned char *buf, * Final wrapup - pad to 64-byte boundary with the bit pattern * 1 0* (64-bit count of bits processed, MSB-first) */ -CAMLexport void MD5Final(unsigned char *digest, struct MD5Context *ctx) +CAMLexport void caml_MD5Final(unsigned char *digest, struct MD5Context *ctx) { unsigned count; unsigned char *p; @@ -187,7 +187,7 @@ CAMLexport void MD5Final(unsigned char *digest, struct MD5Context *ctx) /* Two lots of padding: Pad the first block to 64 bytes */ memset(p, 0, count); byteReverse(ctx->in, 16); - MD5Transform(ctx->buf, (uint32 *) ctx->in); + caml_MD5Transform(ctx->buf, (uint32 *) ctx->in); /* Now fill the next block with 56 bytes */ memset(ctx->in, 0, 56); @@ -201,7 +201,7 @@ CAMLexport void MD5Final(unsigned char *digest, struct MD5Context *ctx) ((uint32 *) ctx->in)[14] = ctx->bits[0]; ((uint32 *) ctx->in)[15] = ctx->bits[1]; - MD5Transform(ctx->buf, (uint32 *) ctx->in); + caml_MD5Transform(ctx->buf, (uint32 *) ctx->in); byteReverse((unsigned char *) ctx->buf, 4); memcpy(digest, ctx->buf, 16); memset(ctx, 0, sizeof(ctx)); /* In case it's sensitive */ @@ -221,10 +221,10 @@ CAMLexport void MD5Final(unsigned char *digest, struct MD5Context *ctx) /* * The core of the MD5 algorithm, this alters an existing MD5 hash to - * reflect the addition of 16 longwords of new data. MD5Update blocks + * reflect the addition of 16 longwords of new data. 
caml_MD5Update blocks * the data and converts bytes into longwords for this routine. */ -CAMLexport void MD5Transform(uint32 *buf, uint32 *in) +CAMLexport void caml_MD5Transform(uint32 *buf, uint32 *in) { register uint32 a, b, c, d; diff --git a/byterun/md5.h b/byterun/md5.h index 393482d8e..5e50125b3 100644 --- a/byterun/md5.h +++ b/byterun/md5.h @@ -22,8 +22,8 @@ #include "mlvalues.h" #include "io.h" -CAMLextern value md5_string (value str, value ofs, value len); -CAMLextern value md5_chan (value vchan, value len); +CAMLextern value caml_md5_string (value str, value ofs, value len); +CAMLextern value caml_md5_chan (value vchan, value len); struct MD5Context { uint32 buf[4]; @@ -31,11 +31,11 @@ struct MD5Context { unsigned char in[64]; }; -CAMLextern void MD5Init (struct MD5Context *context); -CAMLextern void MD5Update (struct MD5Context *context, unsigned char *buf, - unsigned long len); -CAMLextern void MD5Final (unsigned char *digest, struct MD5Context *ctx); -CAMLextern void MD5Transform (uint32 *buf, uint32 *in); +CAMLextern void caml_MD5Init (struct MD5Context *context); +CAMLextern void caml_MD5Update (struct MD5Context *context, unsigned char *buf, + unsigned long len); +CAMLextern void caml_MD5Final (unsigned char *digest, struct MD5Context *ctx); +CAMLextern void caml_MD5Transform (uint32 *buf, uint32 *in); #endif /* CAML_MD5_H */ diff --git a/byterun/memory.c b/byterun/memory.c index a3c8e5b03..051eeeff6 100644 --- a/byterun/memory.c +++ b/byterun/memory.c @@ -32,13 +32,13 @@ extern void aligned_munmap (char * addr, asize_t size); #endif /* Allocate a block of the requested size, to be passed to - [add_to_heap] later. + [caml_add_to_heap] later. [request] must be a multiple of [Page_size]. - [alloc_for_heap] returns NULL if the request cannot be satisfied. + [caml_alloc_for_heap] returns NULL if the request cannot be satisfied. The returned pointer is a hp, but the header must be initialized by the caller. */ -char *alloc_for_heap (asize_t request) +char *caml_alloc_for_heap (asize_t request) { char *mem; void *block; @@ -57,10 +57,10 @@ char *alloc_for_heap (asize_t request) return mem; } -/* Use this function to free a block allocated with [alloc_for_heap] - if you don't add it with [add_to_heap]. +/* Use this function to free a block allocated with [caml_alloc_for_heap] + if you don't add it with [caml_add_to_heap]. */ -void free_for_heap (char *mem) +void caml_free_for_heap (char *mem) { #ifdef USE_MMAP_INSTEAD_OF_MALLOC aligned_munmap (Chunk_block (mem), @@ -71,15 +71,15 @@ void free_for_heap (char *mem) } /* Take a chunk of memory as argument, which must be the result of a - call to [alloc_for_heap], and insert it into the heap chaining. + call to [caml_alloc_for_heap], and insert it into the heap chaining. The contents of the chunk must be a sequence of valid blocks and fragments: no space between blocks and no trailing garbage. If some blocks are blue, they must be added to the free list by the - caller. All other blocks must have the color [allocation_color(mem)]. - The caller must update [allocated_words] if applicable. + caller. All other blocks must have the color [caml_allocation_color(mem)]. + The caller must update [caml_allocated_words] if applicable. Return value: 0 if no error; -1 in case of error. */ -int add_to_heap (char *m) +int caml_add_to_heap (char *m) { asize_t i; Assert (Chunk_size (m) % Page_size == 0); @@ -88,10 +88,10 @@ int add_to_heap (char *m) #endif /* debug */ /* Extend the page table as needed. 
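The md5.h hunk above exports the digest routines as caml_MD5Init, caml_MD5Update and caml_MD5Final, with the usage protocol described in the md5.c comment: initialise a context, feed it buffers, then extract the 16-byte digest. A minimal sketch of a caller; example_digest and its arguments are illustrative:

    /* Sketch: digesting a C buffer with the renamed MD5 interface. */
    #include "md5.h"

    static void example_digest(unsigned char *data, unsigned long len,
                               unsigned char digest[16])
    {
      struct MD5Context ctx;
      caml_MD5Init(&ctx);
      caml_MD5Update(&ctx, data, len);   /* may be repeated for chunked input */
      caml_MD5Final(digest, &ctx);
    }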
*/ - if (Page (m) < page_low){ + if (Page (m) < caml_page_low){ page_table_entry *block, *new_page_table; asize_t new_page_low = Page (m); - asize_t new_size = page_high - new_page_low; + asize_t new_size = caml_page_high - new_page_low; caml_gc_message (0x08, "Growing page table to %lu entries\n", new_size); block = malloc (new_size * sizeof (page_table_entry)); @@ -100,16 +100,20 @@ int add_to_heap (char *m) return -1; } new_page_table = block - new_page_low; - for (i = new_page_low; i < page_low; i++) new_page_table [i] = Not_in_heap; - for (i = page_low; i < page_high; i++) new_page_table [i] = page_table [i]; - free (page_table + page_low); - page_table = new_page_table; - page_low = new_page_low; + for (i = new_page_low; i < caml_page_low; i++){ + new_page_table [i] = Not_in_heap; + } + for (i = caml_page_low; i < caml_page_high; i++){ + new_page_table [i] = caml_page_table [i]; + } + free (caml_page_table + caml_page_low); + caml_page_table = new_page_table; + caml_page_low = new_page_low; } - if (Page (m + Chunk_size (m)) > page_high){ + if (Page (m + Chunk_size (m)) > caml_page_high){ page_table_entry *block, *new_page_table; asize_t new_page_high = Page (m + Chunk_size (m)); - asize_t new_size = new_page_high - page_low; + asize_t new_size = new_page_high - caml_page_low; caml_gc_message (0x08, "Growing page table to %lu entries\n", new_size); block = malloc (new_size * sizeof (page_table_entry)); @@ -117,24 +121,26 @@ int add_to_heap (char *m) caml_gc_message (0x08, "No room for growing page table\n", 0); return -1; } - new_page_table = block - page_low; - for (i = page_low; i < page_high; i++) new_page_table [i] = page_table [i]; - for (i = page_high; i < new_page_high; i++){ + new_page_table = block - caml_page_low; + for (i = caml_page_low; i < caml_page_high; i++){ + new_page_table [i] = caml_page_table [i]; + } + for (i = caml_page_high; i < new_page_high; i++){ new_page_table [i] = Not_in_heap; } - free (page_table + page_low); - page_table = new_page_table; - page_high = new_page_high; + free (caml_page_table + caml_page_low); + caml_page_table = new_page_table; + caml_page_high = new_page_high; } /* Mark the pages as being in the heap. */ for (i = Page (m); i < Page (m + Chunk_size (m)); i++){ - page_table [i] = In_heap; + caml_page_table [i] = In_heap; } /* Chain this heap chunk. */ { - char **last = &heap_start; + char **last = &caml_heap_start; char *cur = *last; while (cur != NULL && cur < m){ @@ -148,8 +154,8 @@ int add_to_heap (char *m) } /* Update the heap bounds as needed. 
*/ - /* already done: if (m < heap_start) heap_start = m; */ - if (m + Chunk_size (m) > heap_end) heap_end = m + Chunk_size (m); + /* already done: if (m < caml_heap_start) heap_start = m; */ + if (m + Chunk_size (m) > caml_heap_end) caml_heap_end = m + Chunk_size (m); stat_heap_size += Chunk_size (m); if (stat_heap_size > stat_top_heap_size) stat_top_heap_size = stat_heap_size; @@ -167,10 +173,10 @@ static char *expand_heap (mlsize_t request) char *mem; asize_t malloc_request; - malloc_request = round_heap_chunk_size (Bhsize_wosize (request)); + malloc_request = caml_round_heap_chunk_size (Bhsize_wosize (request)); caml_gc_message (0x04, "Growing heap to %luk bytes\n", (stat_heap_size + malloc_request) / 1024); - mem = alloc_for_heap (malloc_request); + mem = caml_alloc_for_heap (malloc_request); if (mem == NULL){ caml_gc_message (0x04, "No room for growing heap\n", 0); return NULL; @@ -178,8 +184,8 @@ static char *expand_heap (mlsize_t request) Assert (Wosize_bhsize (malloc_request) >= request); Hd_hp (mem) = Make_header (Wosize_bhsize (malloc_request), 0, Caml_blue); - if (add_to_heap (mem) != 0){ - free_for_heap (mem); + if (caml_add_to_heap (mem) != 0){ + caml_free_for_heap (mem); return NULL; } return Bp_hp (mem); @@ -188,18 +194,18 @@ static char *expand_heap (mlsize_t request) /* Remove the heap chunk [chunk] from the heap and give the memory back to [free]. */ -void shrink_heap (char *chunk) +void caml_shrink_heap (char *chunk) { char **cp; asize_t i; - /* Never deallocate the first block, because heap_start is both the + /* Never deallocate the first block, because caml_heap_start is both the first block and the base address for page numbers, and we don't want to shift the page table, it's too messy (see above). It will never happen anyway, because of the way compaction works. (see compact.c) */ - if (chunk == heap_start) return; + if (chunk == caml_heap_start) return; stat_heap_size -= Chunk_size (chunk); caml_gc_message (0x04, "Shrinking heap to %luk bytes\n", stat_heap_size/1024); @@ -216,32 +222,33 @@ void shrink_heap (char *chunk) -- stat_heap_chunks; /* Remove [chunk] from the list of chunks. */ - cp = &heap_start; + cp = &caml_heap_start; while (*cp != chunk) cp = &(Chunk_next (*cp)); *cp = Chunk_next (chunk); /* Remove the pages of [chunk] from the page table. */ for (i = Page (chunk); i < Page (chunk + Chunk_size (chunk)); i++){ - page_table [i] = Not_in_heap; + caml_page_table [i] = Not_in_heap; } /* Free the [malloc] block that contains [chunk]. 
*/ - free_for_heap (chunk); + caml_free_for_heap (chunk); } -color_t allocation_color (void *hp) +color_t caml_allocation_color (void *hp) { - if (gc_phase == Phase_mark - || (gc_phase == Phase_sweep && (addr)hp >= (addr)gc_sweep_hp)){ + if (caml_gc_phase == Phase_mark + || (caml_gc_phase == Phase_sweep && (addr)hp >= (addr)caml_gc_sweep_hp)){ return Caml_black; }else{ - Assert (gc_phase == Phase_idle - || (gc_phase == Phase_sweep && (addr)hp < (addr)gc_sweep_hp)); + Assert (caml_gc_phase == Phase_idle + || (caml_gc_phase == Phase_sweep + && (addr)hp < (addr)caml_gc_sweep_hp)); return Caml_white; } } -value alloc_shr (mlsize_t wosize, tag_t tag) +value caml_alloc_shr (mlsize_t wosize, tag_t tag) { char *hp, *new_block; @@ -250,7 +257,7 @@ value alloc_shr (mlsize_t wosize, tag_t tag) if (hp == NULL){ new_block = expand_heap (wosize); if (new_block == NULL) { - if (in_minor_collection) + if (caml_in_minor_collection) caml_fatal_error ("Fatal error: out of memory.\n"); else raise_out_of_memory (); @@ -261,18 +268,21 @@ value alloc_shr (mlsize_t wosize, tag_t tag) Assert (Is_in_heap (Val_hp (hp))); - /* Inline expansion of allocation_color. */ - if (gc_phase == Phase_mark - || (gc_phase == Phase_sweep && (addr)hp >= (addr)gc_sweep_hp)){ + /* Inline expansion of caml_allocation_color. */ + if (caml_gc_phase == Phase_mark + || (caml_gc_phase == Phase_sweep && (addr)hp >= (addr)caml_gc_sweep_hp)){ Hd_hp (hp) = Make_header (wosize, tag, Caml_black); }else{ - Assert (gc_phase == Phase_idle - || (gc_phase == Phase_sweep && (addr)hp < (addr)gc_sweep_hp)); + Assert (caml_gc_phase == Phase_idle + || (caml_gc_phase == Phase_sweep + && (addr)hp < (addr)caml_gc_sweep_hp)); Hd_hp (hp) = Make_header (wosize, tag, Caml_white); } - Assert (Hd_hp (hp) == Make_header (wosize, tag, allocation_color (hp))); - allocated_words += Whsize_wosize (wosize); - if (allocated_words > Wsize_bsize (minor_heap_size)) urge_major_slice (); + Assert (Hd_hp (hp) == Make_header (wosize, tag, caml_allocation_color (hp))); + caml_allocated_words += Whsize_wosize (wosize); + if (caml_allocated_words > Wsize_bsize (caml_minor_heap_size)){ + urge_major_slice (); + } #ifdef DEBUG { unsigned long i; @@ -290,51 +300,51 @@ value alloc_shr (mlsize_t wosize, tag_t tag) [mem] is the number of words allocated this time. Note that only [mem/max] is relevant. You can use numbers of bytes (or kilobytes, ...) instead of words. You can change units between - calls to [adjust_gc_speed]. + calls to [caml_adjust_gc_speed]. */ -void adjust_gc_speed (mlsize_t mem, mlsize_t max) +void caml_adjust_gc_speed (mlsize_t mem, mlsize_t max) { if (max == 0) max = 1; if (mem > max) mem = max; - extra_heap_memory += (double) mem / (double) max; - if (extra_heap_memory > 1.0){ - extra_heap_memory = 1.0; + caml_extra_heap_memory += (double) mem / (double) max; + if (caml_extra_heap_memory > 1.0){ + caml_extra_heap_memory = 1.0; urge_major_slice (); } - if (extra_heap_memory > (double) Wsize_bsize (minor_heap_size) - / 2.0 / (double) Wsize_bsize (stat_heap_size)) { + if (caml_extra_heap_memory > (double) Wsize_bsize (caml_minor_heap_size) + / 2.0 / (double) Wsize_bsize (stat_heap_size)) { urge_major_slice (); } } -/* You must use [initialize] to store the initial value in a field of +/* You must use [caml_initialize] to store the initial value in a field of a shared block, unless you are sure the value is not a young block. A block value [v] is a shared block if and only if [Is_in_heap (v)] is true. 
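The memory.c comments here spell out the store discipline behind the renamed entry points: caml_initialize for the first write into a field of a block allocated in the major heap (typically straight after caml_alloc_shr), and caml_modify, defined just below, for overwriting a field of an existing shared block. A minimal sketch of both cases; the example_* helpers are hypothetical:

    /* Sketch: caml_initialize for first writes after caml_alloc_shr,
       caml_modify for later updates to shared blocks. */
    #include "mlvalues.h"
    #include "memory.h"

    static value example_make_pair_shr(value a, value b)
    {
      value block = caml_alloc_shr(2, 0);     /* two fields, tag 0, major heap */
      caml_initialize(&Field(block, 0), a);   /* first write into each field */
      caml_initialize(&Field(block, 1), b);
      return block;
    }

    static void example_set_first(value block, value v)
    {
      caml_modify(&Field(block, 0), v);       /* update of an existing field */
    }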
*/ -/* [initialize] never calls the GC, so you may call it while an block is - unfinished (i.e. just after a call to [alloc_shr].) */ -void initialize (value *fp, value val) +/* [caml_initialize] never calls the GC, so you may call it while an block is + unfinished (i.e. just after a call to [caml_alloc_shr].) */ +void caml_initialize (value *fp, value val) { *fp = val; if (Is_block (val) && Is_young (val) && Is_in_heap (fp)){ - *ref_table_ptr++ = fp; - if (ref_table_ptr >= ref_table_limit){ - realloc_ref_table (); + *caml_ref_table_ptr++ = fp; + if (caml_ref_table_ptr >= caml_ref_table_limit){ + caml_realloc_ref_table (); } } } -/* You must use [modify] to change a field of an existing shared block, +/* You must use [caml_modify] to change a field of an existing shared block, unless you are sure the value being overwritten is not a shared block and the value being written is not a young block. */ -/* [modify] never calls the GC. */ -void modify (value *fp, value val) +/* [caml_modify] never calls the GC. */ +void caml_modify (value *fp, value val) { Modify (fp, val); } -void * stat_alloc (asize_t sz) +void * caml_stat_alloc (asize_t sz) { void * result = malloc (sz); @@ -346,12 +356,12 @@ void * stat_alloc (asize_t sz) return result; } -void stat_free (void * blk) +void caml_stat_free (void * blk) { free (blk); } -void * stat_resize (void * blk, asize_t sz) +void * caml_stat_resize (void * blk, asize_t sz) { void * result = realloc (blk, sz); diff --git a/byterun/memory.h b/byterun/memory.h index 1f2ff98f8..83f676bd6 100644 --- a/byterun/memory.h +++ b/byterun/memory.h @@ -30,20 +30,20 @@ #include "misc.h" #include "mlvalues.h" -CAMLextern value alloc_shr (mlsize_t, tag_t); -void adjust_gc_speed (mlsize_t, mlsize_t); -CAMLextern void modify (value *, value); -CAMLextern void initialize (value *, value); -CAMLextern value check_urgent_gc (value); -CAMLextern void * stat_alloc (asize_t); /* Size in bytes. */ -CAMLextern void stat_free (void *); -CAMLextern void * stat_resize (void *, asize_t); /* Size in bytes. */ -char *alloc_for_heap (asize_t request); /* Size in bytes. */ -void free_for_heap (char *mem); -int add_to_heap (char *mem); -color_t allocation_color (void *hp); - -/* void shrink_heap (char *); Only used in compact.c */ +CAMLextern value caml_alloc_shr (mlsize_t, tag_t); +void caml_adjust_gc_speed (mlsize_t, mlsize_t); +CAMLextern void caml_modify (value *, value); +CAMLextern void caml_initialize (value *, value); +CAMLextern value caml_check_urgent_gc (value); +CAMLextern void * caml_stat_alloc (asize_t); /* Size in bytes. */ +CAMLextern void caml_stat_free (void *); +CAMLextern void * caml_stat_resize (void *, asize_t); /* Size in bytes. */ +char *caml_alloc_for_heap (asize_t request); /* Size in bytes. 
*/ +void caml_free_for_heap (char *mem); +int caml_add_to_heap (char *mem); +color_t caml_allocation_color (void *hp); + +/* void caml_shrink_heap (char *); Only used in compact.c */ /* <private> */ @@ -61,16 +61,16 @@ color_t allocation_color (void *hp); #define Alloc_small(result, wosize, tag) do{ CAMLassert (wosize >= 1); \ CAMLassert ((tag_t) tag < 256); \ CAMLassert ((wosize) <= Max_young_wosize); \ - young_ptr -= Bhsize_wosize (wosize); \ - if (young_ptr < young_limit){ \ - young_ptr += Bhsize_wosize (wosize); \ + caml_young_ptr -= Bhsize_wosize (wosize); \ + if (caml_young_ptr < caml_young_limit){ \ + caml_young_ptr += Bhsize_wosize (wosize); \ Setup_for_gc; \ - minor_collection (); \ + caml_minor_collection (); \ Restore_after_gc; \ - young_ptr -= Bhsize_wosize (wosize); \ + caml_young_ptr -= Bhsize_wosize (wosize); \ } \ - Hd_hp (young_ptr) = Make_header ((wosize), (tag), Caml_black); \ - (result) = Val_hp (young_ptr); \ + Hd_hp (caml_young_ptr) = Make_header ((wosize), (tag), Caml_black); \ + (result) = Val_hp (caml_young_ptr); \ DEBUG_clear ((result), (wosize)); \ }while(0) @@ -89,13 +89,13 @@ color_t allocation_color (void *hp); value _old_ = *(fp); \ *(fp) = (val); \ if (Is_in_heap (fp)){ \ - if (gc_phase == Phase_mark) darken (_old_, NULL); \ + if (caml_gc_phase == Phase_mark) caml_darken (_old_, NULL); \ if (Is_block (val) && Is_young (val) \ && ! (Is_block (_old_) && Is_young (_old_))){ \ - *ref_table_ptr++ = (fp); \ - if (ref_table_ptr >= ref_table_limit){ \ - CAMLassert (ref_table_ptr == ref_table_limit); \ - realloc_ref_table (); \ + *caml_ref_table_ptr++ = (fp); \ + if (caml_ref_table_ptr >= caml_ref_table_limit){ \ + CAMLassert (caml_ref_table_ptr == caml_ref_table_limit); \ + caml_realloc_ref_table (); \ } \ } \ } \ @@ -277,7 +277,7 @@ CAMLextern struct caml__roots_block *local_roots; /* defined in roots.c */ #define Store_field(block, offset, val) do{ \ mlsize_t caml__temp_offset = (offset); \ value caml__temp_val = (val); \ - modify (&Field ((block), caml__temp_offset), caml__temp_val); \ + caml_modify (&Field ((block), caml__temp_offset), caml__temp_val); \ }while(0) /* diff --git a/byterun/meta.c b/byterun/meta.c index 273fe9da3..e533f7a71 100644 --- a/byterun/meta.c +++ b/byterun/meta.c @@ -30,12 +30,12 @@ #ifndef NATIVE_CODE -CAMLprim value get_global_data(value unit) +CAMLprim value caml_get_global_data(value unit) { - return global_data; + return caml_global_data; } -CAMLprim value reify_bytecode(value prog, value len) +CAMLprim value caml_reify_bytecode(value prog, value len) { value clos; #ifdef ARCH_BIG_ENDIAN @@ -49,34 +49,34 @@ CAMLprim value reify_bytecode(value prog, value len) return clos; } -CAMLprim value realloc_global(value size) +CAMLprim value caml_realloc_global(value size) { mlsize_t requested_size, actual_size, i; value new_global_data; requested_size = Long_val(size); - actual_size = Wosize_val(global_data); + actual_size = Wosize_val(caml_global_data); if (requested_size >= actual_size) { requested_size = (requested_size + 0x100) & 0xFFFFFF00; caml_gc_message (0x08, "Growing global data to %lu entries\n", requested_size); - new_global_data = alloc_shr(requested_size, 0); + new_global_data = caml_alloc_shr(requested_size, 0); for (i = 0; i < actual_size; i++) - initialize(&Field(new_global_data, i), Field(global_data, i)); + caml_initialize(&Field(new_global_data, i), Field(caml_global_data, i)); for (i = actual_size; i < requested_size; i++){ Field (new_global_data, i) = Val_long (0); } - global_data = new_global_data; + caml_global_data = 
new_global_data; } return Val_unit; } -CAMLprim value get_current_environment(value unit) +CAMLprim value caml_get_current_environment(value unit) { - return *extern_sp; + return *caml_extern_sp; } -CAMLprim value invoke_traced_function(value codeptr, value env, value arg) +CAMLprim value caml_invoke_traced_function(value codeptr, value env, value arg) { /* Stack layout on entry: return frame into instrument_closure function @@ -104,9 +104,9 @@ CAMLprim value invoke_traced_function(value codeptr, value env, value arg) value * osp, * nsp; int i; - osp = extern_sp; - extern_sp -= 4; - nsp = extern_sp; + osp = caml_extern_sp; + caml_extern_sp -= 4; + nsp = caml_extern_sp; for (i = 0; i < 6; i++) nsp[i] = osp[i]; nsp[6] = codeptr; nsp[7] = env; @@ -119,43 +119,43 @@ CAMLprim value invoke_traced_function(value codeptr, value env, value arg) /* Dummy definitions to support compilation of ocamlc.opt */ -value get_global_data(value unit) +value caml_get_global_data(value unit) { invalid_argument("Meta.get_global_data"); return Val_unit; /* not reached */ } -value realloc_global(value size) +value caml_realloc_global(value size) { invalid_argument("Meta.realloc_global"); return Val_unit; /* not reached */ } -value available_primitives(value unit) +value caml_available_primitives(value unit) { invalid_argument("Meta.available_primitives"); return Val_unit; /* not reached */ } -value invoke_traced_function(value codeptr, value env, value arg) +value caml_invoke_traced_function(value codeptr, value env, value arg) { invalid_argument("Meta.invoke_traced_function"); return Val_unit; /* not reached */ } -value * stack_low; -value * stack_high; -value * stack_threshold; -value * extern_sp; -value * trapsp; -int backtrace_active; -int backtrace_pos; -code_t * backtrace_buffer; -value backtrace_last_exn; -int callback_depth; +value * caml_stack_low; +value * caml_stack_high; +value * caml_stack_threshold; +value * caml_extern_sp; +value * caml_trapsp; +int caml_backtrace_active; +int caml_backtrace_pos; +code_t * caml_backtrace_buffer; +value caml_backtrace_last_exn; +int caml_callback_depth; int volatile something_to_do; void (* volatile async_action_hook)(void); -void print_exception_backtrace(void) { } +void caml_print_exception_backtrace(void) { } struct longjmp_buffer * external_raise; #endif diff --git a/byterun/minor_gc.c b/byterun/minor_gc.c index ec0203e92..ab4930385 100644 --- a/byterun/minor_gc.c +++ b/byterun/minor_gc.c @@ -27,15 +27,15 @@ #include "roots.h" #include "signals.h" -asize_t minor_heap_size; -char *young_start = NULL, *young_end = NULL; -char *young_ptr = NULL, *young_limit = NULL; +asize_t caml_minor_heap_size; +char *caml_young_start = NULL, *caml_young_end = NULL; +char *caml_young_ptr = NULL, *caml_young_limit = NULL; static value **ref_table = NULL, **ref_table_end, **ref_table_threshold; -value **ref_table_ptr = NULL, **ref_table_limit; +value **caml_ref_table_ptr = NULL, **caml_ref_table_limit; static asize_t ref_table_size, ref_table_reserve; -int in_minor_collection = 0; +int caml_in_minor_collection = 0; -void set_minor_heap_size (asize_t size) +void caml_set_minor_heap_size (asize_t size) { char *new_heap; value **new_table; @@ -43,27 +43,27 @@ void set_minor_heap_size (asize_t size) Assert (size >= Minor_heap_min); Assert (size <= Minor_heap_max); Assert (size % sizeof (value) == 0); - if (young_ptr != young_end) minor_collection (); - Assert (young_ptr == young_end); - new_heap = (char *) stat_alloc (size); - if (young_start != NULL){ - stat_free (young_start); + if 
(caml_young_ptr != caml_young_end) caml_minor_collection (); + Assert (caml_young_ptr == caml_young_end); + new_heap = (char *) caml_stat_alloc (size); + if (caml_young_start != NULL){ + caml_stat_free (caml_young_start); } - young_start = new_heap; - young_end = new_heap + size; - young_limit = young_start; - young_ptr = young_end; - minor_heap_size = size; + caml_young_start = new_heap; + caml_young_end = new_heap + size; + caml_young_limit = caml_young_start; + caml_young_ptr = caml_young_end; + caml_minor_heap_size = size; - ref_table_size = minor_heap_size / sizeof (value) / 8; + ref_table_size = caml_minor_heap_size / sizeof (value) / 8; ref_table_reserve = 256; - new_table = (value **) stat_alloc ((ref_table_size + ref_table_reserve) - * sizeof (value *)); - if (ref_table != NULL) stat_free (ref_table); + new_table = (value **) caml_stat_alloc ((ref_table_size + ref_table_reserve) + * sizeof (value *)); + if (ref_table != NULL) caml_stat_free (ref_table); ref_table = new_table; - ref_table_ptr = ref_table; + caml_ref_table_ptr = ref_table; ref_table_threshold = ref_table + ref_table_size; - ref_table_limit = ref_table_threshold; + caml_ref_table_limit = ref_table_threshold; ref_table_end = ref_table + ref_table_size + ref_table_reserve; } @@ -72,7 +72,7 @@ static value oldify_todo_list = 0; /* Note that the tests on the tag depend on the fact that Infix_tag, Forward_tag, and No_scan_tag are contiguous. */ -void oldify_one (value v, value *p) +void caml_oldify_one (value v, value *p) { value result; header_t hd; @@ -81,7 +81,7 @@ void oldify_one (value v, value *p) tail_call: if (Is_block (v) && Is_young (v)){ - Assert (Hp_val (v) >= young_ptr); + Assert (Hp_val (v) >= caml_young_ptr); hd = Hd_val (v); if (hd == 0){ /* If already forwarded */ *p = Field (v, 0); /* then forward pointer is first field. */ @@ -91,7 +91,7 @@ void oldify_one (value v, value *p) value field0; sz = Wosize_hd (hd); - result = alloc_shr (sz, tag); + result = caml_alloc_shr (sz, tag); *p = result; field0 = Field (v, 0); Hd_val (v) = 0; /* Set forward flag */ @@ -108,14 +108,14 @@ void oldify_one (value v, value *p) } }else if (tag >= No_scan_tag){ sz = Wosize_hd (hd); - result = alloc_shr (sz, tag); + result = caml_alloc_shr (sz, tag); for (i = 0; i < sz; i++) Field (result, i) = Field (v, i); Hd_val (v) = 0; /* Set forward flag */ Field (v, 0) = result; /* and forward pointer. */ *p = result; }else if (tag == Infix_tag){ mlsize_t offset = Infix_offset_hd (hd); - oldify_one (v - offset, p); /* This cannot recurse deeper than 1. */ + caml_oldify_one (v - offset, p); /* Cannot recurse deeper than 1. */ *p += offset; }else{ value f = Forward_val (v); @@ -128,7 +128,7 @@ void oldify_one (value v, value *p) if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){ /* Do not short-circuit the pointer. Copy as a normal block. */ Assert (Wosize_hd (hd) == 1); - result = alloc_shr (1, Forward_tag); + result = caml_alloc_shr (1, Forward_tag); *p = result; Hd_val (v) = 0; /* Set (GC) forward flag */ Field (v, 0) = result; /* and forward pointer. */ @@ -146,11 +146,11 @@ void oldify_one (value v, value *p) } } -/* Finish the work that was put off by oldify_one. - Note that oldify_one itself is called by oldify_mopup, so we +/* Finish the work that was put off by [caml_oldify_one]. + Note that [caml_oldify_one] itself is called by oldify_mopup, so we have to be careful to remove the first entry from the list before oldifying its fields. 
*/ -void oldify_mopup (void) +void caml_oldify_mopup (void) { value v, new_v, f; mlsize_t i; @@ -163,12 +163,12 @@ void oldify_mopup (void) f = Field (new_v, 0); if (Is_block (f) && Is_young (f)){ - oldify_one (f, &Field (new_v, 0)); + caml_oldify_one (f, &Field (new_v, 0)); } for (i = 1; i < Wosize_val (new_v); i++){ f = Field (v, i); if (Is_block (f) && Is_young (f)){ - oldify_one (f, &Field (new_v, i)); + caml_oldify_one (f, &Field (new_v, i)); }else{ Field (new_v, i) = f; } @@ -179,32 +179,32 @@ void oldify_mopup (void) /* Make sure the minor heap is empty by performing a minor collection if needed. */ -void empty_minor_heap (void) +void caml_empty_minor_heap (void) { value **r; - if (young_ptr != young_end){ - in_minor_collection = 1; + if (caml_young_ptr != caml_young_end){ + caml_in_minor_collection = 1; caml_gc_message (0x02, "<", 0); oldify_local_roots(); - for (r = ref_table; r < ref_table_ptr; r++){ - oldify_one (**r, *r); + for (r = ref_table; r < caml_ref_table_ptr; r++){ + caml_oldify_one (**r, *r); } - oldify_mopup (); - if (young_ptr < young_start) young_ptr = young_start; - stat_minor_words += Wsize_bsize (young_end - young_ptr); - young_ptr = young_end; - young_limit = young_start; - ref_table_ptr = ref_table; - ref_table_limit = ref_table_threshold; + caml_oldify_mopup (); + if (caml_young_ptr < caml_young_start) caml_young_ptr = caml_young_start; + stat_minor_words += Wsize_bsize (caml_young_end - caml_young_ptr); + caml_young_ptr = caml_young_end; + caml_young_limit = caml_young_start; + caml_ref_table_ptr = ref_table; + caml_ref_table_limit = ref_table_threshold; caml_gc_message (0x02, ">", 0); - in_minor_collection = 0; + caml_in_minor_collection = 0; } final_empty_young (); #ifdef DEBUG { value *p; - for (p = (value *) young_start; p < (value *) young_end; ++p){ + for (p = (value *) caml_young_start; p < (value *) caml_young_end; ++p){ *p = Debug_free_minor; } } @@ -215,41 +215,41 @@ void empty_minor_heap (void) functions, etc. Leave the minor heap empty. */ -void minor_collection (void) +void caml_minor_collection (void) { - long prev_alloc_words = allocated_words; + long prev_alloc_words = caml_allocated_words; - empty_minor_heap (); + caml_empty_minor_heap (); - stat_promoted_words += allocated_words - prev_alloc_words; + stat_promoted_words += caml_allocated_words - prev_alloc_words; ++ stat_minor_collections; - major_collection_slice (0); + caml_major_collection_slice (0); force_major_slice = 0; final_do_calls (); - empty_minor_heap (); + caml_empty_minor_heap (); } -value check_urgent_gc (value extra_root) +value caml_check_urgent_gc (value extra_root) { CAMLparam1 (extra_root); - if (force_major_slice) minor_collection(); + if (force_major_slice) caml_minor_collection(); CAMLreturn (extra_root); } -void realloc_ref_table (void) -{ Assert (ref_table_ptr == ref_table_limit); - Assert (ref_table_limit <= ref_table_end); - Assert (ref_table_limit >= ref_table_threshold); +void caml_realloc_ref_table (void) +{ Assert (caml_ref_table_ptr == caml_ref_table_limit); + Assert (caml_ref_table_limit <= ref_table_end); + Assert (caml_ref_table_limit >= ref_table_threshold); - if (ref_table_limit == ref_table_threshold){ + if (caml_ref_table_limit == ref_table_threshold){ caml_gc_message (0x08, "ref_table threshold crossed\n", 0); - ref_table_limit = ref_table_end; + caml_ref_table_limit = ref_table_end; urge_major_slice (); }else{ /* This will almost never happen with the bytecode interpreter. 
*/ asize_t sz; - asize_t cur_ptr = ref_table_ptr - ref_table; + asize_t cur_ptr = caml_ref_table_ptr - ref_table; Assert (force_major_slice); ref_table_size *= 2; @@ -261,7 +261,7 @@ void realloc_ref_table (void) } ref_table_end = ref_table + ref_table_size + ref_table_reserve; ref_table_threshold = ref_table + ref_table_size; - ref_table_ptr = ref_table + cur_ptr; - ref_table_limit = ref_table_end; + caml_ref_table_ptr = ref_table + cur_ptr; + caml_ref_table_limit = ref_table_end; } } diff --git a/byterun/minor_gc.h b/byterun/minor_gc.h index ad61ba091..a569d3a90 100644 --- a/byterun/minor_gc.h +++ b/byterun/minor_gc.h @@ -19,27 +19,28 @@ #include "misc.h" -CAMLextern char *young_start, *young_ptr, *young_end, *young_limit; -CAMLextern value **ref_table_ptr, **ref_table_limit; -extern asize_t minor_heap_size; -extern int in_minor_collection; +CAMLextern char *caml_young_start, *caml_young_ptr; +CAMLextern char *caml_young_end, *caml_young_limit; +CAMLextern value **caml_ref_table_ptr, **caml_ref_table_limit; +extern asize_t caml_minor_heap_size; +extern int caml_in_minor_collection; #define Is_young(val) \ (Assert (Is_block (val)), \ - (addr)(val) < (addr)young_end && (addr)(val) > (addr)young_start) + (addr)(val) < (addr)caml_young_end && (addr)(val) > (addr)caml_young_start) -extern void set_minor_heap_size (asize_t); -extern void empty_minor_heap (void); -CAMLextern void minor_collection (void); -CAMLextern void garbage_collection (void); /* for the native-code system */ -extern void realloc_ref_table (void); -extern void oldify_one (value, value *); -extern void oldify_mopup (void); +extern void caml_set_minor_heap_size (asize_t); +extern void caml_empty_minor_heap (void); +CAMLextern void caml_minor_collection (void); +CAMLextern void garbage_collection (void); /* def in asmrun/signals.c */ +extern void caml_realloc_ref_table (void); +extern void caml_oldify_one (value, value *); +extern void caml_oldify_mopup (void); #define Oldify(p) do{ \ value __oldify__v__ = *p; \ if (Is_block (__oldify__v__) && Is_young (__oldify__v__)){ \ - oldify_one (__oldify__v__, (p)); \ + caml_oldify_one (__oldify__v__, (p)); \ } \ }while(0) diff --git a/byterun/misc.c b/byterun/misc.c index 7814ca794..c89e86ec3 100644 --- a/byterun/misc.c +++ b/byterun/misc.c @@ -113,7 +113,7 @@ void caml_ext_table_init(struct ext_table * tbl, int init_capa) { tbl->size = 0; tbl->capacity = init_capa; - tbl->contents = stat_alloc(sizeof(void *) * init_capa); + tbl->contents = caml_stat_alloc(sizeof(void *) * init_capa); } int caml_ext_table_add(struct ext_table * tbl, void * data) @@ -122,7 +122,7 @@ int caml_ext_table_add(struct ext_table * tbl, void * data) if (tbl->size >= tbl->capacity) { tbl->capacity *= 2; tbl->contents = - stat_resize(tbl->contents, sizeof(void *) * tbl->capacity); + caml_stat_resize(tbl->contents, sizeof(void *) * tbl->capacity); } res = tbl->size; tbl->contents[res] = data; @@ -134,6 +134,6 @@ void caml_ext_table_free(struct ext_table * tbl, int free_entries) { int i; if (free_entries) - for (i = 0; i < tbl->size; i++) stat_free(tbl->contents[i]); - stat_free(tbl->contents); + for (i = 0; i < tbl->size; i++) caml_stat_free(tbl->contents[i]); + caml_stat_free(tbl->contents); } diff --git a/byterun/misc.h b/byterun/misc.h index 287b66d37..36ed0c319 100644 --- a/byterun/misc.h +++ b/byterun/misc.h @@ -113,14 +113,14 @@ char *caml_aligned_malloc (asize_t, int, void **); 00 -> free words in minor heap 01 -> fields of free list blocks in major heap 03 -> heap chunks deallocated by heap shrinking - 04 
-> fields deallocated by obj_truncate + 04 -> fields deallocated by [obj_truncate] 10 -> uninitialised fields of minor objects 11 -> uninitialised fields of major objects - 15 -> uninitialised words of caml_aligned_malloc blocks - 85 -> filler bytes of caml_aligned_malloc + 15 -> uninitialised words of [caml_aligned_malloc] blocks + 85 -> filler bytes of [caml_aligned_malloc] special case (byte by byte): - D7 -> uninitialised words of stat_alloc blocks + D7 -> uninitialised words of [caml_stat_alloc] blocks */ #define Debug_free_minor Debug_tag (0x00) #define Debug_free_major Debug_tag (0x01) diff --git a/byterun/mlvalues.h b/byterun/mlvalues.h index 74db88187..5f7fdad48 100644 --- a/byterun/mlvalues.h +++ b/byterun/mlvalues.h @@ -164,7 +164,7 @@ typedef opcode_t * code_t; /* NOTE: [Forward_tag] and [Infix_tag] must be just under [No_scan_tag], with [Infix_tag] the lower one. - See [oldify_one] in minor_gc.c for more details. + See [caml_oldify_one] in minor_gc.c for more details. NOTE: Update stdlib/obj.ml whenever you change the tags. */ @@ -259,8 +259,8 @@ CAMLextern int64 Int64_val(value v); /* 3- Atoms are 0-tuples. They are statically allocated once and for all. */ -CAMLextern header_t atom_table[]; -#define Atom(tag) (Val_hp (&(atom_table [(tag)]))) +CAMLextern header_t caml_atom_table[]; +#define Atom(tag) (Val_hp (&(caml_atom_table [(tag)]))) /* Is_atom tests whether a well-formed block is statically allocated outside the heap. For the bytecode system, only zero-sized block (Atoms) @@ -295,7 +295,7 @@ CAMLextern char * static_data_start, * static_data_end; /* The table of global identifiers */ -extern value global_data; +extern value caml_global_data; #endif /* CAML_MLVALUES_H */ diff --git a/byterun/obj.c b/byterun/obj.c index d15bb75dd..58042b3e7 100644 --- a/byterun/obj.c +++ b/byterun/obj.c @@ -28,18 +28,18 @@ CAMLprim value static_alloc(value size) { - return (value) stat_alloc((asize_t) Long_val(size)); + return (value) caml_stat_alloc((asize_t) Long_val(size)); } CAMLprim value static_free(value blk) { - stat_free((void *) blk); + caml_stat_free((void *) blk); return Val_unit; } CAMLprim value static_resize(value blk, value new_size) { - return (value) stat_resize((char *) blk, (asize_t) Long_val(new_size)); + return (value) caml_stat_resize((char *) blk, (asize_t) Long_val(new_size)); } CAMLprim value obj_is_block(value arg) @@ -97,8 +97,8 @@ CAMLprim value obj_dup(value arg) res = caml_alloc_small(sz, tg); for (i = 0; i < sz; i++) Field(res, i) = Field(arg, i); } else { - res = alloc_shr(sz, tg); - for (i = 0; i < sz; i++) initialize(&Field(res, i), Field(arg, i)); + res = caml_alloc_shr(sz, tg); + for (i = 0; i < sz; i++) caml_initialize(&Field(res, i), Field(arg, i)); } CAMLreturn (res); } @@ -130,7 +130,7 @@ CAMLprim value obj_truncate (value v, value newsize) can darken them as appropriate. */ if (tag < No_scan_tag) { for (i = new_wosize; i < wosize; i++){ - modify(&Field(v, i), Val_unit); + caml_modify(&Field(v, i), Val_unit); #ifdef DEBUG Field (v, i) = Debug_free_truncate; #endif diff --git a/byterun/osdeps.h b/byterun/osdeps.h index 4ee64b2de..1fb51df22 100644 --- a/byterun/osdeps.h +++ b/byterun/osdeps.h @@ -26,7 +26,7 @@ extern char * decompose_path(struct ext_table * tbl, char * path); /* Search the given file in the given list of directories. If not found, return a copy of [name]. Result is allocated with - [stat_alloc]. */ + [caml_stat_alloc]. 
*/ extern char * search_in_path(struct ext_table * path, char * name); /* Same, but search an executable name in the system path for executables. */ diff --git a/byterun/parsing.c b/byterun/parsing.c index 79f910a30..3f9ac3565 100644 --- a/byterun/parsing.c +++ b/byterun/parsing.c @@ -166,10 +166,10 @@ CAMLprim value parse_engine(struct parser_tables *tables, RESTORE; if (Is_block(arg)) { env->curr_char = Field(tables->transl_block, Tag_val(arg)); - modify(&env->lval, Field(arg, 0)); + caml_modify(&env->lval, Field(arg, 0)); } else { env->curr_char = Field(tables->transl_const, Int_val(arg)); - modify(&env->lval, Val_long(0)); + caml_modify(&env->lval, Val_long(0)); } if (parser_trace) print_token(tables, state, arg); @@ -237,7 +237,7 @@ CAMLprim value parse_engine(struct parser_tables *tables, RESTORE; push: Field(env->s_stack, sp) = Val_int(state); - modify(&Field(env->v_stack, sp), env->lval); + caml_modify(&Field(env->v_stack, sp), env->lval); Store_field (env->symb_start_stack, sp, env->symb_start); Store_field (env->symb_end_stack, sp, env->symb_end); goto loop; @@ -273,7 +273,7 @@ CAMLprim value parse_engine(struct parser_tables *tables, case SEMANTIC_ACTION_COMPUTED: RESTORE; Field(env->s_stack, sp) = Val_int(state); - modify(&Field(env->v_stack, sp), arg); + caml_modify(&Field(env->v_stack, sp), arg); asp = Int_val(env->asp); Store_field (env->symb_end_stack, sp, Field(env->symb_end_stack, asp)); if (sp > asp) { diff --git a/byterun/printexc.c b/byterun/printexc.c index 79ff8fbe4..f9e2bc174 100644 --- a/byterun/printexc.c +++ b/byterun/printexc.c @@ -108,15 +108,15 @@ void fatal_uncaught_exception(value exn) /* Perform "at_exit" processing, ignoring all exceptions that may be triggered by this */ #ifndef NATIVE_CODE - saved_backtrace_active = backtrace_active; - saved_backtrace_pos = backtrace_pos; - backtrace_active = 0; + saved_backtrace_active = caml_backtrace_active; + saved_backtrace_pos = caml_backtrace_pos; + caml_backtrace_active = 0; #endif at_exit = caml_named_value("Pervasives.do_at_exit"); - if (at_exit != NULL) callback_exn(*at_exit, Val_unit); + if (at_exit != NULL) caml_callback_exn(*at_exit, Val_unit); #ifndef NATIVE_CODE - backtrace_active = saved_backtrace_active; - backtrace_pos = saved_backtrace_pos; + caml_backtrace_active = saved_backtrace_active; + caml_backtrace_pos = saved_backtrace_pos; #endif /* Display the uncaught exception */ #ifdef HAS_UI @@ -127,7 +127,9 @@ void fatal_uncaught_exception(value exn) free(msg); /* Display the backtrace if available */ #ifndef NATIVE_CODE - if (backtrace_active && !debugger_in_use) print_exception_backtrace(); + if (caml_backtrace_active && !debugger_in_use){ + caml_print_exception_backtrace(); + } #endif /* Terminate the process */ #ifdef HAS_UI diff --git a/byterun/roots.c b/byterun/roots.c index 6378ceffb..ccbc16aa1 100644 --- a/byterun/roots.c +++ b/byterun/roots.c @@ -30,7 +30,7 @@ CAMLexport struct caml__roots_block *local_roots = NULL; void (*scan_roots_hook) (scanning_action f) = NULL; /* FIXME rename to [oldify_young_roots] and synchronise with asmrun/roots.c */ -/* Call [oldify_one] on (at least) all the roots that point to the minor +/* Call [caml_oldify_one] on (at least) all the roots that point to the minor heap. 
*/ void oldify_local_roots (void) { @@ -40,33 +40,33 @@ void oldify_local_roots (void) long i, j; /* The stack */ - for (sp = extern_sp; sp < stack_high; sp++) { - oldify_one (*sp, sp); + for (sp = caml_extern_sp; sp < caml_stack_high; sp++) { + caml_oldify_one (*sp, sp); } /* Local C roots */ /* FIXME do the old-frame trick ? */ for (lr = local_roots; lr != NULL; lr = lr->next) { for (i = 0; i < lr->ntables; i++){ for (j = 0; j < lr->nitems; j++){ sp = &(lr->tables[i][j]); - oldify_one (*sp, sp); + caml_oldify_one (*sp, sp); } } } /* Global C roots */ for (gr = caml_global_roots.forward[0]; gr != NULL; gr = gr->forward[0]) { - oldify_one(*(gr->root), gr->root); + caml_oldify_one(*(gr->root), gr->root); } /* Finalised values */ - final_do_young_roots (&oldify_one); + final_do_young_roots (&caml_oldify_one); /* Hook */ - if (scan_roots_hook != NULL) (*scan_roots_hook)(&oldify_one); + if (scan_roots_hook != NULL) (*scan_roots_hook)(&caml_oldify_one); } -/* Call [darken] on all roots */ +/* Call [caml_darken] on all roots */ void darken_all_roots (void) { - do_roots (darken); + do_roots (caml_darken); } void do_roots (scanning_action f) @@ -74,10 +74,10 @@ void do_roots (scanning_action f) struct global_root * gr; /* Global variables */ - f(global_data, &global_data); + f(caml_global_data, &caml_global_data); /* The stack and the local C roots */ - do_local_roots(f, extern_sp, stack_high, local_roots); + do_local_roots(f, caml_extern_sp, caml_stack_high, local_roots); /* Global C roots */ for (gr = caml_global_roots.forward[0]; gr != NULL; gr = gr->forward[0]) { diff --git a/byterun/signals.c b/byterun/signals.c index ce8f2ae72..13e58bc11 100644 --- a/byterun/signals.c +++ b/byterun/signals.c @@ -48,7 +48,8 @@ void process_event(void) { int signal_number; void (*async_action)(void); - if (force_major_slice) minor_collection (); /* FIXME should be check_urgent_gc */ + if (force_major_slice) caml_minor_collection (); + /* FIXME should be [caml_check_urgent_gc] */ /* If a signal arrives between the following two instructions, it will be lost. To do: use atomic swap or atomic read-and-clear for processors that support it? */ @@ -78,8 +79,8 @@ void execute_signal(int signal_number, int in_signal_handler) sigaddset(&sigs, signal_number); sigprocmask(SIG_BLOCK, &sigs, &sigs); #endif - res = callback_exn(Field(signal_handlers, signal_number), - Val_int(rev_convert_signal_number(signal_number))); + res = caml_callback_exn(Field(signal_handlers, signal_number), + Val_int(rev_convert_signal_number(signal_number))); #ifdef POSIX_SIGNALS if (! 
in_signal_handler) { /* Restore the original signal mask */ @@ -289,7 +290,7 @@ CAMLprim value install_signal_handler(value signal_number, value action) signal_handlers = caml_alloc(NSIG, 0); register_global_root(&signal_handlers); } - modify(&Field(signal_handlers, sig), Field(action, 0)); + caml_modify(&Field(signal_handlers, sig), Field(action, 0)); } CAMLreturn (res); } diff --git a/byterun/stacks.c b/byterun/stacks.c index a59783916..b6002907e 100644 --- a/byterun/stacks.c +++ b/byterun/stacks.c @@ -22,81 +22,82 @@ #include "mlvalues.h" #include "stacks.h" -CAMLexport value * stack_low; -CAMLexport value * stack_high; -CAMLexport value * stack_threshold; -CAMLexport value * extern_sp; -CAMLexport value * trapsp; -CAMLexport value * trap_barrier; -value global_data; +CAMLexport value * caml_stack_low; +CAMLexport value * caml_stack_high; +CAMLexport value * caml_stack_threshold; +CAMLexport value * caml_extern_sp; +CAMLexport value * caml_trapsp; +CAMLexport value * caml_trap_barrier; +value caml_global_data; -unsigned long max_stack_size; /* also used in gc_ctrl.c */ +unsigned long caml_max_stack_size; /* also used in gc_ctrl.c */ -void init_stack (long unsigned int initial_max_size) +void caml_init_stack (long unsigned int initial_max_size) { - stack_low = (value *) stat_alloc(Stack_size); - stack_high = stack_low + Stack_size / sizeof (value); - stack_threshold = stack_low + Stack_threshold / sizeof (value); - extern_sp = stack_high; - trapsp = stack_high; - trap_barrier = stack_high + 1; - max_stack_size = initial_max_size; + caml_stack_low = (value *) caml_stat_alloc(Stack_size); + caml_stack_high = caml_stack_low + Stack_size / sizeof (value); + caml_stack_threshold = caml_stack_low + Stack_threshold / sizeof (value); + caml_extern_sp = caml_stack_high; + caml_trapsp = caml_stack_high; + caml_trap_barrier = caml_stack_high + 1; + caml_max_stack_size = initial_max_size; caml_gc_message (0x08, "Initial stack limit: %luk bytes\n", - max_stack_size / 1024 * sizeof (value)); + caml_max_stack_size / 1024 * sizeof (value)); } -void realloc_stack(asize_t required_space) +void caml_realloc_stack(asize_t required_space) { asize_t size; value * new_low, * new_high, * new_sp; value * p; - Assert(extern_sp >= stack_low); - size = stack_high - stack_low; + Assert(caml_extern_sp >= caml_stack_low); + size = caml_stack_high - caml_stack_low; do { - if (size >= max_stack_size) raise_stack_overflow(); + if (size >= caml_max_stack_size) raise_stack_overflow(); size *= 2; - } while (size < stack_high - extern_sp + required_space); + } while (size < caml_stack_high - caml_extern_sp + required_space); caml_gc_message (0x08, "Growing stack to %luk bytes\n", (unsigned long) size * sizeof(value) / 1024); - new_low = (value *) stat_alloc(size * sizeof(value)); + new_low = (value *) caml_stat_alloc(size * sizeof(value)); new_high = new_low + size; #define shift(ptr) \ - ((char *) new_high - ((char *) stack_high - (char *) (ptr))) + ((char *) new_high - ((char *) caml_stack_high - (char *) (ptr))) - new_sp = (value *) shift(extern_sp); + new_sp = (value *) shift(caml_extern_sp); memmove((char *) new_sp, - (char *) extern_sp, - (stack_high - extern_sp) * sizeof(value)); - stat_free(stack_low); - trapsp = (value *) shift(trapsp); - trap_barrier = (value *) shift(trap_barrier); - for (p = trapsp; p < new_high; p = Trap_link(p)) + (char *) caml_extern_sp, + (caml_stack_high - caml_extern_sp) * sizeof(value)); + caml_stat_free(caml_stack_low); + caml_trapsp = (value *) shift(caml_trapsp); + caml_trap_barrier = 
(value *) shift(caml_trap_barrier); + for (p = caml_trapsp; p < new_high; p = Trap_link(p)) Trap_link(p) = (value *) shift(Trap_link(p)); - stack_low = new_low; - stack_high = new_high; - stack_threshold = stack_low + Stack_threshold / sizeof (value); - extern_sp = new_sp; + caml_stack_low = new_low; + caml_stack_high = new_high; + caml_stack_threshold = caml_stack_low + Stack_threshold / sizeof (value); + caml_extern_sp = new_sp; #undef shift } -CAMLprim value ensure_stack_capacity(value required_space) +CAMLprim value caml_ensure_stack_capacity(value required_space) { asize_t req = Long_val(required_space); - if (extern_sp - req < stack_low) realloc_stack(req); + if (caml_extern_sp - req < caml_stack_low) caml_realloc_stack(req); return Val_unit; } -void change_max_stack_size (long unsigned int new_max_size) +void caml_change_max_stack_size (long unsigned int new_max_size) { - asize_t size = stack_high - extern_sp + Stack_threshold / sizeof (value); + asize_t size = caml_stack_high - caml_extern_sp + + Stack_threshold / sizeof (value); if (new_max_size < size) new_max_size = size; - if (new_max_size != max_stack_size){ + if (new_max_size != caml_max_stack_size){ caml_gc_message (0x08, "Changing stack limit to %luk bytes\n", new_max_size * sizeof (value) / 1024); } - max_stack_size = new_max_size; + caml_max_stack_size = new_max_size; } diff --git a/byterun/stacks.h b/byterun/stacks.h index 33e5de260..ba3ab98fb 100644 --- a/byterun/stacks.h +++ b/byterun/stacks.h @@ -23,20 +23,20 @@ #include "mlvalues.h" #include "memory.h" -CAMLextern value * stack_low; -CAMLextern value * stack_high; -CAMLextern value * stack_threshold; -CAMLextern value * extern_sp; -CAMLextern value * trapsp; -CAMLextern value * trap_barrier; +CAMLextern value * caml_stack_low; +CAMLextern value * caml_stack_high; +CAMLextern value * caml_stack_threshold; +CAMLextern value * caml_extern_sp; +CAMLextern value * caml_trapsp; +CAMLextern value * caml_trap_barrier; #define Trap_pc(tp) (((code_t *)(tp))[0]) #define Trap_link(tp) (((value **)(tp))[1]) void reset_roots (void); -void init_stack (unsigned long init_max_size); -void realloc_stack (asize_t required_size); -void change_max_stack_size (unsigned long new_max_size); +void caml_init_stack (unsigned long init_max_size); +void caml_realloc_stack (asize_t required_size); +void caml_change_max_stack_size (unsigned long new_max_size); #endif /* CAML_STACKS_H */ diff --git a/byterun/startup.c b/byterun/startup.c index bd487f3ff..e7372212a 100644 --- a/byterun/startup.c +++ b/byterun/startup.c @@ -63,14 +63,14 @@ extern int parser_trace; -CAMLexport header_t atom_table[256]; +CAMLexport header_t caml_atom_table[256]; /* Initialize the atom table */ static void init_atoms(void) { int i; - for(i = 0; i < 256; i++) atom_table[i] = Make_header(0, i, Caml_white); + for(i = 0; i < 256; i++) caml_atom_table[i] = Make_header(0, i, Caml_white); } /* Read the trailer of a bytecode file */ @@ -94,8 +94,8 @@ static int read_trailer(int fd, struct exec_trailer *trail) return BAD_BYTECODE; } -int attempt_open(char **name, struct exec_trailer *trail, - int do_open_script) +int caml_attempt_open(char **name, struct exec_trailer *trail, + int do_open_script) { char * truename; int fd; @@ -130,12 +130,12 @@ int attempt_open(char **name, struct exec_trailer *trail, /* Read the section descriptors */ -void read_section_descriptors(int fd, struct exec_trailer *trail) +void caml_read_section_descriptors(int fd, struct exec_trailer *trail) { int toc_size, i; toc_size = trail->num_sections * 8; - 
trail->section = stat_alloc(toc_size); + trail->section = caml_stat_alloc(toc_size); lseek(fd, - (long) (TRAILER_SIZE + toc_size), SEEK_END); if (read(fd, (char *) trail->section, toc_size) != toc_size) caml_fatal_error("Fatal error: cannot read section table\n"); @@ -148,7 +148,7 @@ void read_section_descriptors(int fd, struct exec_trailer *trail) Return the length of the section data in bytes, or -1 if no section found with that name. */ -int32 seek_optional_section(int fd, struct exec_trailer *trail, char *name) +int32 caml_seek_optional_section(int fd, struct exec_trailer *trail, char *name) { long ofs; int i; @@ -167,9 +167,9 @@ int32 seek_optional_section(int fd, struct exec_trailer *trail, char *name) /* Position fd at the beginning of the section having the given name. Return the length of the section data in bytes. */ -int32 seek_section(int fd, struct exec_trailer *trail, char *name) +int32 caml_seek_section(int fd, struct exec_trailer *trail, char *name) { - int32 len = seek_optional_section(fd, trail, name); + int32 len = caml_seek_optional_section(fd, trail, name); if (len == -1) caml_fatal_error_arg("Fatal_error: section `%s' is missing\n", name); return len; @@ -183,9 +183,9 @@ static char * read_section(int fd, struct exec_trailer *trail, char *name) int32 len; char * data; - len = seek_optional_section(fd, trail, name); + len = caml_seek_optional_section(fd, trail, name); if (len == -1) return NULL; - data = stat_alloc(len + 1); + data = caml_stat_alloc(len + 1); if (read(fd, data, len) != len) caml_fatal_error_arg("Fatal error: error reading section %s\n", name); data[len] = 0; @@ -248,7 +248,7 @@ static int parse_command_line(char **argv) exit(0); break; case 'b': - init_backtrace(); + caml_init_backtrace(); break; case 'I': if (argv[i + 1] != NULL) { @@ -297,7 +297,7 @@ static void parse_camlrunparam(void) case 'o': scanmult (opt, &percent_free_init); break; case 'O': scanmult (opt, &max_percent_free_init); break; case 'v': scanmult (opt, &caml_verb_gc); break; - case 'b': init_backtrace(); break; + case 'b': caml_init_backtrace(); break; case 'p': parser_trace = 1; break; } } @@ -341,13 +341,13 @@ CAMLexport void caml_main(char **argv) if (executable_name(proc_self_exe, sizeof(proc_self_exe)) == 0) exe_name = proc_self_exe; #endif - fd = attempt_open(&exe_name, &trail, 0); + fd = caml_attempt_open(&exe_name, &trail, 0); if (fd < 0) { pos = parse_command_line(argv); if (argv[pos] == 0) caml_fatal_error("No bytecode file specified.\n"); exe_name = argv[pos]; - fd = attempt_open(&exe_name, &trail, 1); + fd = caml_attempt_open(&exe_name, &trail, 1); switch(fd) { case FILE_NOT_FOUND: caml_fatal_error_arg("Fatal error: cannot find file %s\n", argv[pos]); @@ -360,18 +360,18 @@ CAMLexport void caml_main(char **argv) } } /* Read the table of contents (section descriptors) */ - read_section_descriptors(fd, &trail); + caml_read_section_descriptors(fd, &trail); /* Initialize the abstract machine */ init_gc (minor_heap_init, heap_size_init, heap_chunk_init, percent_free_init, max_percent_free_init); - init_stack (max_stack_init); + caml_init_stack (max_stack_init); init_atoms(); /* Initialize the interpreter */ interprete(NULL, 0); /* Initialize the debugger, if needed */ debugger_init(); /* Load the code */ - code_size = seek_section(fd, &trail, "CODE"); + code_size = caml_seek_section(fd, &trail, "CODE"); load_code(fd, code_size); /* Build the table of primitives */ shared_lib_path = read_section(fd, &trail, "DLPT"); @@ -379,18 +379,18 @@ CAMLexport void caml_main(char **argv) 
req_prims = read_section(fd, &trail, "PRIM"); if (req_prims == NULL) caml_fatal_error("Fatal error: no PRIM section\n"); build_primitive_table(shared_lib_path, shared_libs, req_prims); - stat_free(shared_lib_path); - stat_free(shared_libs); - stat_free(req_prims); + caml_stat_free(shared_lib_path); + caml_stat_free(shared_libs); + caml_stat_free(req_prims); /* Load the globals */ - seek_section(fd, &trail, "DATA"); + caml_seek_section(fd, &trail, "DATA"); chan = caml_open_descriptor_in(fd); - global_data = input_val(chan); + caml_global_data = input_val(chan); caml_close_channel(chan); /* this also closes fd */ - stat_free(trail.section); + caml_stat_free(trail.section); /* Ensure that the globals are in the major heap. */ - oldify_one (global_data, &global_data); - oldify_mopup (); + caml_oldify_one (caml_global_data, &caml_global_data); + caml_oldify_mopup (); /* Initialize system libraries */ init_exceptions(); caml_sys_init(exe_name, argv + pos); @@ -405,7 +405,7 @@ CAMLexport void caml_main(char **argv) if (Is_exception_result(res)) { exn_bucket = Extract_exception(res); if (debugger_in_use) { - extern_sp = &exn_bucket; /* The debugger needs the exception value. */ + caml_extern_sp = &exn_bucket; /* The debugger needs the exception value.*/ debugger(UNCAUGHT_EXC); } fatal_uncaught_exception(exn_bucket); @@ -429,7 +429,7 @@ CAMLexport void caml_startup_code(code_t code, asize_t code_size, /* Initialize the abstract machine */ init_gc (minor_heap_init, heap_size_init, heap_chunk_init, percent_free_init, max_percent_free_init); - init_stack (max_stack_init); + caml_init_stack (max_stack_init); init_atoms(); /* Initialize the interpreter */ interprete(NULL, 0); @@ -442,10 +442,10 @@ CAMLexport void caml_startup_code(code_t code, asize_t code_size, prim_table.size = prim_table.capacity = -1; prim_table.contents = (void **) builtin_cprim; /* Load the globals */ - global_data = input_val_from_string((value)data, 0); + caml_global_data = input_val_from_string((value)data, 0); /* Ensure that the globals are in the major heap. 
*/ - oldify_one (global_data, &global_data); - oldify_mopup (); + caml_oldify_one (caml_global_data, &caml_global_data); + caml_oldify_mopup (); /* Run the code */ init_exceptions(); caml_sys_init("", argv); diff --git a/byterun/startup.h b/byterun/startup.h index 61f7dcc81..807012bf9 100644 --- a/byterun/startup.h +++ b/byterun/startup.h @@ -25,12 +25,12 @@ CAMLextern void caml_startup_code(code_t code, asize_t code_size, enum { FILE_NOT_FOUND = -1, BAD_BYTECODE = -2 }; -extern int attempt_open(char **name, struct exec_trailer *trail, - int do_open_script); -extern void read_section_descriptors(int fd, struct exec_trailer *trail); -extern int32 seek_optional_section(int fd, struct exec_trailer *trail, - char *name); -extern int32 seek_section(int fd, struct exec_trailer *trail, char *name); +extern int caml_attempt_open(char **name, struct exec_trailer *trail, + int do_open_script); +extern void caml_read_section_descriptors(int fd, struct exec_trailer *trail); +extern int32 caml_seek_optional_section(int fd, struct exec_trailer *trail, + char *name); +extern int32 caml_seek_section(int fd, struct exec_trailer *trail, char *name); #endif /* CAML_STARTUP_H */ diff --git a/byterun/sys.c b/byterun/sys.c index 4fef7561f..9a26a8559 100644 --- a/byterun/sys.c +++ b/byterun/sys.c @@ -154,7 +154,7 @@ CAMLprim value caml_sys_open(value path, value flags, value perm) int fd; char * p; - p = stat_alloc(caml_string_length(path) + 1); + p = caml_stat_alloc(caml_string_length(path) + 1); strcpy(p, String_val(path)); /* open on a named FIFO can block (PR#1533) */ enter_blocking_section(); @@ -164,7 +164,7 @@ CAMLprim value caml_sys_open(value path, value flags, value perm) #endif ); leave_blocking_section(); - stat_free(p); + caml_stat_free(p); if (fd == -1) caml_sys_error(path); #if defined(F_SETFD) && defined(FD_CLOEXEC) fcntl(fd, F_SETFD, FD_CLOEXEC); @@ -273,12 +273,12 @@ CAMLprim value caml_sys_system_command(value command) unsigned long len; len = caml_string_length (command); - buf = stat_alloc (len + 1); + buf = caml_stat_alloc (len + 1); memmove (buf, String_val (command), len + 1); enter_blocking_section (); status = system(buf); leave_blocking_section (); - stat_free(buf); + caml_stat_free(buf); if (status == -1) caml_sys_error(command); if (WIFEXITED(status)) retcode = WEXITSTATUS(status); diff --git a/byterun/unix.c b/byterun/unix.c index 4ba930910..64eab3ae0 100644 --- a/byterun/unix.c +++ b/byterun/unix.c @@ -51,7 +51,7 @@ char * decompose_path(struct ext_table * tbl, char * path) int n; if (path == NULL) return NULL; - p = stat_alloc(strlen(path) + 1); + p = caml_stat_alloc(strlen(path) + 1); strcpy(p, path); q = p; while (1) { @@ -75,16 +75,16 @@ char * search_in_path(struct ext_table * path, char * name) if (*p == '/') goto not_found; } for (i = 0; i < path->size; i++) { - fullname = stat_alloc(strlen((char *)(path->contents[i])) + - strlen(name) + 2); + fullname = caml_stat_alloc(strlen((char *)(path->contents[i])) + + strlen(name) + 2); strcpy(fullname, (char *)(path->contents[i])); if (fullname[0] != 0) strcat(fullname, "/"); strcat(fullname, name); if (stat(fullname, &st) == 0 && S_ISREG(st.st_mode)) return fullname; - stat_free(fullname); + caml_stat_free(fullname); } not_found: - fullname = stat_alloc(strlen(name) + 1); + fullname = caml_stat_alloc(strlen(name) + 1); strcpy(fullname, name); return fullname; } @@ -113,18 +113,18 @@ static char * cygwin_search_exe_in_path(struct ext_table * path, char * name) if (*p == '/' || *p == '\\') goto not_found; } for (i = 0; i < 
path->size; i++) { - fullname = stat_alloc(strlen((char *)(path->contents[i])) + - strlen(name) + 6); + fullname = caml_stat_alloc(strlen((char *)(path->contents[i])) + + strlen(name) + 6); strcpy(fullname, (char *)(path->contents[i])); strcat(fullname, "/"); strcat(fullname, name); if (cygwin_file_exists(fullname)) return fullname; strcat(fullname, ".exe"); if (cygwin_file_exists(fullname)) return fullname; - stat_free(fullname); + caml_stat_free(fullname); } not_found: - fullname = stat_alloc(strlen(name) + 5); + fullname = caml_stat_alloc(strlen(name) + 5); strcpy(fullname, name); if (cygwin_file_exists(fullname)) return fullname; strcat(fullname, ".exe"); @@ -148,19 +148,19 @@ char * search_exe_in_path(char * name) #else res = cygwin_search_exe_in_path(&path, name); #endif - stat_free(tofree); + caml_stat_free(tofree); caml_ext_table_free(&path, 0); return res; } char * search_dll_in_path(struct ext_table * path, char * name) { - char * dllname = stat_alloc(strlen(name) + 4); + char * dllname = caml_stat_alloc(strlen(name) + 4); char * res; strcpy(dllname, name); strcat(dllname, ".so"); res = search_in_path(path, dllname); - stat_free(dllname); + caml_stat_free(dllname); return res; } @@ -347,7 +347,7 @@ int caml_read_directory(char * dirname, struct ext_table * contents) e = readdir(d); if (e == NULL) break; if (strcmp(e->d_name, ".") == 0 || strcmp(e->d_name, "..") == 0) continue; - p = stat_alloc(strlen(e->d_name) + 1); + p = caml_stat_alloc(strlen(e->d_name) + 1); strcpy(p, e->d_name); caml_ext_table_add(contents, p); } diff --git a/byterun/weak.c b/byterun/weak.c index 9c9991da2..a277fd6f1 100644 --- a/byterun/weak.c +++ b/byterun/weak.c @@ -34,7 +34,7 @@ CAMLprim value weak_create (value len) size = Long_val (len) + 1; if (size <= 0 || size > Max_wosize) invalid_argument ("Weak.create"); - res = alloc_shr (size, Abstract_tag); + res = caml_alloc_shr (size, Abstract_tag); for (i = 1; i < size; i++) Field (res, i) = weak_none; Field (res, 0) = weak_list_head; weak_list_head = res; @@ -76,8 +76,8 @@ CAMLprim value weak_get (value ar, value n) res = None_val; }else{ elt = Field (ar, offset); - if (gc_phase == Phase_mark && Is_block (elt) && Is_in_heap (elt)){ - darken (elt, NULL); + if (caml_gc_phase == Phase_mark && Is_block (elt) && Is_in_heap (elt)){ + caml_darken (elt, NULL); } res = caml_alloc_small (1, Some_tag); Field (res, 0) = elt; diff --git a/byterun/win32.c b/byterun/win32.c index badcf63b3..f465256dd 100644 --- a/byterun/win32.c +++ b/byterun/win32.c @@ -43,7 +43,7 @@ char * decompose_path(struct ext_table * tbl, char * path) int n; if (path == NULL) return NULL; - p = stat_alloc(strlen(path) + 1); + p = caml_stat_alloc(strlen(path) + 1); strcpy(p, path); q = p; while (1) { @@ -67,18 +67,18 @@ char * search_in_path(struct ext_table * path, char * name) if (*p == '/' || *p == '\\') goto not_found; } for (i = 0; i < path->size; i++) { - fullname = stat_alloc(strlen((char *)(path->contents[i])) + - strlen(name) + 2); + fullname = caml_stat_alloc(strlen((char *)(path->contents[i])) + + strlen(name) + 2); strcpy(fullname, (char *)(path->contents[i])); strcat(fullname, "\\"); strcat(fullname, name); caml_gc_message(0x100, "Searching %s\n", (unsigned long) fullname); if (stat(fullname, &st) == 0 && S_ISREG(st.st_mode)) return fullname; - stat_free(fullname); + caml_stat_free(fullname); } not_found: caml_gc_message(0x100, "%s not found in search path\n", (unsigned long) name); - fullname = stat_alloc(strlen(name) + 1); + fullname = caml_stat_alloc(strlen(name) + 1); 
strcpy(fullname, name); return fullname; } @@ -86,7 +86,7 @@ char * search_in_path(struct ext_table * path, char * name) CAMLexport char * search_exe_in_path(char * name) { #define MAX_PATH_LENGTH 512 - char * fullname = stat_alloc(512); + char * fullname = caml_stat_alloc(512); char * filepart; if (! SearchPath(NULL, /* use system search path */ @@ -101,12 +101,12 @@ CAMLexport char * search_exe_in_path(char * name) char * search_dll_in_path(struct ext_table * path, char * name) { - char * dllname = stat_alloc(strlen(name) + 5); + char * dllname = caml_stat_alloc(strlen(name) + 5); char * res; strcpy(dllname, name); strcat(dllname, ".dll"); res = search_in_path(path, dllname); - stat_free(dllname); + caml_stat_free(dllname); return res; } @@ -343,15 +343,15 @@ int caml_read_directory(char * dirname, struct ext_table * contents) struct _finddata_t fileinfo; char * p; - template = stat_alloc(strlen(dirname) + 5); + template = caml_stat_alloc(strlen(dirname) + 5); strcpy(template, dirname); strcat(template, "\\*.*"); h = _findfirst(template, &fileinfo); - stat_free(template); + caml_stat_free(template); if (h == -1) return errno == ENOENT ? 0 : -1; do { if (strcmp(fileinfo.name, ".") != 0 && strcmp(fileinfo.name, "..") != 0) { - p = stat_alloc(strlen(fileinfo.name) + 1); + p = caml_stat_alloc(strlen(fileinfo.name) + 1); strcpy(p, fileinfo.name); caml_ext_table_add(contents, p); } diff --git a/stdlib/array.ml b/stdlib/array.ml index 88428fd78..0b021918f 100644 --- a/stdlib/array.ml +++ b/stdlib/array.ml @@ -20,8 +20,8 @@ external get: 'a array -> int -> 'a = "%array_safe_get" external set: 'a array -> int -> 'a -> unit = "%array_safe_set" external unsafe_get: 'a array -> int -> 'a = "%array_unsafe_get" external unsafe_set: 'a array -> int -> 'a -> unit = "%array_unsafe_set" -external make: int -> 'a -> 'a array = "make_vect" -external create: int -> 'a -> 'a array = "make_vect" +external make: int -> 'a -> 'a array = "caml_make_vect" +external create: int -> 'a -> 'a array = "caml_make_vect" let init l f = if l = 0 then [||] else diff --git a/stdlib/array.mli b/stdlib/array.mli index 115d3dd1f..4712d8bf6 100644 --- a/stdlib/array.mli +++ b/stdlib/array.mli @@ -35,7 +35,7 @@ external set : 'a array -> int -> 'a -> unit = "%array_safe_set" Raise [Invalid_argument "index out of bounds"] if [n] is outside the range 0 to [Array.length a - 1]. *) -external make : int -> 'a -> 'a array = "make_vect" +external make : int -> 'a -> 'a array = "caml_make_vect" (** [Array.make n x] returns a fresh array of length [n], initialized with [x]. All the elements of this new array are initially @@ -48,7 +48,7 @@ external make : int -> 'a -> 'a array = "make_vect" If the value of [x] is a floating-point number, then the maximum size is only [Sys.max_array_length / 2].*) -external create : int -> 'a -> 'a array = "make_vect" +external create : int -> 'a -> 'a array = "caml_make_vect" (** @deprecated [Array.create] is an alias for {!Array.make}. *) val init : int -> (int -> 'a) -> 'a array diff --git a/stdlib/arrayLabels.mli b/stdlib/arrayLabels.mli index 0616ada1e..20e953647 100644 --- a/stdlib/arrayLabels.mli +++ b/stdlib/arrayLabels.mli @@ -34,7 +34,7 @@ external set : 'a array -> int -> 'a -> unit = "%array_safe_set" 0 to [Array.length a - 1]. You can also write [a.(n) <- x] instead of [Array.set a n x]. *) -external make : int -> 'a -> 'a array = "make_vect" +external make : int -> 'a -> 'a array = "caml_make_vect" (** [Array.make n x] returns a fresh array of length [n], initialized with [x]. 
All the elements of this new array are initially @@ -47,7 +47,7 @@ external make : int -> 'a -> 'a array = "make_vect" If the value of [x] is a floating-point number, then the maximum size is only [Sys.max_array_length / 2].*) -external create : int -> 'a -> 'a array = "make_vect" +external create : int -> 'a -> 'a array = "caml_make_vect" (** @deprecated [Array.create] is an alias for {!ArrayLabels.make}. *) val init : int -> f:(int -> 'a) -> 'a array diff --git a/stdlib/callback.ml b/stdlib/callback.ml index d070c2bb6..819f9d3f4 100644 --- a/stdlib/callback.ml +++ b/stdlib/callback.ml @@ -15,7 +15,8 @@ (* Registering Caml values with the C runtime for later callbacks *) -external register_named_value: string -> Obj.t -> unit = "register_named_value" +external register_named_value : string -> Obj.t -> unit + = "caml_register_named_value" let register name v = register_named_value name (Obj.repr v) diff --git a/stdlib/camlinternalOO.ml b/stdlib/camlinternalOO.ml index 29e81dc8c..95f07456f 100644 --- a/stdlib/camlinternalOO.ml +++ b/stdlib/camlinternalOO.ml @@ -450,7 +450,7 @@ let make_class_store pub_meths class_init init_table = let create_object table = (* XXX Appel de [obj_block] *) let obj = Obj.new_block Obj.object_tag table.size in - (* XXX Appel de [modify] *) + (* XXX Appel de [caml_modify] *) Obj.set_field obj 0 (Obj.repr table.buckets); set_id obj last_id; (Obj.obj obj) @@ -459,7 +459,7 @@ let create_object_opt obj_0 table = if (Obj.magic obj_0 : bool) then obj_0 else begin (* XXX Appel de [obj_block] *) let obj = Obj.new_block Obj.object_tag table.size in - (* XXX Appel de [modify] *) + (* XXX Appel de [caml_modify] *) Obj.set_field obj 0 (Obj.repr table.buckets); set_id obj last_id; (Obj.obj obj) diff --git a/stdlib/digest.ml b/stdlib/digest.ml index 04603303b..78a45d54e 100644 --- a/stdlib/digest.ml +++ b/stdlib/digest.ml @@ -17,8 +17,8 @@ type t = string -external unsafe_string: string -> int -> int -> t = "md5_string" -external channel: in_channel -> int -> t = "md5_chan" +external unsafe_string: string -> int -> int -> t = "caml_md5_string" +external channel: in_channel -> int -> t = "caml_md5_chan" let string str = unsafe_string str 0 (String.length str) diff --git a/stdlib/digest.mli b/stdlib/digest.mli index 9c94d60f2..72db79948 100644 --- a/stdlib/digest.mli +++ b/stdlib/digest.mli @@ -32,7 +32,7 @@ val substring : string -> int -> int -> t of [s] starting at character number [ofs] and containing [len] characters. *) -external channel : in_channel -> int -> t = "md5_chan" +external channel : in_channel -> int -> t = "caml_md5_chan" (** If [len] is nonnegative, [Digest.channel ic len] reads [len] characters from channel [ic] and returns their digest, or raises [End_of_file] if end-of-file is reached before [len] characters diff --git a/stdlib/lazy.ml b/stdlib/lazy.ml index 84158b257..6642e1ec9 100644 --- a/stdlib/lazy.ml +++ b/stdlib/lazy.ml @@ -37,7 +37,7 @@ fancy. We cannot use representation (3) for a [float Lazy.t] because - [make_array] assumes that only a [float] value can have tag + [caml_make_array] assumes that only a [float] value can have tag [Double_tag]. 
We have to use the built-in type constructor [lazy_t] to diff --git a/stdlib/lexing.ml b/stdlib/lexing.ml index 1899f78d8..0211144ef 100644 --- a/stdlib/lexing.ml +++ b/stdlib/lexing.ml @@ -57,8 +57,9 @@ type lex_tables = lex_check_code : string; lex_code: string;} -external c_engine: lex_tables -> int -> lexbuf -> int = "lex_engine" -external c_new_engine: lex_tables -> int -> lexbuf -> int = "new_lex_engine" +external c_engine : lex_tables -> int -> lexbuf -> int = "caml_lex_engine" +external c_new_engine : lex_tables -> int -> lexbuf -> int + = "caml_new_lex_engine" let engine tbl state buf = let result = c_engine tbl state buf in diff --git a/stdlib/pervasives.ml b/stdlib/pervasives.ml index f5ea6a349..2d653f616 100644 --- a/stdlib/pervasives.ml +++ b/stdlib/pervasives.ml @@ -424,6 +424,7 @@ let exit retcode = do_at_exit (); sys_exit retcode -external register_named_value: string -> 'a -> unit = "register_named_value" +external register_named_value : string -> 'a -> unit + = "caml_register_named_value" let _ = register_named_value "Pervasives.do_at_exit" do_at_exit diff --git a/stdlib/stdLabels.mli b/stdlib/stdLabels.mli index 509eccb62..4750bbffb 100644 --- a/stdlib/stdLabels.mli +++ b/stdlib/stdLabels.mli @@ -27,8 +27,8 @@ module Array : external length : 'a array -> int = "%array_length" external get : 'a array -> int -> 'a = "%array_safe_get" external set : 'a array -> int -> 'a -> unit = "%array_safe_set" - external make : int -> 'a -> 'a array = "make_vect" - external create : int -> 'a -> 'a array = "make_vect" + external make : int -> 'a -> 'a array = "caml_make_vect" + external create : int -> 'a -> 'a array = "caml_make_vect" val init : int -> f:(int -> 'a) -> 'a array val make_matrix : dimx:int -> dimy:int -> 'a -> 'a array array val create_matrix : dimx:int -> dimy:int -> 'a -> 'a array array diff --git a/stdlib/sys.ml b/stdlib/sys.ml index 32889830d..fd88f44e2 100644 --- a/stdlib/sys.ml +++ b/stdlib/sys.ml @@ -78,4 +78,4 @@ let catch_break on = (* OCaml version string, must be in the format described in sys.mli. *) -let ocaml_version = "3.07+8 (2003-12-29)";; +let ocaml_version = "3.07+9 (2003-12-30)";; diff --git a/stdlib/sys.mli b/stdlib/sys.mli index e1b871b08..bad9d86ef 100644 --- a/stdlib/sys.mli +++ b/stdlib/sys.mli @@ -188,6 +188,7 @@ val catch_break : bool -> unit val ocaml_version : string;; (** [ocaml_version] is the version of Objective Caml. - It is a string of the form ["major.minor[additional-info]"] - Where major and minor are integers, and [additional-info] is - a string that is empty or starts with a '+'. *) + It is a string of the form ["major.minor[.patchlevel][+additional-info]"] + Where [major], [minor], and [patchlevel] are integers, and + [additional-info] is an arbitrary string. The [[.patchlevel]] and + [[+additional-info]] parts may be absent. *) diff --git a/toplevel/topdirs.ml b/toplevel/topdirs.ml index f57e77333..18498772a 100644 --- a/toplevel/topdirs.ml +++ b/toplevel/topdirs.ml @@ -198,7 +198,7 @@ let _ = Hashtbl.add directive_table "remove_printer" (* The trace *) -external current_environment: unit -> Obj.t = "get_current_environment" +external current_environment: unit -> Obj.t = "caml_get_current_environment" let tracing_function_ptr = get_code_pointer |
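The memory.c and memory.h hunks above settle the public names of the write barrier: caml_alloc_shr allocates directly in the major heap, caml_initialize fills the fields of such a freshly allocated block, and caml_modify (usually via the Store_field macro) is required for any later update of a field that may hold a pointer. Below is a minimal sketch of a user primitive written against the renamed entry points; it assumes the installed <caml/...> headers, and the names example_make_block and example_set_slot are invented for illustration, not part of the patch.

    #include <caml/mlvalues.h>
    #include <caml/memory.h>
    #include <caml/alloc.h>

    /* Build an n-element block whose fields all start out as [init]. */
    CAMLprim value example_make_block(value n, value init)
    {
      CAMLparam2(n, init);
      CAMLlocal1(res);
      mlsize_t i, sz = Long_val(n);

      if (sz == 0) CAMLreturn(Atom(0));  /* zero-sized blocks are shared atoms */
      if (sz <= Max_young_wosize) {
        /* Fresh minor-heap block: plain Field assignment is fine because
           nothing can have allocated since caml_alloc_small returned. */
        res = caml_alloc_small(sz, 0);
        for (i = 0; i < sz; i++) Field(res, i) = init;
      } else {
        /* Major-heap block: every field of a caml_alloc_shr block must be
           set with caml_initialize, as caml_realloc_global and obj_dup do. */
        res = caml_alloc_shr(sz, 0);
        for (i = 0; i < sz; i++) caml_initialize(&Field(res, i), init);
      }
      CAMLreturn(res);
    }

    /* Later mutation of a field that may hold a pointer must go through
       caml_modify (here via Store_field) so that the remembered set
       (caml_ref_table) used by the minor collector stays accurate. */
    CAMLprim value example_set_slot(value block, value i, value v)
    {
      Store_field(block, Long_val(i), v);
      return Val_unit;
    }

The sz == 0 case relies on the shared caml_atom_table exported through the Atom macro in the mlvalues.h hunk above.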
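The caml_stat_alloc / caml_stat_free / caml_stat_resize trio renamed in memory.c, and used throughout misc.c, sys.c, unix.c and win32.c above, manages plain C-heap memory that the garbage collector never scans or moves. A small sketch of the usual pattern, mirroring caml_sys_open: copy an OCaml string out of the GC heap before doing anything that may allocate or block. The helper name dup_ocaml_string is invented.

    #include <string.h>
    #include <caml/mlvalues.h>
    #include <caml/memory.h>

    /* Copy an OCaml string into C-heap storage so it stays valid across
       allocations and blocking sections; the caller releases it with
       caml_stat_free and could grow it later with caml_stat_resize. */
    static char *dup_ocaml_string(value s)
    {
      mlsize_t len = caml_string_length(s);
      char *p = caml_stat_alloc(len + 1);  /* raises Out_of_memory on failure */

      memcpy(p, String_val(s), len);
      p[len] = '\0';
      return p;
    }

caml_ext_table_add in misc.c above shows the matching growth pattern built on caml_stat_resize.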
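printexc.c and signals.c above now call back into OCaml through caml_named_value and caml_callback_exn. The sketch below shows the same pattern from user C code, assuming a closure registered on the OCaml side with Callback.register "example.handler" (an invented name); the _exn variant encodes a raised exception in its result instead of letting it unwind through the C frames.

    #include <stdio.h>
    #include <caml/mlvalues.h>
    #include <caml/callback.h>

    void run_registered_handler(int arg)
    {
      static value *handler = NULL;
      value res;

      if (handler == NULL)
        handler = caml_named_value("example.handler"); /* NULL if never registered */
      if (handler == NULL) return;

      res = caml_callback_exn(*handler, Val_int(arg));
      if (Is_exception_result(res)) {
        /* The exception itself is Extract_exception(res); it is simply dropped
           here, much as printexc.c ignores exceptions from the at_exit call. */
        fprintf(stderr, "example.handler raised an exception (ignored)\n");
      }
    }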
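roots.c above routes every extra root through a scanning action: caml_oldify_one while the minor heap is being emptied, caml_darken during major-heap marking. Runtime-internal C code can expose its own roots the same way through scan_roots_hook; the sketch below assumes the declarations of scanning_action and scan_roots_hook from byterun/roots.h at this revision, and all my_* names are invented.

    #include "mlvalues.h"
    #include "misc.h"
    #include "roots.h"    /* scanning_action, scan_roots_hook (internal) */

    #define MY_NROOTS 16
    static value my_roots[MY_NROOTS];
    static void (*prev_scan_roots_hook)(scanning_action) = NULL;

    /* Hand every extra root to the collector's scanning action.  The action
       may relocate the value, so its address is passed alongside it, exactly
       as oldify_local_roots does for the bytecode stack above. */
    static void my_scan_roots(scanning_action action)
    {
      int i;
      for (i = 0; i < MY_NROOTS; i++) action(my_roots[i], &my_roots[i]);
      if (prev_scan_roots_hook != NULL) prev_scan_roots_hook(action);
    }

    void my_roots_init(void)
    {
      int i;
      for (i = 0; i < MY_NROOTS; i++) my_roots[i] = Val_unit;
      prev_scan_roots_hook = scan_roots_hook;  /* chain any existing hook */
      scan_roots_hook = my_scan_roots;
    }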
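In the stdlib files above only the string to the right of = changes in each external declaration: it names the C symbol that implements the primitive, and those symbols now carry the caml_ prefix so the runtime stops claiming generic identifiers such as modify or make_vect. A user primitive follows the same two-sided convention; example_gcd and the external shown in the comment are illustrative only.

    #include <caml/mlvalues.h>

    /* OCaml side (illustrative):
         external gcd : int -> int -> int = "example_gcd"
       The quoted string must match the C symbol defined below. */
    CAMLprim value example_gcd(value va, value vb)
    {
      long a = Long_val(va), b = Long_val(vb);

      if (a < 0) a = -a;
      if (b < 0) b = -b;
      while (b != 0) { long t = a % b; a = b; b = t; }
      return Val_long(a);  /* ints are unboxed: no allocation, no CAMLparam needed */
    }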
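The sys.mli hunk above redocuments Sys.ocaml_version as "major.minor[.patchlevel][+additional-info]", with the bracketed parts optional. A tolerant C parser for that documented shape could look as follows; the struct and function names are invented and this is only a sketch, not code from the distribution.

    #include <stdio.h>
    #include <string.h>

    struct ocaml_version { int major, minor, patchlevel; char extra[64]; };

    /* Split "3.07", "3.08.1" or "3.07+9 (2003-12-30)" according to the format
       documented in sys.mli; returns 0 on success, -1 if even "major.minor"
       is missing. */
    static int parse_ocaml_version(const char *s, struct ocaml_version *v)
    {
      int n = 0;

      v->major = v->minor = v->patchlevel = 0;
      v->extra[0] = '\0';
      if (sscanf(s, "%d.%d%n", &v->major, &v->minor, &n) < 2) return -1;
      s += n;
      if (*s == '.' && sscanf(s, ".%d%n", &v->patchlevel, &n) >= 1) s += n;
      if (*s == '+') {
        strncpy(v->extra, s + 1, sizeof v->extra - 1);
        v->extra[sizeof v->extra - 1] = '\0';
      }
      return 0;
    }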