diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86FOREIGNCALL.sml b/mlsource/MLCompiler/CodeTree/X86Code/X86FOREIGNCALL.sml index bbf00dd8..08c792dd 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86FOREIGNCALL.sml +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86FOREIGNCALL.sml @@ -1,1721 +1,1724 @@ (* - Copyright (c) 2016-19 David C.J. Matthews + Copyright (c) 2016-20 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86FOREIGNCALL( structure X86CODE: X86CODESIG structure X86OPTIMISE: sig type operation type code type operations = operation list type closureRef (* Optimise and code-generate. *) val generateCode: {code: code, ops: operations, labelCount: int, resultClosure: closureRef} -> unit structure Sharing: sig type operation = operation type code = code type closureRef = closureRef end end structure DEBUG: DEBUGSIG structure CODE_ARRAY: CODEARRAYSIG sharing X86CODE.Sharing = X86OPTIMISE.Sharing = CODE_ARRAY.Sharing ): FOREIGNCALLSIG = struct open X86CODE open Address open CODE_ARRAY (* Unix X64. The first six arguments are in rdi, rsi, rdx, rcx, r8, r9. The rest are on the stack. Windows X64. The first four arguments are in rcx, rdx, r8 and r9. The rest are on the stack. The caller must ensure the stack is aligned on 16-byte boundary and must allocate 32-byte save area for the register args. rbx, rbp, rdi, rsi, rsp, r12-r15 are saved by the called function. X86/32. Arguments are pushed to the stack. ebx, edi, esi, ebp and esp are saved by the called function. We use esi to hold the argument data pointer and edi to save the ML stack pointer Our ML conventions use eax, ebx for the first two arguments in X86/32, rax, ebx, r8, r9, r10 for the first five arguments in X86/64 and rax, rsi, r8, r9 and r10 for the first five arguments in X86/64-32 bit. *) val memRegSize = 0 val (polyWordOpSize, nativeWordOpSize) = case targetArch of Native32Bit => (OpSize32, OpSize32) | Native64Bit => (OpSize64, OpSize64) | ObjectId32Bit => (OpSize32, OpSize64) (* Ebx/Rbx is used for the second argument on the native architectures but is replaced by esi on the object ID arch because ebx is used as the global base register. 
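   Purely as an illustration (not part of this module), the ML argument
   registers described above can be summarised as:

       fun mlArgRegs Native32Bit   = [eax, ebx]
         | mlArgRegs Native64Bit   = [eax, ebx, r8, r9, r10]
         | mlArgRegs ObjectId32Bit = [eax, esi, r8, r9, r10]

   with any further arguments on the ML stack; hence the choice made for
   mlArg2Reg just below.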
*) val mlArg2Reg = case targetArch of ObjectId32Bit => esi | _ => ebx exception InternalError = Misc.InternalError fun opSizeToMove OpSize32 = Move32 | opSizeToMove OpSize64 = Move64 val pushR = PushToStack o RegisterArg fun moveRR{source, output, opSize} = Move{source=RegisterArg source, destination=RegisterArg output, moveSize=opSizeToMove opSize} fun loadMemory(reg, base, offset, opSize) = Move{source=MemoryArg{base=base, offset=offset, index=NoIndex}, destination=RegisterArg reg, moveSize=opSizeToMove opSize} and storeMemory(reg, base, offset, opSize) = Move{source=RegisterArg reg, destination=MemoryArg {base=base, offset=offset, index=NoIndex}, moveSize=opSizeToMove opSize} val loadHeapMemory = case targetArch of ObjectId32Bit => ( fn (reg, base, offset, opSize) => Move{source=MemoryArg{base=ebx, offset=offset, index=Index4 base}, destination=RegisterArg reg, moveSize=opSizeToMove opSize} ) | _ => loadMemory fun loadAddress{source=(srcReg, 0), destination} = Move{source=RegisterArg srcReg, destination=RegisterArg destination, moveSize=opSizeToMove nativeWordOpSize} | loadAddress{source=(srcReg, srcOffset), destination} = LoadAddress{offset=srcOffset, base=SOME srcReg, index=NoIndex, output=destination, opSize=nativeWordOpSize } (* Sequence of operations to move memory. *) fun moveMemory{source, destination, length} = [ loadAddress{source=source, destination=rsi}, loadAddress{source=destination, destination=rdi}, (* N.B. When moving a struct in a Win64 callback the source could be rcx so only move this after copying the source to rsi. *) Move{source=NonAddressConstArg(LargeInt.fromInt length), destination=RegisterArg rcx, moveSize=opSizeToMove nativeWordOpSize}, RepeatOperation MOVS8 ] fun createProfileObject _ (*functionName*) = let (* The profile object is a single mutable with the F_bytes bit set. *) open Address val profileObject = RunCall.allocateByteMemory(0w1, Word.fromLargeWord(Word8.toLargeWord(Word8.orb(F_mutable, F_bytes)))) fun clear 0w0 = () | clear i = (assignByte(profileObject, i-0w1, 0w0); clear (i-0w1)) val () = clear wordSize in toMachineWord profileObject end val makeEntryPoint: string -> machineWord = RunCall.rtsCallFull1 "PolyCreateEntryPointObject" datatype abi = X86_32 | X64Win | X64Unix local (* Get the ABI. On 64-bit Windows and Unix use different calling conventions. *) val getABICall: unit -> int = RunCall.rtsCallFast0 "PolyGetABI" in fun getABI() = case getABICall() of 0 => X86_32 | 1 => X64Unix | 2 => X64Win | n => raise InternalError ("Unknown ABI type " ^ Int.toString n) end (* This is now the standard entry call code. *) datatype fastArgs = FastArgFixed | FastArgDouble | FastArgFloat fun rtsCallFastGeneral (functionName, argFormats, (*resultFormat*) _, debugSwitches) = let val entryPointAddr = makeEntryPoint functionName (* Get the ABI. On 64-bit Windows and Unix use different calling conventions. *) val abi = getABI() val entryPtrReg = if targetArch <> Native32Bit then r11 else ecx val nArgs = List.length argFormats local (* Compute stack space. The actual number of args passed is nArgs. *) val argSpace = case abi of X64Unix => Int.max(0, nArgs-6)*8 | X64Win => Int.max(0, nArgs-4)*8 | X86_32 => List.foldl(fn (FastArgDouble, n) => n+8 | (_, n) => n+4) 0 argFormats val align = argSpace mod 16 in (* Add sufficient space so that esp will be 16-byte aligned after we have pushed any arguments we need to push. *) val stackSpace = if align = 0 then memRegSize else memRegSize + 16 - align end (* The number of ML arguments passed on the stack. 
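   With the ML conventions above the first two arguments are in registers on
   X86/32 and the first five on both 64-bit targets, so only the remainder, if
   any, has to be popped when this function returns.  A worked check of the
   value computed below (illustration only):

       Int.max(3 - 2, 0) = 1    for three arguments on X86/32
       Int.max(7 - 5, 0) = 2    for seven arguments on a 64-bit ABI
       Int.max(4 - 5, 0) = 0    when every argument fits in a register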
*) val mlArgsOnStack = Int.max(case abi of X86_32 => nArgs - 2 | _ => nArgs - 5, 0) val code = [ Move{source=AddressConstArg entryPointAddr, destination=RegisterArg entryPtrReg, moveSize=opSizeToMove polyWordOpSize}, (* Load the entry point ref. *) loadHeapMemory(entryPtrReg, entryPtrReg, 0, nativeWordOpSize)(* Load its value. *) ] @ ( (* Save heap ptr. This is in r15 in X86/64 *) if targetArch <> Native32Bit then [storeMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize)] (* Save heap ptr *) else [] ) @ ( if (case abi of X86_32 => nArgs >= 3 | _ => nArgs >= 6) then [moveRR{source=esp, output=edi, opSize=nativeWordOpSize}] (* Needed if we have to load from the stack. *) else [] ) @ [ storeMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Save ML stack and switch to C stack. *) loadMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize), (* Set the stack pointer past the data on the stack. For Windows/64 add in a 32 byte save area *) ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt stackSpace), opSize=nativeWordOpSize} ] @ ( case abi of (* Set the argument registers. *) X86_32 => let fun pushReg(reg, FastArgFixed) = [pushR reg] | pushReg(reg, FastArgDouble) = (* reg contains the address of the value. This must be unboxed onto the stack. *) [ FPLoadFromMemory{address={base=reg, offset=0, index=NoIndex}, precision=DoublePrecision}, ArithToGenReg{ opc=SUB, output=esp, source=NonAddressConstArg 8, opSize=nativeWordOpSize}, FPStoreToMemory{ address={base=esp, offset=0, index=NoIndex}, precision=DoublePrecision, andPop=true } ] | pushReg(reg, FastArgFloat) = (* reg contains the address of the value. This must be unboxed onto the stack. *) [ FPLoadFromMemory{address={base=reg, offset=0, index=NoIndex}, precision=SinglePrecision}, ArithToGenReg{ opc=SUB, output=esp, source=NonAddressConstArg 4, opSize=nativeWordOpSize}, FPStoreToMemory{ address={base=esp, offset=0, index=NoIndex}, precision=SinglePrecision, andPop=true } ] (* The stack arguments have to be copied first followed by the ebx and finally eax. *) fun pushArgs (_, []) = [] | pushArgs (_, [argType]) = pushReg(eax, argType) | pushArgs (_, [arg2Type, arg1Type]) = pushReg(ebx, arg2Type) @ pushReg(eax, arg1Type) | pushArgs (n, FastArgFixed :: argTypes) = PushToStack(MemoryArg{base=edi, offset=(nArgs-n+1)* 4, index=NoIndex}) :: pushArgs(n-1, argTypes) | pushArgs (n, argType :: argTypes) = (* Use esi as a temporary register. *) loadMemory(esi, edi, (nArgs-n+1)* 4, polyWordOpSize) :: pushReg(esi, argType) @ pushArgs(n-1, argTypes) in pushArgs(nArgs, List.rev argFormats) end | X64Unix => ( if List.all (fn FastArgFixed => true | _ => false) argFormats then let fun pushArgs 0 = [] | pushArgs 1 = [moveRR{source=eax, output=edi, opSize=polyWordOpSize}] | pushArgs 2 = moveRR{source=mlArg2Reg, output=esi, opSize=polyWordOpSize} :: pushArgs 1 | pushArgs 3 = moveRR{source=r8, output=edx, opSize=polyWordOpSize} :: pushArgs 2 | pushArgs 4 = moveRR{source=r9, output=ecx, opSize=polyWordOpSize} :: pushArgs 3 | pushArgs 5 = (* We have to move r8 into edx before we can move r10 into r8 *) moveRR{source=r8, output=edx, opSize=polyWordOpSize} :: moveRR{source=r9, output=ecx, opSize=polyWordOpSize} :: moveRR{source=r10, output=r8, opSize=polyWordOpSize} :: pushArgs 2 | pushArgs 6 = (* We have to move r9 into edi before we can load r9 from the stack. 
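   More fully: the ML arguments arrive in rax, rbx (rsi on 32-in-64), r8, r9
   and r10, with the sixth on the saved ML stack addressed through edi, while
   SysV wants them in rdi, rsi, rdx, rcx, r8 and r9.  The moves below are
   therefore ordered so that every source register is read before it is reused
   as a destination: r8 goes to rdx and r9 to rcx first, then the stacked
   argument is loaded into r9, r10 is moved into r8, and finally the first two
   arguments are moved into rsi and rdi.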
*) moveRR{source=r8, output=edx, opSize=polyWordOpSize} :: moveRR{source=r9, output=ecx, opSize=polyWordOpSize} :: loadMemory(r9, edi, 8, polyWordOpSize) :: moveRR{source=r10, output=r8, opSize=polyWordOpSize} :: pushArgs 2 | pushArgs _ = raise InternalError "rtsCall: Abi/argument count not implemented" in pushArgs nArgs end else case argFormats of [] => [] | [FastArgFixed] => [ moveRR{source=eax, output=edi, opSize=polyWordOpSize} ] | [FastArgFixed, FastArgFixed] => (* Since mlArgs2Reg is esi on 32-in-64 this is redundant. *) [ moveRR{source=mlArg2Reg, output=esi, opSize=polyWordOpSize}, moveRR{source=eax, output=edi, opSize=polyWordOpSize} ] | [FastArgFixed, FastArgFixed, FastArgFixed] => [ moveRR{source=mlArg2Reg, output=esi, opSize=polyWordOpSize}, moveRR{source=eax, output=edi, opSize=polyWordOpSize}, moveRR{source=r8, output=edx, opSize=polyWordOpSize} ] | [FastArgFixed, FastArgFixed, FastArgFixed, FastArgFixed] => [ moveRR{source=mlArg2Reg, output=esi, opSize=polyWordOpSize}, moveRR{source=eax, output=edi, opSize=polyWordOpSize}, moveRR{source=r8, output=edx, opSize=polyWordOpSize}, moveRR{source=r9, output=ecx, opSize=polyWordOpSize} ] (* One "double" argument. The value needs to be unboxed. *) | [FastArgDouble] => [] (* Already in xmm0 *) (* X64 on both Windows and Unix take the first arg in xmm0 and the second in xmm1. They are already there. *) | [FastArgDouble, FastArgDouble] => [] | [FastArgDouble, FastArgFixed] => [ moveRR{source=eax, output=edi, opSize=nativeWordOpSize} ] | [FastArgFloat] => [] (* Already in xmm0 *) | [FastArgFloat, FastArgFloat] => [] (* Already in xmm0 and xmm1 *) (* One float argument and one fixed. *) | [FastArgFloat, FastArgFixed] => [moveRR{source=mlArg2Reg, output=edi, opSize=polyWordOpSize} ] | _ => raise InternalError "rtsCall: Abi/argument count not implemented" ) | X64Win => ( if List.all (fn FastArgFixed => true | _ => false) argFormats then let fun pushArgs 0 = [] | pushArgs 1 = [moveRR{source=eax, output=ecx, opSize=polyWordOpSize}] | pushArgs 2 = moveRR{source=mlArg2Reg, output=edx, opSize=polyWordOpSize} :: pushArgs 1 | pushArgs 3 = (* Already in r8 *) pushArgs 2 | pushArgs 4 = (* Already in r9, and r8 *) pushArgs 2 | pushArgs 5 = pushR r10 :: pushArgs 2 | pushArgs 6 = PushToStack(MemoryArg{base=edi, offset=8, index=NoIndex}) :: pushArgs 5 | pushArgs _ = raise InternalError "rtsCall: Abi/argument count not implemented" in pushArgs nArgs end else case argFormats of [FastArgFixed] => [ moveRR{source=eax, output=ecx, opSize=polyWordOpSize} ] | [FastArgFixed, FastArgFixed] => [ moveRR{source=eax, output=ecx, opSize=polyWordOpSize}, moveRR{source=mlArg2Reg, output=edx, opSize=polyWordOpSize} ] | [FastArgFixed, FastArgFixed, FastArgFixed] => [ moveRR{source=eax, output=ecx, opSize=polyWordOpSize}, moveRR{source=mlArg2Reg, output=edx, opSize=polyWordOpSize} (* Arg3 is already in r8. *) ] | [FastArgFixed, FastArgFixed, FastArgFixed, FastArgFixed] => [ moveRR{source=eax, output=ecx, opSize=polyWordOpSize}, moveRR{source=mlArg2Reg, output=edx, opSize=polyWordOpSize} (* Arg3 is already in r8 and arg4 in r9. *) ] | [FastArgDouble] => [ (* Already in xmm0 *) ] (* X64 on both Windows and Unix take the first arg in xmm0 and the second in xmm1. They are already there. *) | [FastArgDouble, FastArgDouble] => [ ] (* X64 on both Windows and Unix take the first arg in xmm0. On Unix the integer argument is treated as the first argument and goes into edi. On Windows it's treated as the second and goes into edx. N.B. It's also the first argument in ML so is in rax. 
*) | [FastArgDouble, FastArgFixed] => [ moveRR{source=eax, output=edx, opSize=nativeWordOpSize} ] | [FastArgFloat] => [] | [FastArgFloat, FastArgFloat] => [] (* Already in xmm0 and xmm1 *) | [FastArgFloat, FastArgFixed] => [moveRR{source=mlArg2Reg, output=edx, opSize=polyWordOpSize}] | _ => raise InternalError "rtsCall: Abi/argument count not implemented" ) ) @ (* For Windows/64 add in a 32 byte save area ater we've pushed any arguments. *) (case abi of X64Win => [ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg 32, opSize=nativeWordOpSize}] | _ => []) @ [ CallAddress(RegisterArg entryPtrReg), (* Call the function *) loadMemory(esp, ebp, memRegStackPtr, nativeWordOpSize) (* Restore the ML stack pointer. *) ] @ ( if targetArch <> Native32Bit then [loadMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize) ] (* Copy back the heap ptr *) else [] ) @ [ (* Since this is an ML function we need to remove any ML stack arguments. *) ReturnFromFunction mlArgsOnStack ] val profileObject = createProfileObject functionName val newCode = codeCreate (functionName, profileObject, debugSwitches) val closure = makeConstantClosure() val () = X86OPTIMISE.generateCode{code=newCode, labelCount=0, ops=code, resultClosure=closure} in closureAsAddress closure end fun rtsCallFast (functionName, nArgs, debugSwitches) = rtsCallFastGeneral (functionName, List.tabulate(nArgs, fn _ => FastArgFixed), FastArgFixed, debugSwitches) (* RTS call with one double-precision floating point argument and a floating point result. *) fun rtsCallFastRealtoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgDouble], FastArgDouble, debugSwitches) (* RTS call with two double-precision floating point arguments and a floating point result. *) fun rtsCallFastRealRealtoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgDouble, FastArgDouble], FastArgDouble, debugSwitches) (* RTS call with one double-precision floating point argument, one fixed point argument and a floating point result. *) fun rtsCallFastRealGeneraltoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgDouble, FastArgFixed], FastArgDouble, debugSwitches) (* RTS call with one general (i.e. ML word) argument and a floating point result. This is used only to convert arbitrary precision values to floats. *) fun rtsCallFastGeneraltoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFixed], FastArgDouble, debugSwitches) (* Operations on Real32.real values. *) fun rtsCallFastFloattoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFloat], FastArgFloat, debugSwitches) fun rtsCallFastFloatFloattoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFloat, FastArgFloat], FastArgFloat, debugSwitches) (* RTS call with one double-precision floating point argument, one fixed point argument and a floating point result. *) fun rtsCallFastFloatGeneraltoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFloat, FastArgFixed], FastArgFloat, debugSwitches) (* RTS call with one general (i.e. ML word) argument and a floating point result. This is used only to convert arbitrary precision values to floats. *) fun rtsCallFastGeneraltoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFixed], FastArgFloat, debugSwitches) datatype ffiABI = FFI_SYSV (* Unix 32 bit and Windows GCC 32-bit *) | FFI_STDCALL (* Windows 32-bit system ABI. Callee clears the stack. 
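   In other words the arguments have already been popped when a stdcall
   function returns, whereas for sysv and ms_cdecl the caller removes them.
   This is reflected later in the file: the outgoing call code only adds the
   argument bytes back onto esp for the non-stdcall ABIs, and the callback
   stub returns with ReturnFromFunction argCount, popping its own arguments,
   only for FFI_STDCALL.  As a sketch, with a hypothetical name:

       fun calleePopsArgs FFI_STDCALL = true
         | calleePopsArgs _           = false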
*) | FFI_MS_CDECL (* VS 32-bit. Same as SYSV except when returning a struct. Default on Windows including GCC in Mingw. *) | FFI_WIN64 (* Windows 64 bit *) | FFI_UNIX64 (* Unix 64 bit. libffi also implements this on X86/32. *) (* We don't include various other 32-bit Windows ABIs. *) local val getOSType: unit -> int = RunCall.rtsCallFast0 "PolyGetOSType" in - val abiList = + (* This actually a constant since each exported saved state has + a distinct ABI. However for compatibility with the interpreted + version we make this a function. *) + fun abiList () = case getABI() of X86_32 => [("sysv", FFI_SYSV), ("stdcall", FFI_STDCALL), ("ms_cdecl", FFI_MS_CDECL), (* Default to MS_CDECL on Windows otherwise SYSV. *) ("default", if getOSType() = 1 then FFI_MS_CDECL else FFI_SYSV)] | X64Win => [("win64", FFI_WIN64), ("default", FFI_WIN64)] | X64Unix => [("unix64", FFI_UNIX64), ("default", FFI_UNIX64)] type abi = ffiABI end fun alignUp(s, align) = Word.andb(s + align-0w1, ~ align) fun intAlignUp(s, align) = Word.toInt(alignUp(Word.fromInt s, align)) val getThreadDataCall = makeEntryPoint "PolyX86GetThreadData" local val sysWordSize = Word.toInt(nativeWordSize div wordSize) in (* Code to box an address as a SysWord.word value *) fun boxRegAsSysWord(boxReg, outputReg, saveRegs) = AllocStore{ size=sysWordSize, output=outputReg, saveRegs=saveRegs } :: ( if targetArch = Native64Bit then [ Move{source=NonAddressConstArg(LargeInt.fromInt sysWordSize), destination=MemoryArg {offset= ~ (Word.toInt wordSize), base=outputReg, index=NoIndex}, moveSize=opSizeToMove polyWordOpSize}, Move{moveSize=Move8, source=NonAddressConstArg 1 (* byte *), destination=MemoryArg {offset= ~1, base=outputReg, index=NoIndex}} ] else let val lengthWord = IntInf.orb(IntInf.fromInt sysWordSize, IntInf.<<(1, 0w24)) in [Move{source=NonAddressConstArg lengthWord, destination=MemoryArg {offset= ~ (Word.toInt wordSize), base=outputReg, index=NoIndex}, moveSize=opSizeToMove polyWordOpSize}] end ) @ Move{source=RegisterArg boxReg, destination=MemoryArg {offset=0, base=outputReg, index=NoIndex}, moveSize=opSizeToMove nativeWordOpSize} :: ( if targetArch = ObjectId32Bit then [ ArithToGenReg{ opc=SUB, output=outputReg, source=RegisterArg rbx, opSize=nativeWordOpSize }, ShiftConstant{ shiftType=SHR, output=outputReg, shift=0w2, opSize=OpSize64 } ] else [] ) @ [StoreInitialised] end (* Build a foreign call function. The arguments are the abi, the list of argument types and the result type. The result is the code of the ML function that takes three arguments: the C function to call, the arguments as a vector of C values and the address of the memory for the result. *) (* This must match the type in Foreign.LowLevel. Once this is bootstrapped we could use that type but note that this is the type we use within the compiler and we build Foreign.LowLevel AFTER compiling this. *) datatype cTypeForm = CTypeFloatingPt | CTypePointer | CTypeSignedInt | CTypeUnsignedInt | CTypeStruct of cType list | CTypeVoid withtype cType = { typeForm: cTypeForm, align: word, size: word } fun call32Bits(abi, args, result) = let (* 32-bit arguments. These all go to the stack so we can simply push them. The arguments go on the stack in reverse order. 
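   Pushing in reverse order leaves the first argument at the lowest address,
   which is what all the 32-bit C conventions expect.  For example
   (illustration only), three word-sized arguments produce, in effect,
   PushToStack arg3, PushToStack arg2, PushToStack arg1, so that at the call
   arg1 is at the top of the C stack, arg2 four bytes above it and arg3 four
   bytes above that.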
*) fun loadArgs32([], stackOffset, argOffset, code, continue) = continue(stackOffset, argOffset, code) | loadArgs32(arg::args, stackOffset, argOffset, code, continue) = let val {size, align, typeForm} = arg val newArgOffset = alignUp(argOffset, align) val baseAddr = {base=mlArg2Reg, offset=Word.toInt newArgOffset, index=NoIndex} in case (typeForm, size) of (CTypeStruct elements, _) => (* structs passed as values are recursively unpacked. *) loadArgs32(elements, stackOffset, newArgOffset (* Struct is aligned. *), code, fn (so, ao, code) => loadArgs32(args, so, ao, code, continue)) | (CTypeVoid, _) => raise Foreign.Foreign "Void cannot be used for a function argument" | (CTypeUnsignedInt, 0w1) => (* Unsigned char. *) loadArgs32(args, stackOffset+4, newArgOffset+size, Move{source=MemoryArg baseAddr, destination=RegisterArg edx, moveSize=Move8 } :: PushToStack(RegisterArg edx) :: code, continue) | (CTypeSignedInt, 0w1) => (* Signed char. *) loadArgs32(args, stackOffset+4, newArgOffset+size, Move{source=MemoryArg baseAddr, destination=RegisterArg edx, moveSize=Move8X32 } :: PushToStack(RegisterArg edx) :: code, continue) | (CTypeUnsignedInt, 0w2) => (* Unsigned 16-bits. *) loadArgs32(args, stackOffset+4, newArgOffset+size, Move{source=MemoryArg baseAddr, destination=RegisterArg edx, moveSize=Move16 } :: PushToStack(RegisterArg edx) :: code, continue) | (CTypeSignedInt, 0w2) => (* Signed 16-bits. *) loadArgs32(args, stackOffset+4, newArgOffset+size, Move{source=MemoryArg baseAddr, destination=RegisterArg edx, moveSize=Move16X32 } :: PushToStack(RegisterArg edx) :: code, continue) | (_, 0w4) => (* 32-bits. *) loadArgs32(args, stackOffset+4, newArgOffset+size, PushToStack(MemoryArg baseAddr) :: code, continue) | (CTypeFloatingPt, 0w8) =>(* Double: push the two words. High-order word first, then low-order. *) loadArgs32(args, stackOffset+8, newArgOffset+size, PushToStack(MemoryArg{base=mlArg2Reg, offset=Word.toInt newArgOffset+4, index=NoIndex}) :: PushToStack(MemoryArg{base=mlArg2Reg, offset=Word.toInt newArgOffset, index=NoIndex}) :: code, continue) | _ => raise Foreign.Foreign "argument type not supported" end val {typeForm, size, ...} = result val resultMemory = {base=ecx, offset=0, index=NoIndex} (* Structures are passed by reference by storing the address of the result as the first argument except that in MS_CDECL (and STDCALL?) structures of size 1, 2, 4 and 8 are returned in EAX, and for 8, EDX. *) val (getResult, needResultAddress) = if (case typeForm of CTypeStruct _ => true | _ => false) andalso (abi = FFI_SYSV orelse (size <> 0w1 andalso size <> 0w2 andalso size <> 0w4 andalso size <> 0w8)) (* TODO: We have to get the address of the destination area. 
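   For reference, the condition being tested here is: the result is a struct
   and either the ABI is FFI_SYSV or its size is not one of 1, 2, 4 or 8
   bytes.  So, for example, a 12-byte struct needs a hidden result pointer on
   every 32-bit ABI, while an 8-byte struct is returned in eax/edx under the
   non-sysv ABIs as coded here and only needs the pointer under sysv.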
*) then ([], true) else if typeForm = CTypeVoid then ([], false) else (loadMemory(ecx, esp, 4, nativeWordOpSize) :: loadHeapMemory(ecx, ecx, 0, nativeWordOpSize) :: (if size = 0w1 then (* Single byte *) [Move{source=RegisterArg eax, destination=MemoryArg resultMemory, moveSize=Move8}] else if size = 0w2 then (* 16-bits *) [Move{source=RegisterArg eax, destination=MemoryArg resultMemory, moveSize=Move16}] else if typeForm = CTypeFloatingPt andalso size = 0w4 then [FPStoreToMemory{address=resultMemory, precision=SinglePrecision, andPop=true }] else if size = 0w4 then [Move{source=RegisterArg eax, destination=MemoryArg resultMemory, moveSize=Move32}] else if typeForm = CTypeFloatingPt andalso size = 0w8 then [FPStoreToMemory{address=resultMemory, precision=DoublePrecision, andPop=true }] else if size = 0w8 then [ Move{source=RegisterArg eax, destination=MemoryArg resultMemory, moveSize=Move32}, Move{source=RegisterArg edx, destination=MemoryArg {base=ecx, offset=4, index=NoIndex}, moveSize=Move32} ] else raise Foreign.Foreign "Unrecognised result type"), false) local (* Load the arguments. If we need to pass the return address for a struct that is the first arg. *) val (startStack, startCode) = if needResultAddress then (4, [PushToStack(MemoryArg{base=ecx, offset=0, index=NoIndex})]) else (0, []) in val (argCode, argStack) = loadArgs32(args, startStack, 0w0, startCode, fn (stackOffset, _, code) => (code, stackOffset)) end local val align = argStack mod 16 in (* Always align the stack. It's not always necessary on 32-bits but GCC prefers it. *) val preArgAlign = if align = 0 then 0 else 16-align (* Adjustment to be made when the function returns. Stdcall resets the stack in the callee. *) val postCallStackReset = preArgAlign + (if abi = FFI_STDCALL then 0 else argStack) end in ( (* If we're returning a struct we need the result address before we call. *) if needResultAddress then [loadMemory(ecx, esp, 4, nativeWordOpSize)] else [] ) @ [ (* Save the stack pointer. *) storeMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Save ML stack and switch to C stack. *) loadMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize) (* Load the saved C stack pointer. *) ] @ ( if preArgAlign = 0 then [] else [ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt preArgAlign), opSize=nativeWordOpSize}] ) @ ( (* The second argument is a SysWord containing the address of a malloced area of memory with the actual arguments in it. *) if null args then [] else [loadHeapMemory(mlArg2Reg, mlArg2Reg, 0, nativeWordOpSize)] ) @ argCode @ CallAddress(MemoryArg{base=eax, offset=0, index=NoIndex}) :: (* Restore the C stack. This is really only necessary if we've called a callback since that will store its esp value. *) ( if postCallStackReset = 0 then [] else [ArithToGenReg{opc=ADD, output=esp, source=NonAddressConstArg(LargeInt.fromInt postCallStackReset), opSize=nativeWordOpSize}] ) @ [ storeMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize), loadMemory(esp, ebp, memRegStackPtr, nativeWordOpSize) (* Restore the ML stack pointer. *) ] @ getResult @ (* Store the result in the destination. *) [ ReturnFromFunction 1 ] end fun closure32Bits(abi, args, result) = let (* Arguments are copied from the stack into a struct that is then passed to the ML function. 
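   The overall shape of the stub generated below is, roughly:
     1. push the callee-save registers and allocate a C-stack area;
     2. copy the C arguments from the caller's stack into that area
        (copyArgs below);
     3. call PolyX86GetThreadData to obtain the value for ebp and switch to
        the ML stack;
     4. box the address of the argument area and the address of the result
        area as two SysWord values and call the ML closure with them;
     5. switch back to the C stack, load the result into eax/edx or the FP
        stack, pop the callee-save registers and return.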
*) fun copyArgs([], nArgs, argOffset, code, continue) = continue(nArgs, argOffset, code) | copyArgs(arg::args, nArgs, argOffset, code, continue) = let val {size, align, typeForm} = arg val newArgOffset = alignUp(argOffset, align) val sourceAddr = {base=ebx, offset=nArgs*4, index=NoIndex} val destAddr = {base=esp, offset=Word.toInt newArgOffset, index=NoIndex} in case (typeForm, size) of (CTypeStruct elements, _) => (* structs passed as values are recursively unpacked. *) copyArgs(elements, nArgs, newArgOffset (* Struct is aligned. *), code, fn (na, ao, c) => copyArgs(args, na, ao, c, continue)) | (CTypeVoid, _) => raise Foreign.Foreign "Void cannot be used for a function argument" | (CTypeFloatingPt, 0w8) => (* Double: copy the two words. High-order word first, then low-order. *) copyArgs(args, nArgs+2, argOffset+size, Move{source=MemoryArg sourceAddr, destination=RegisterArg eax, moveSize=Move32} :: Move{source=RegisterArg eax, destination=MemoryArg destAddr, moveSize=Move32} :: Move{source=MemoryArg {base=ebx, offset=nArgs*4+4, index=NoIndex}, destination=RegisterArg eax, moveSize=Move32} :: Move{source=RegisterArg eax, destination=MemoryArg{base=esp, offset=Word.toInt newArgOffset + 4, index=NoIndex}, moveSize=Move32} :: code, continue) | _ => (* Everything else is a single word on the stack. *) let val moveOp = case size of 0w1 => Move8 | 0w2 => Move16 | 0w4 => Move32 | _ => raise Foreign.Foreign "copyArgs: Invalid size" in copyArgs(args, nArgs+1, argOffset+size, Move{source=MemoryArg sourceAddr, destination=RegisterArg eax, moveSize=Move32} :: Move{source=RegisterArg eax, destination=MemoryArg destAddr, moveSize=moveOp} :: code, continue) end end val {typeForm, size, align, ...} = result (* Struct results are normally passed by reference. *) val resultStructByRef = (case typeForm of CTypeStruct _ => true | _ => false) andalso (abi = FFI_SYSV orelse (size <> 0w1 andalso size <> 0w2 andalso size <> 0w4 andalso size <> 0w8)) val (argCount, argumentSpace, copyArgsFromStack) = copyArgs(args, if resultStructByRef then 1 else 0, 0w0, [], fn result => result) val resultOffset = alignUp(argumentSpace, align) (* Offset of result area *) val (loadResults, resultSize) = if typeForm = CTypeVoid orelse resultStructByRef then ([], 0w0) else let val resultMem = {base=esp, offset=Word.toInt resultOffset, index=NoIndex} val resultCode = case (typeForm, size) of (CTypeSignedInt, 0w1) => [Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move8X32 }] | (_, 0w1) => [Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move8 }] | (CTypeSignedInt, 0w2) => [Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move16X32 }] | (_, 0w2) => [Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move16 }] | (CTypeFloatingPt, 0w4) => [FPLoadFromMemory{ address=resultMem, precision=SinglePrecision }] | (_, 0w4) => [Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move32 }] | (CTypeFloatingPt, 0w8) => [FPLoadFromMemory{ address=resultMem, precision=DoublePrecision }] | (_, 0w8) => (* MSC only. Struct returned in eax/edx. 
*) [ Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move32 }, Move{source=MemoryArg {base=esp, offset=Word.toInt resultOffset + 4, index=NoIndex}, destination=RegisterArg edx, moveSize=Move32 } ] | _ => raise Foreign.Foreign "Unrecognised result type" in (resultCode, size) end val stackSpace = Word.toInt(resultOffset + resultSize) local val align = stackSpace mod 16 in (* Stack space. In order to align the stack correctly for GCC we need the value in memRegCStackPtr to be a multiple of 16 bytes + 8. esp would have been on a 16 byte boundary before the return address was pushed so after pushing the return address and four registers we need a further 4 bytes to get the alignment back again. The effect of this is that the argument and result area is on an 8-byte boundary. *) val stackToAllocate = stackSpace + (if align = 0 then 0 else 16-align) + 4 end in [ (* Push callee-save registers. *) PushToStack(RegisterArg ebp), PushToStack(RegisterArg ebx), PushToStack(RegisterArg edi), PushToStack(RegisterArg esi), (* Set ebx to point to the original args. *) LoadAddress{ output=ebx, offset=20, base=SOME esp, index=NoIndex, opSize=OpSize32}, (* Allocate stack space. *) ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt stackToAllocate), opSize=OpSize32}, (* Move the function address in eax into esi since that's a callee-save register. *) Move{source=RegisterArg eax, destination=RegisterArg esi, moveSize=Move32} ] @ copyArgsFromStack @ [ (* Get the value for ebp. *) Move{source=AddressConstArg getThreadDataCall, destination=RegisterArg ecx, moveSize=Move32}, CallAddress(MemoryArg{base=ecx, offset=0, index=NoIndex}), (* Get the address - N.B. Heap addr in 32-in-64. *) moveRR{source=eax, output=ebp, opSize=OpSize32}, (* Save the address of the argument and result area. *) moveRR{source=esp, output=ecx, opSize=OpSize32}, (* Switch to the ML stack. *) storeMemory(esp, ebp, memRegCStackPtr, OpSize32), loadMemory(esp, ebp, memRegStackPtr, OpSize32), (* Move esi into the closure register edx *) Move{source=RegisterArg esi, destination=RegisterArg edx, moveSize=Move32} ] @ boxRegAsSysWord(ecx, eax, []) @ ( (* If we're returning a struct the address for the result will have been passed in the first argument. We use that as the result area. Otherwise point to the result area on the stack. *) if resultStructByRef then Move{source=MemoryArg {offset=0, base=ebx, index=NoIndex}, destination=RegisterArg ecx, moveSize=Move32} else ArithToGenReg{opc=ADD, output=ecx, source=NonAddressConstArg(Word.toLargeInt resultOffset), opSize=OpSize32} ) :: boxRegAsSysWord(ecx, ebx, [eax]) @ [ (* Call the ML function using the full closure call. *) CallAddress(MemoryArg{offset=0, base=edx, index=NoIndex}), (* Save the ML stack pointer because we may have grown the stack. Switch to the C stack. *) storeMemory(esp, ebp, memRegStackPtr, OpSize32), loadMemory(esp, ebp, memRegCStackPtr, OpSize32) ] @ loadResults @ [ (* Remove the stack space. *) ArithToGenReg{opc=ADD, output=esp, source=NonAddressConstArg(LargeInt.fromInt stackToAllocate), opSize=OpSize32}, PopR esi, PopR edi, PopR ebx, PopR ebp (* Restore callee-save registers. *) ] @ ( (* If we've passed in the address of the area for the result structure we're supposed to pass that back in eax. *) if resultStructByRef then [loadMemory(eax, esp, 4, OpSize32)] else [] ) @ [ (* Callee removes arguments in StdCall. *) ReturnFromFunction (if abi = FFI_STDCALL then argCount else 0) ] end local (* Windows on X64. 
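   In this convention each of the first four argument positions owns a pair
   of registers, one integer and one SSE; an argument uses whichever member
   of the pair suits its type and the other is skipped.  For example
   (illustration only), a call f(int, double, ptr) receives its arguments in
   rcx, xmm1 and r8, and the fifth and later arguments go on the stack.  The
   pairs used below are: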
*) val win64ArgRegs = [ (rcx, xmm0), (rdx, xmm1), (r8, xmm2), (r9, xmm3) ] in fun callWindows64Bits(args, result) = let val extraStackReg = r10 (* Not used for any arguments. *) fun loadWin64Args([], stackOffset, _, _, code, extraStack, preCode) = (code, stackOffset, preCode, extraStack) | loadWin64Args(arg::args, stackOffset, argOffset, regs, code, extraStack, preCode) = let val {size, align, typeForm, ...} = arg val newArgOffset = alignUp(argOffset, align) val baseAddr = {base=mlArg2Reg, offset=Word.toInt newArgOffset, index=NoIndex} val workReg = rcx (* rcx: the last to be loaded. *) (* Integer arguments. *) fun loadIntArg moveOp = case regs of (areg, _) :: regs' => loadWin64Args(args, stackOffset, newArgOffset+size, regs', Move{source=MemoryArg baseAddr, destination=RegisterArg areg, moveSize=moveOp } :: code, extraStack, preCode) | [] => loadWin64Args(args, stackOffset+8, newArgOffset+size, [], if size = 0w8 then PushToStack(MemoryArg baseAddr) :: code else (* Need to load it into a register first. *) Move{source=MemoryArg baseAddr, destination=RegisterArg workReg, moveSize=moveOp } :: PushToStack(RegisterArg workReg) :: code, extraStack, preCode) in (* Structs of 1, 2, 4 and 8 bytes are passed as the corresponding int. It may not be necessary to sign-extend 1, 2 or 4-byte values. 2, 4 or 8-byte structs may not be aligned onto the appropriate boundary but it should still work. *) case (size, typeForm) of (0w1, CTypeSignedInt) => (* Signed char. *) loadIntArg Move8X64 | (0w1, _) => (* Unsigned char or single byte struct *) loadIntArg Move8 | (0w2, CTypeSignedInt) =>(* Signed 16-bits. *) loadIntArg Move16X64 | (0w2, _) => (* Unsigned 16-bits. *) loadIntArg Move16 | (0w4, CTypeFloatingPt) => ( case regs of (_, fpReg) :: regs' => loadWin64Args(args, stackOffset, newArgOffset+size, regs', XMMArith{opc=SSE2MoveFloat, source=MemoryArg baseAddr, output=fpReg } :: code, extraStack, preCode) | [] => loadWin64Args(args, stackOffset+8, newArgOffset+size, [], Move{source=MemoryArg baseAddr, destination=RegisterArg workReg, moveSize=Move32 } :: PushToStack(RegisterArg workReg) :: code, extraStack, preCode) ) | (0w4, CTypeSignedInt) => (* Signed 32-bits. *) loadIntArg Move32X64 | (0w4, _) => (* Unsigned 32-bits. *) loadIntArg Move32 | (0w8, CTypeFloatingPt) => ( case regs of (_, fpReg) :: regs' => loadWin64Args(args, stackOffset, newArgOffset+size, regs', XMMArith{opc=SSE2MoveDouble, source=MemoryArg baseAddr, output=fpReg } :: code, extraStack, preCode) | [] => loadWin64Args(args, stackOffset+8, newArgOffset+size, [], Move{source=MemoryArg baseAddr, destination=RegisterArg workReg, moveSize=Move64 } :: PushToStack(RegisterArg workReg) :: code, extraStack, preCode) ) | (0w8, _) => (* 64-bits. *) loadIntArg Move64 | (_, CTypeStruct _) => let (* Structures of other sizes are passed by reference. They are first copied into new areas on the stack. This ensures that the called function can update the structure without changing the original values. 
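   Each copy is carved out of an extra stack area whose size is accumulated
   in extraStack, rounded up to a multiple of 16 bytes.  As a worked example
   (illustration only): for a single 24-byte struct argument, newExtra below
   is intAlignUp(0 + 24, 0w16) = 32, the struct is copied to offset 0 of the
   area addressed by r10 (extraStackReg), and the argument register is loaded
   with that address.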
*) val newExtra = intAlignUp(extraStack + Word.toInt size, 0w16) val newPreCode = moveMemory{source=(mlArg2Reg, Word.toInt newArgOffset), destination=(extraStackReg, extraStack), length=Word.toInt size} @ preCode in case regs of (areg, _) :: regs' => loadWin64Args(args, stackOffset, newArgOffset+size, regs', loadAddress{source=(extraStackReg, extraStack), destination=areg} :: code, newExtra, newPreCode) | [] => loadWin64Args(args, stackOffset+8, newArgOffset+size, [], loadAddress{source=(extraStackReg, extraStack), destination=workReg} :: PushToStack(RegisterArg workReg) :: code, newExtra, newPreCode) end | _ => raise Foreign.Foreign "Unrecognised type for function argument" end val {typeForm, size, ...} = result val resultAreaPtr = r12 (* Saved value of r8 - This is callee save. *) val resultMemory = {base=resultAreaPtr, offset=0, index=NoIndex} fun storeIntValue moveOp = ([Move{source=RegisterArg eax, destination=MemoryArg resultMemory, moveSize=moveOp}], false) and storeFloatValue precision = ([XMMStoreToMemory{toStore=xmm0, address=resultMemory, precision=precision}], false) val (getResult, passStructAddress) = case (typeForm, size) of (CTypeVoid, _) => ([], false) | (_, 0w1) (* Includes structs *) => (* Single byte *) storeIntValue Move8 | (_, 0w2) => (* 16-bits *) storeIntValue Move16 | (CTypeFloatingPt, 0w4) => storeFloatValue SinglePrecision | (_, 0w4) => storeIntValue Move32 | (CTypeFloatingPt, 0w8) => storeFloatValue DoublePrecision | (_, 0w8) => storeIntValue Move64 | (CTypeStruct _, _) => ([], true) | _ => raise Foreign.Foreign "Unrecognised result type" (* argCode is the code to load and push the arguments. argStack is the amount of stack space the arguments will take. It's only used to ensure that the stack is aligned onto a 16-byte boundary. preArgCode is any code that is needed to copy the arguments before they are actually loaded. Because it is done before the argument registers are loaded it can use rcx, rdi and rsi. extraStack is local stack space needed. It is usually zero but if it is non-zero it must be a multiple of 16 bytes. The address of this area is loaded into r10 before preArgCode is called. *) val (argCode, argStack, preArgCode, extraStack) = if passStructAddress then (* The address of the result structure goes in the first argument register: rcx *) loadWin64Args(args, 0, 0w0, tl win64ArgRegs, [moveRR{source=resultAreaPtr, output=rcx, opSize=nativeWordOpSize}], 0, []) else loadWin64Args(args, 0, 0w0, win64ArgRegs, [], 0, []) local val align = argStack mod 16 in (* Always align the stack. *) val preArgAlign = if align = 0 then 0 else 16-align (* The total space on the stack that needs to be removed at the end. *) val postCallStackReset = argStack + preArgAlign + extraStack + 32 end in (* Save heap ptr. Needed in case we have a callback. *) [storeMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize)] @ ( (* Put the destination address into a callee save resgister. We have to put the C address in there now because an ML address wouldn't be updated by a possible GC in a callback. *) if #typeForm( result) <> CTypeVoid then [loadHeapMemory(resultAreaPtr, r8, 0, nativeWordOpSize)] else [] ) @ [ (* Save the stack pointer. *) storeMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Save ML stack and switch to C stack. *) loadMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize) (* Load the saved C stack pointer. 
*) ] @ ( if extraStack = 0 then [] else [ ArithToGenReg{opc=SUB, output=rsp, source=NonAddressConstArg(LargeInt.fromInt extraStack), opSize=nativeWordOpSize}, Move{source=RegisterArg rsp, destination=RegisterArg extraStackReg, moveSize=Move64} ] ) @ ( if preArgAlign = 0 then [] else [ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt preArgAlign), opSize=nativeWordOpSize}] ) @ ( (* The second argument is a SysWord containing the address of a malloced area of memory with the actual arguments in it. *) if null args then [] else [loadHeapMemory(mlArg2Reg, mlArg2Reg, 0, nativeWordOpSize)] ) @ preArgCode @ argCode @ [ (* Reserve a 32-byte area after the arguments. This is specific to the Windows ABI. *) ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt 32), opSize=nativeWordOpSize}, let (* The entry point is in a SysWord.word value in RAX. *) val entryPoint = case targetArch of ObjectId32Bit => MemoryArg{base=ebx, offset=0, index=Index4 eax} | _ => MemoryArg{base=eax, offset=0, index=NoIndex} in (* Call the function. We're discarding the value in rsp so no need to remove args. *) CallAddress entryPoint end, (* Restore the C stack value in case it's been changed by a callback. *) ArithToGenReg{opc=ADD, output=rsp, source=NonAddressConstArg(LargeInt.fromInt postCallStackReset), opSize=nativeWordOpSize}, storeMemory(rsp, rbp, memRegCStackPtr, nativeWordOpSize), loadMemory(rsp, rbp, memRegStackPtr, nativeWordOpSize), (* Restore the ML stack pointer. *) (* Reload the heap pointer. If we've called back to ML this could well have changed. *) loadMemory(r15, rbp, memRegLocalMPointer, nativeWordOpSize) ] @ (* Store the result in the destination. *) getResult @ [ReturnFromFunction 0 ] end (* callWindows64Bits *) fun closureWindows64Bits(args, result) = let val {typeForm, size, align, ...} = result (* Struct results are normally passed by reference. *) val resultStructByRef = (* If true we've copied rcx (the first arg) into r9 *) (case typeForm of CTypeStruct _ => true | _ => false) andalso size <> 0w1 andalso size <> 0w2 andalso size <> 0w4 andalso size <> 0w8 (* Store the register arguments and copy everything else into the argument structure on the stack. The code is ordered so that the early arguments are stored first. *) fun copyWin64Args([], _, _, _) = [] | copyWin64Args(arg::args, nStackArgs, argOffset, regs) = let val {size, align, typeForm, ...} = arg val newArgOffset = alignUp(argOffset, align) val destAddr = {base=rsp, offset=Word.toInt newArgOffset, index=NoIndex} (* Integer arguments. *) fun moveIntArg moveOp = case regs of (areg, _) :: regs' => Move{source=RegisterArg areg, destination=MemoryArg destAddr, moveSize=moveOp } :: copyWin64Args(args, nStackArgs, newArgOffset+size, regs') | [] => Move{source=MemoryArg {base=r10, offset=nStackArgs*8, index=NoIndex}, destination=RegisterArg rax, moveSize=Move64} :: Move{source=RegisterArg rax, destination=MemoryArg destAddr, moveSize=moveOp} :: copyWin64Args(args, nStackArgs+1, newArgOffset+size, []) in (* Structs of 1, 2, 4 and 8 bytes are passed as the corresponding int. 
*) case (typeForm, size) of (_, 0w1) => moveIntArg Move8 | (_, 0w2) => moveIntArg Move16 | (CTypeFloatingPt, 0w4) => ( case regs of (_, fpReg) :: regs' => XMMStoreToMemory{ toStore=fpReg, address=destAddr, precision=SinglePrecision} :: copyWin64Args(args, nStackArgs, newArgOffset+size, regs') | [] => moveIntArg Move32 ) | (_, 0w4) => (* 32-bits *) moveIntArg Move32 | (CTypeFloatingPt, 0w8) => ( case regs of (_, fpReg) :: regs' => XMMStoreToMemory{ toStore=fpReg, address=destAddr, precision=DoublePrecision} :: copyWin64Args(args, nStackArgs, newArgOffset+size, regs') | [] => moveIntArg Move64 ) | (_, 0w8) => (* 64-bits. *) moveIntArg Move64 | (CTypeStruct _, _) => (* Structures of other size are passed by reference. We need to copy the source structure into our stack area. Since rsi and rdi aren't used as args and rcx is only used for the first argument we can copy the argument now. *) ( case regs of (arg, _) :: regs' => moveMemory{source=(arg, 0), destination=(rsp, Word.toInt newArgOffset), length=Word.toInt size} @ copyWin64Args(args, nStackArgs, newArgOffset+size, regs') | [] => moveMemory{source=(r10, nStackArgs*8), destination=(rsp, Word.toInt newArgOffset), length=Word.toInt size} @ copyWin64Args(args, nStackArgs+1, newArgOffset+size, []) ) | _ => raise Foreign.Foreign "Unrecognised type for function argument" end val copyArgsFromRegsAndStack = if resultStructByRef then (* If we're returning a struct by reference we copy the address into r9 and pass that as the result address. *) Move{source=RegisterArg rcx, destination=RegisterArg r9, moveSize=Move64} :: copyWin64Args(args, 0, 0w0, tl win64ArgRegs) else copyWin64Args(args, 0, 0w0, win64ArgRegs) local fun getNextSize (arg, argOffset) = let val {size, align, ...} = arg in alignUp(argOffset, align) + size end in val argumentSpace = List.foldl getNextSize 0w0 args end val resultOffset = alignUp(argumentSpace, align) (* Offset of result area *) val (loadResults, resultSize) = if typeForm = CTypeVoid orelse resultStructByRef then ([], 0w0) else let val resultMem = {base=rsp, offset=Word.toInt resultOffset, index=NoIndex} val resultCode = case (typeForm, size) of (CTypeSignedInt, 0w1) => [Move{source=MemoryArg resultMem, destination=RegisterArg rax, moveSize=Move8X64}] | (_, 0w1) => [Move{source=MemoryArg resultMem, destination=RegisterArg rax, moveSize=Move8}] | (CTypeSignedInt, 0w2) => [Move{source=MemoryArg resultMem, destination=RegisterArg rax, moveSize=Move16X64}] | (_, 0w2) => [Move{source=MemoryArg resultMem, destination=RegisterArg rax, moveSize=Move16}] | (CTypeFloatingPt, 0w4) => [XMMArith{opc=SSE2MoveFloat, source=MemoryArg resultMem, output=xmm0}] | (CTypeSignedInt, 0w4) => [Move{source=MemoryArg resultMem, destination=RegisterArg rax, moveSize=Move32X64}] | (_, 0w4) => [Move{source=MemoryArg resultMem, destination=RegisterArg rax, moveSize=Move32}] | (CTypeFloatingPt, 0w8) => [XMMArith{opc=SSE2MoveDouble, source=MemoryArg resultMem, output=xmm0}] | (_, 0w8) => [Move{source=MemoryArg resultMem, destination=RegisterArg rax, moveSize=Move64}] | _ => raise Foreign.Foreign "Unrecognised result type" in (resultCode, size) end (* Stack space. The stack must be 16 byte aligned. We've pushed 8 regs and a return address so add a further 8 bytes to bring it back into alignment. If we're returning a struct by reference, though, we've pushed 9 regs so don't add 8. *) val stackToAllocate = Word.toInt(alignUp(resultOffset + resultSize, 0w16)) + (if resultStructByRef then 0 else 8) in [ (* Push callee-save registers. 
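   Eight registers (rbp, rbx, r12-r15, rdi, rsi) are pushed here, plus rcx
   when the result is a struct returned by reference, and that is where the
   104/112 offsets used for r10 just below come from.  A worked check
   (illustration only):

       8 pushes x 8 bytes           =  64
       return address               = + 8
       caller's 32-byte save area   = +32
                                       ---
                                       104   (112 with the extra rcx push)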
*) PushToStack(RegisterArg rbp), PushToStack(RegisterArg rbx), PushToStack(RegisterArg r12), PushToStack(RegisterArg r13), PushToStack(RegisterArg r14), PushToStack(RegisterArg r15), PushToStack(RegisterArg rdi), PushToStack(RegisterArg rsi) ] @ ( (* If we're returning a struct by reference we have to return the address in rax even though it's been set by the caller. Save this address. *) if resultStructByRef then [PushToStack(RegisterArg rcx)] else [] ) @ [ (* Set r10 to point to the original stack args if any. This is beyond the pushed regs and also the 32-byte area. *) LoadAddress{ output=r10, offset=if resultStructByRef then 112 else 104, base=SOME rsp, index=NoIndex, opSize=nativeWordOpSize}, (* Allocate stack space. *) ArithToGenReg{opc=SUB, output=rsp, source=NonAddressConstArg(LargeInt.fromInt stackToAllocate), opSize=nativeWordOpSize}, (* Move the function we're calling, in rax, into r13, a callee-save register *) moveRR{source=rax, output=r13, opSize=polyWordOpSize} ] @ copyArgsFromRegsAndStack @ [ (* Get the value for rbp. *) (* This is a problem for 32-in-64. The value of getThreadDataCall is an object ID but rbx may well no longer hold the heap base address. We use a special inline constant to hold the full 64-bit address. *) LoadAbsolute{value=getThreadDataCall, destination=rcx}, CallAddress(MemoryArg{base=rcx, offset=0, index=NoIndex}), moveRR{source=rax, output=rbp, opSize=nativeWordOpSize}, (* Save the address of the argument and result area. *) moveRR{source=rsp, output=rcx, opSize=nativeWordOpSize}, (* Switch to the ML stack. *) storeMemory(rsp, rbp, memRegCStackPtr, nativeWordOpSize), loadMemory(rsp, rbp, memRegStackPtr, nativeWordOpSize), (* Load the ML heap pointer. *) loadMemory(r15, rbp, memRegLocalMPointer, nativeWordOpSize), (* Now move the function closure into the closure register ready for the call. *) moveRR{source=r13, output=rdx, opSize=polyWordOpSize} ] @ (* Reload the heap base address in 32-in-64. *) ( if targetArch = ObjectId32Bit then [loadMemory(rbx, rbp, memRegSavedRbx, nativeWordOpSize)] else [] ) @ boxRegAsSysWord(rcx, rax, []) @ ( (* If we're returning a struct by reference the address for the result will have been passed in the first argument. We use that as the result area. Otherwise point to the result area on the stack. *) if resultStructByRef then loadMemory(rcx, r10, ~112, nativeWordOpSize) else ArithToGenReg{opc=ADD, output=rcx, source=NonAddressConstArg(Word.toLargeInt resultOffset), opSize=nativeWordOpSize} ) :: boxRegAsSysWord(rcx, mlArg2Reg, [rax]) @ [ (* Call the ML function using the full closure call. *) CallAddress( if targetArch = ObjectId32Bit then MemoryArg{base=rbx, index=Index4 rdx, offset=0} else MemoryArg{base=rdx, index=NoIndex, offset=0}), (* Save the ML stack pointer because we may have grown the stack. Switch to the C stack. *) storeMemory(rsp, rbp, memRegStackPtr, nativeWordOpSize), loadMemory(rsp, rbp, memRegCStackPtr, nativeWordOpSize), storeMemory(r15, rbp, memRegLocalMPointer, nativeWordOpSize) ] @ loadResults @ [ (* Remove the stack space. *) ArithToGenReg{opc=ADD, output=rsp, source=NonAddressConstArg(LargeInt.fromInt stackToAllocate), opSize=nativeWordOpSize} ] @ ( if resultStructByRef then [PopR rax] else [] ) @ [ PopR rsi, PopR rdi, PopR r15, PopR r14, PopR r13, PopR r12, PopR rbx, PopR rbp, (* Restore callee-save registers. *) ReturnFromFunction 0 (* Caller removes any stack arguments. *) ] end end local (* The rules for passing structs in SysV on X86/64 are complicated but most of the special cases don't apply. 
We don't support floating point larger than 8 bytes, packed structures or C++ constructors. It then reduces to the following: Structures of up to 8 bytes are passed in a single register and of 8-16 bytes in two registers. Larger structures are passed on the stack. The question is whether to use general registers or SSE2 registers. Each 8 byte chunk is considered independently after any internal structs have been unwrapped. Each chunk will consist of either a single 8-byte value (i.e.. a pointer, int64_t or a double) or one or more smaller values and possibly some padding. An SSE2 register is used if the value is a double, two floats or a single float and padding. Otherwise it must have at least one shorter int-like type (e.g. int, char, short etc) in which case a general register is used. That applies even if it also contains a float. If, having selected the kind of registers to be used, there are not enough for the whole struct it is passed on the stack. We don't really need this for simple arguments but it's easier to consider them all together. *) datatype argClass = ArgInMemory | ArgInRegs of { firstInSSE: bool, secondInSSE: bool } fun classifyArg arg = let val {size, ...} = arg (* Unwrap the struct and any internal structs. *) fun getFields([], _) = [] | getFields(field::fields, offset) = let val {size, align, typeForm} = field val alignedOffset = alignUp(offset, align) (* Align this even if it's a sub-struct *) in case typeForm of CTypeVoid => raise Foreign.Foreign "Void cannot be used for a function argument" | CTypeStruct elements => getFields(elements, alignedOffset) @ getFields(fields, alignedOffset+size) | _ => (typeForm, alignedOffset) :: getFields(fields, alignedOffset+size) end val isSSE = List.all (fn (CTypeFloatingPt, _) => true | _ => false) in if size > 0w16 then ArgInMemory else let val fieldsAndOffsets = getFields([arg], 0w0) in if size <= 0w8 (* Only the first register will be used. *) then ArgInRegs{firstInSSE=isSSE fieldsAndOffsets, secondInSSE=false} else let val (first8Bytes, second8Bytes) = List.partition (fn (_, off) => off <= 0w8) fieldsAndOffsets in ArgInRegs{firstInSSE=isSSE first8Bytes, secondInSSE=isSSE second8Bytes} end end end val sysVGenRegs = [rdi, rsi, rdx, rcx, r8, r9] and sysVFPRegs = [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7] (* Store a register into upto 8 bytes. Most values will involve a single store but odd-sized structs can require shifts and multiple stores. N.B. May modify the source register. *) fun storeUpTo8(reg, base, offset, size) = let val moveOp = if size = 0w8 then Move64 else if size >= 0w4 then Move32 else if size >= 0w2 then Move16 else Move8 in [Move{source=RegisterArg reg, destination=MemoryArg {base=base, offset=offset, index=NoIndex}, moveSize=moveOp}] end @ ( if size = 0w6 orelse size = 0w7 then [ ShiftConstant{ shiftType=SHR, output=reg, shift=0w32, opSize=OpSize64 }, Move{source=RegisterArg reg, destination=MemoryArg {base=base, offset=offset+4, index=NoIndex}, moveSize=Move16} ] else [] ) @ ( if size = 0w3 orelse size = 0w5 orelse size = 0w7 then [ ShiftConstant{ shiftType=SHR, output=reg, shift=Word8.fromLargeWord(Word.toLargeWord((size-0w1)*0w8)), opSize=OpSize64 }, Move{source=RegisterArg reg, destination=MemoryArg {base=base, offset=offset+Word.toInt(size-0w1), index=NoIndex}, moveSize=Move8} ] else [] ) in fun callUnix64Bits(args, result) = let val argWorkReg = r10 (* Not used for any arguments. *) val resultAreaPtr = r12 (* Saved value of r8 - This is callee save. 
*) val argPtrReg = r11 (* Pointer to argument area - Can't use mlArg2Reg since that's RSI on 32-in-64. *) fun loadSysV64Args([], stackOffset, _, _, _, code, preCode) = (code, stackOffset, preCode) | loadSysV64Args(arg::args, stackOffset, argOffset, gRegs, fpRegs, code, preCode) = let val {size, align, typeForm, ...} = arg (* Load a value into a register. Normally the size will be 1, 2, 4 or 8 bytes and this will just involve a simple load. Structs, though, can be of any size up to 8 bytes. *) fun loadRegister(reg, offset, size) = let (* We don't necessarily have to sign-extend. There's a comment in libffi that suggests that LVM expects it even though the SysV ABI doesn't require it. *) val moveOp = if size = 0w8 then Move64 else if typeForm = CTypeSignedInt andalso size = 0w4 then Move32X64 else if size >= 0w4 then Move32 else if typeForm = CTypeSignedInt andalso size = 0w2 then Move16X64 else if size >= 0w2 then Move16 else if typeForm = CTypeSignedInt andalso size = 0w1 then Move8X64 else Move8 in [Move{source=MemoryArg{base=argPtrReg, offset=Word.toInt offset, index=NoIndex}, destination=RegisterArg reg, moveSize=moveOp}] end @ ( if size = 0w6 orelse size = 0w7 then [ Move{source=MemoryArg{base=argPtrReg, offset=Word.toInt offset + 4, index=NoIndex}, destination=RegisterArg argWorkReg, moveSize=Move16}, ShiftConstant{ shiftType=SHL, output=argWorkReg, shift=0w32, opSize=OpSize64 }, ArithToGenReg{ opc=OR, output=reg, source=RegisterArg argWorkReg, opSize=OpSize64 } ] else [] ) @ ( if size = 0w3 orelse size = 0w5 orelse size = 0w7 then [ Move{source=MemoryArg{base=argPtrReg, offset=Word.toInt offset + Word.toInt(size-0w1), index=NoIndex}, destination=RegisterArg argWorkReg, moveSize=Move8}, ShiftConstant{ shiftType=SHL, output=argWorkReg, shift=Word8.fromLargeWord(Word.toLargeWord((size-0w1)*0w8)), opSize=OpSize64 }, ArithToGenReg{ opc=OR, output=reg, source=RegisterArg argWorkReg, opSize=OpSize64 } ] else [] ) val newArgOffset = alignUp(argOffset, align) val word1Addr = {base=argPtrReg, offset=Word.toInt newArgOffset, index=NoIndex} val word2Addr = {base=argPtrReg, offset=Word.toInt newArgOffset + 8, index=NoIndex} in case (classifyArg arg, size > 0w8, gRegs, fpRegs) of (* 8 bytes or smaller - single general reg. This is the usual case. *) (ArgInRegs{firstInSSE=false, ...}, false, gReg :: gRegs', fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', loadRegister(gReg, newArgOffset, size) @ code, preCode) (* 8 bytes or smaller - single SSE reg. Usual case for real arguments. *) | (ArgInRegs{firstInSSE=true, ...}, false, gRegs', fpReg :: fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', XMMArith{opc=if size = 0w4 then SSE2MoveFloat else SSE2MoveDouble, source=MemoryArg word1Addr, output=fpReg } :: code, preCode) (* 9-16 bytes - both values in general regs. *) | (ArgInRegs{firstInSSE=false, secondInSSE=false}, true, gReg1 :: gReg2 :: gRegs', fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', Move{source=MemoryArg word1Addr, destination=RegisterArg gReg1, moveSize=Move64} :: loadRegister(gReg2, newArgOffset+0w8, size-0w8) @ code, preCode) (* 9-16 bytes - first in general, second in SSE. 
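   For example, using the cType record from earlier in this file (an
   illustrative value, not code that is compiled here):

       val cLong   = { typeForm = CTypeSignedInt,  align = 0w8, size = 0w8 }
       val cDouble = { typeForm = CTypeFloatingPt, align = 0w8, size = 0w8 }
       val pair    = { typeForm = CTypeStruct [cLong, cDouble], align = 0w8, size = 0w16 }

   classifyArg pair is ArgInRegs{firstInSSE=false, secondInSSE=true}, so the
   integer half goes into the next general register and the double half into
   the next SSE register, which is the case handled here.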
*) | (ArgInRegs{firstInSSE=false, secondInSSE=true}, true, gReg :: gRegs', fpReg :: fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', Move{source=MemoryArg word1Addr, destination=RegisterArg gReg, moveSize=Move64} :: XMMArith{opc=if size = 0w12 then SSE2MoveFloat else SSE2MoveDouble, source=MemoryArg word2Addr, output=fpReg } :: code, preCode) (* 9-16 bytes - first in SSE, second in general. *) | (ArgInRegs{firstInSSE=true, secondInSSE=false}, true, gReg :: gRegs', fpReg :: fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', XMMArith{opc=SSE2MoveDouble, source=MemoryArg word1Addr, output=fpReg } :: loadRegister(gReg, newArgOffset+0w8, size-0w8) @ code, preCode) | (* 9-16 bytes - both values in SSE regs. *) (ArgInRegs{firstInSSE=true, secondInSSE=true}, true, gRegs', fpReg1 :: fpReg2 :: fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', XMMArith{opc=SSE2MoveDouble, source=MemoryArg word1Addr, output=fpReg1 } :: XMMArith{opc=if size = 0w12 then SSE2MoveFloat else SSE2MoveDouble, source=MemoryArg word2Addr, output=fpReg2 } :: code, preCode) | (_, _, gRegs', fpRegs') => (* Either larger than 16 bytes or we've run out of the right kind of registers. *) (* Move the argument in the preCode. It's possible a large struct could be the first argument and if we left it until the end RDI and RSI would already have been loaded. Structs are passed by value on the stack not, as in Win64, by reference. *) let val space = intAlignUp(Word.toInt size, 0w8) in loadSysV64Args(args, stackOffset+space, newArgOffset+size, gRegs', fpRegs', code, ArithToGenReg{opc=SUB, output=rsp, source=NonAddressConstArg(LargeInt.fromInt space), opSize=nativeWordOpSize} :: moveMemory{source=(argPtrReg, Word.toInt newArgOffset), destination=(rsp, 0), length=Word.toInt size} @ preCode) end end (* The rules for returning structs are similar to those for parameters. *) local (* Store a result register into the result area. In almost all cases this is very simple: the only complication is with structs of odd sizes. *) fun storeResult(reg, offset, size) = storeUpTo8(reg, resultAreaPtr, offset, size) val {size, typeForm, ...} = result in val (getResult, passArgAddress) = if typeForm = CTypeVoid then ([], false) else case (classifyArg result, size > 0w8) of (* 8 bytes or smaller - returned in RAX - Normal case for int-like results. *) (ArgInRegs{firstInSSE=false, ...}, false) => (storeResult(rax, 0, size), false) (* 8 bytes or smaller - returned in XMM0 - Normal case for real results. *) | (ArgInRegs{firstInSSE=true, ...}, false) => ([XMMStoreToMemory{toStore=xmm0, address={base=resultAreaPtr, offset=0, index=NoIndex}, precision=if size = 0w4 then SinglePrecision else DoublePrecision}], false) (* 9-16 bytes - returned in RAX/RDX. *) | (ArgInRegs{firstInSSE=false, secondInSSE=false}, true) => (storeResult(rax, 0, 0w8) @ storeResult(rdx, 0, size-0w8), false) (* 9-16 bytes - first in RAX, second in XMM0. *) | (ArgInRegs{firstInSSE=false, secondInSSE=true}, true) => (XMMStoreToMemory{toStore=xmm0, address={base=resultAreaPtr, offset=8, index=NoIndex}, precision=if size = 0w12 then SinglePrecision else DoublePrecision} :: storeResult(rax, 0, 0w8), false) (* 9-16 bytes - first in XMM0, second in RAX. 
*) | (ArgInRegs{firstInSSE=true, secondInSSE=false}, true) => (XMMStoreToMemory{toStore=xmm0, address={base=resultAreaPtr, offset=0, index=NoIndex}, precision=DoublePrecision} :: storeResult(rax, 8, size-0w8), false) (* 9-16 bytes - both values in SSE regs. *) | (ArgInRegs{firstInSSE=true, secondInSSE=true}, true) => ([XMMStoreToMemory{toStore=xmm0, address={base=resultAreaPtr, offset=0, index=NoIndex}, precision=DoublePrecision}, XMMStoreToMemory{toStore=xmm1, address={base=resultAreaPtr, offset=8, index=NoIndex}, precision=if size = 0w12 then SinglePrecision else DoublePrecision}], false) | _ => ([], true) (* Have to pass the address of the area in memory *) end val (argCode, argStack, preArgCode) = if passArgAddress (* If we have to pass the address of the result struct it goes in rdi. *) then loadSysV64Args(args, 0, 0w0, tl sysVGenRegs, sysVFPRegs, [moveRR{source=resultAreaPtr, output=rdi, opSize=nativeWordOpSize}], []) else loadSysV64Args(args, 0, 0w0, sysVGenRegs, sysVFPRegs, [], []) local val align = argStack mod 16 in (* Always align the stack. *) val preArgAlign = if align = 0 then 0 else 16-align end in (* Save heap ptr. Needed in case we have a callback. *) [storeMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize)] @ ( (* Put the destination address into a callee-save register. We have to put the C address in there now because an ML address wouldn't be updated by a possible GC in a callback. *) if #typeForm( result) <> CTypeVoid then [loadHeapMemory(resultAreaPtr, r8, 0, nativeWordOpSize)] else [] ) @ [ (* Save the stack pointer. *) storeMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Save ML stack and switch to C stack. *) loadMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize) (* Load the saved C stack pointer. *) ] @ ( if preArgAlign = 0 then [] else [ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt preArgAlign), opSize=nativeWordOpSize}] ) @ ( (* The second argument is a SysWord containing the address of a malloced area of memory with the actual arguments in it. *) if null args then [] else [loadHeapMemory(argPtrReg, mlArg2Reg, 0, nativeWordOpSize)] ) @ preArgCode @ argCode @ [ let (* The entry point is in a SysWord.word value in RAX. *) val entryPoint = case targetArch of ObjectId32Bit => MemoryArg{base=ebx, offset=0, index=Index4 eax} | _ => MemoryArg{base=eax, offset=0, index=NoIndex} in (* Call the function. We're discarding the value in rsp so no need to remove args. *) CallAddress entryPoint end, loadMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Restore the ML stack pointer. *) (* Reload the heap pointer. If we've called back to ML this could well have changed. *) loadMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize) ] @ (* Store the result in the destination. *) getResult @ [ ReturnFromFunction 0 ] end (* callUnix64Bits *) fun closureUnix64Bits(args, result) = let fun moveSysV64Args([], _, _, _, _, moveFromStack) = moveFromStack | moveSysV64Args(arg::args, stackSpace, argOffset, gRegs, fpRegs, moveFromStack) = let val {size, align, ...} = arg fun storeRegister(reg, offset, size) = storeUpTo8(reg, rsp, offset, size) val newArgOffset = alignUp(argOffset, align) val word1Addr = {base=rsp, offset=Word.toInt newArgOffset, index=NoIndex} val word2Addr = {base=rsp, offset=Word.toInt newArgOffset + 8, index=NoIndex} in case (classifyArg arg, size > 0w8, gRegs, fpRegs) of (* 8 bytes or smaller - single general reg. This is the usual case.
*) (ArgInRegs{firstInSSE=false, ...}, false, gReg :: gRegs', fpRegs') => storeRegister(gReg, Word.toInt newArgOffset, size) @ moveSysV64Args(args, stackSpace, newArgOffset+size, gRegs', fpRegs', moveFromStack) (* 8 bytes or smaller - single SSE reg. Usual case for real arguments. *) | (ArgInRegs{firstInSSE=true, ...}, false, gRegs', fpReg :: fpRegs') => XMMStoreToMemory{precision=if size = 0w4 then SinglePrecision else DoublePrecision, address=word1Addr, toStore=fpReg } :: moveSysV64Args(args, stackSpace, newArgOffset+size, gRegs', fpRegs', moveFromStack) (* 9-16 bytes - both values in general regs. *) | (ArgInRegs{firstInSSE=false, secondInSSE=false}, true, gReg1 :: gReg2 :: gRegs', fpRegs') => Move{source=MemoryArg word1Addr, destination=RegisterArg gReg1, moveSize=Move64} :: storeRegister(gReg2, Word.toInt newArgOffset+8, size-0w8) @ moveSysV64Args(args, stackSpace, newArgOffset+size, gRegs', fpRegs', moveFromStack) (* 9-16 bytes - first in general, second in SSE. *) | (ArgInRegs{firstInSSE=false, secondInSSE=true}, true, gReg :: gRegs', fpReg :: fpRegs') => Move{source=MemoryArg word1Addr, destination=RegisterArg gReg, moveSize=Move64} :: XMMStoreToMemory{precision=if size = 0w12 then SinglePrecision else DoublePrecision, address=word2Addr, toStore=fpReg } :: moveSysV64Args(args, stackSpace, newArgOffset+size, gRegs', fpRegs', moveFromStack) (* 9-16 bytes - first in SSE, second in general. *) | (ArgInRegs{firstInSSE=true, secondInSSE=false}, true, gReg :: gRegs', fpReg :: fpRegs') => XMMStoreToMemory{precision=DoublePrecision, address=word1Addr, toStore=fpReg } :: storeRegister(gReg, Word.toInt newArgOffset+8, size-0w8) @ moveSysV64Args(args, stackSpace, newArgOffset+size, gRegs', fpRegs', moveFromStack) | (* 9-16 bytes - both values in SSE regs. *) (ArgInRegs{firstInSSE=true, secondInSSE=true}, true, gRegs', fpReg1 :: fpReg2 :: fpRegs') => XMMStoreToMemory{precision=DoublePrecision, address=word1Addr, toStore=fpReg1 } :: XMMStoreToMemory{precision=if size = 0w12 then SinglePrecision else DoublePrecision, address=word2Addr, toStore=fpReg2 } :: moveSysV64Args(args, stackSpace, newArgOffset+size, gRegs', fpRegs', moveFromStack) | (_, _, gRegs', fpRegs') => (* Either larger than 16 bytes or we've run out of the right kind of register. Structs larger than 16 bytes are passed by value on the stack. Move the argument after we've stored all the registers in particular rsi and rdi. *) let val space = intAlignUp(Word.toInt size, 0w8) in moveSysV64Args(args, stackSpace+space, newArgOffset+size, gRegs', fpRegs', moveMemory{source=(r10, stackSpace), destination=(rsp, Word.toInt newArgOffset), length=Word.toInt size} @ moveFromStack) end end (* Result structs larger than 16 bytes are returned by reference. *) val resultStructByRef = #size ( result) > 0w16 val copyArgsFromRegsAndStack = if resultStructByRef (* rdi contains the address for the result. *) then moveSysV64Args(args, 0, 0w0, tl sysVGenRegs, sysVFPRegs, []) else moveSysV64Args(args, 0, 0w0, sysVGenRegs, sysVFPRegs, []) local fun getNextSize (arg, argOffset) = let val {size, align, ...} = arg in alignUp(argOffset, align) + size end in val argumentSpace = List.foldl getNextSize 0w0 args end (* Allocate a 16-byte area for any results returned in registers. This will not be used if the result is a structure larger than 16-bytes. *) val resultOffset = alignUp(argumentSpace, 0w8) (* Ensure the stack is 16 bytes aligned. We've pushed 6 regs and a return address so add a further 8 bytes to bring it back into alignment. 
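Six 8-byte pushes plus the 8-byte return address make 56 bytes; 56 mod 16 = 8, so the extra 8 bytes restore 16-byte alignment.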
If we're returning a struct by reference, though, we've pushed 7 regs so don't add 8. *) val stackToAllocate = Word.toInt(alignUp(resultOffset + 0w16, 0w16)) + (if resultStructByRef then 0 else 8) (* The rules for returning structs are similar to those for parameters. *) local (* The result area is always 16 bytes wide so we can load the result without risking reading outside. At least at the moment we ignore any sign extension. *) val {size, typeForm, ...} = result val resultOffset = Word.toInt resultOffset in val loadResults = if typeForm = CTypeVoid then [] else case (classifyArg result, size > 0w8) of (* 8 bytes or smaller - returned in RAX - Normal case for int-like results. *) (ArgInRegs{firstInSSE=false, ...}, false) => [Move{source=MemoryArg {base=rsp, offset=resultOffset, index=NoIndex}, destination=RegisterArg rax, moveSize=Move64}] (* 8 bytes or smaller - returned in XMM0 - Normal case for real results. *) | (ArgInRegs{firstInSSE=true, ...}, false) => [XMMStoreToMemory{toStore=xmm0, address={base=rsp, offset=resultOffset, index=NoIndex}, precision=if size = 0w4 then SinglePrecision else DoublePrecision}] (* 9-16 bytes - returned in RAX/RDX. *) | (ArgInRegs{firstInSSE=false, secondInSSE=false}, true) => [Move{source=MemoryArg {base=rsp, offset=resultOffset, index=NoIndex}, destination=RegisterArg rax, moveSize=Move64}, Move{source=MemoryArg {base=rsp, offset=resultOffset+8, index=NoIndex}, destination=RegisterArg rdx, moveSize=Move64}] (* 9-16 bytes - first in RAX, second in XMM0. *) | (ArgInRegs{firstInSSE=false, secondInSSE=true}, true) => [Move{source=MemoryArg {base=rsp, offset=resultOffset, index=NoIndex}, destination=RegisterArg rax, moveSize=Move64}, XMMStoreToMemory{toStore=xmm0, address={base=rsp, offset=resultOffset+8, index=NoIndex}, precision=if size = 0w12 then SinglePrecision else DoublePrecision}] (* 9-16 bytes - first in XMM0, second in RAX. *) | (ArgInRegs{firstInSSE=true, secondInSSE=false}, true) => [XMMStoreToMemory{toStore=xmm0, address={base=rsp, offset=resultOffset, index=NoIndex}, precision=DoublePrecision}, Move{source=MemoryArg {base=rsp, offset=resultOffset+8, index=NoIndex}, destination=RegisterArg rax, moveSize=Move64}] (* 9-16 bytes - both values in SSE regs.*) | (ArgInRegs{firstInSSE=true, secondInSSE=true}, true) => [XMMStoreToMemory{toStore=xmm0, address={base=rsp, offset=resultOffset, index=NoIndex}, precision=DoublePrecision}, XMMStoreToMemory{toStore=xmm1, address={base=rsp, offset=resultOffset+8, index=NoIndex}, precision=if size = 0w12 then SinglePrecision else DoublePrecision}] | _ => [] (* Have to pass the address of the area in memory *) end in [ (* Push callee-save registers. *) PushToStack(RegisterArg rbp), PushToStack(RegisterArg rbx), PushToStack(RegisterArg r12), PushToStack(RegisterArg r13), PushToStack(RegisterArg r14), PushToStack(RegisterArg r15) ] @ ( (* If we're returning a struct by reference we have to return the address in rax even though it's been set by the caller. Save this address. *) if resultStructByRef then [PushToStack(RegisterArg rdi)] else [] ) @ [ (* Set r10 to point to the original stack args if any. *) LoadAddress{ output=r10, offset=if resultStructByRef then 64 else 56, base=SOME rsp, index=NoIndex, opSize=nativeWordOpSize}, (* Allocate stack space. 
*) ArithToGenReg{opc=SUB, output=rsp, source=NonAddressConstArg(LargeInt.fromInt stackToAllocate), opSize=nativeWordOpSize}, (* Move the function we're calling, in rax, into r13, a callee-save register *) moveRR{source=rax, output=r13, opSize=polyWordOpSize} ] @ copyArgsFromRegsAndStack @ [ (* Get the value for rbp. This has to be an absolute address in 32-in-64. *) LoadAbsolute{value=getThreadDataCall, destination=rcx}, CallAddress(MemoryArg{base=rcx, offset=0, index=NoIndex}), moveRR{source=rax, output=rbp, opSize=nativeWordOpSize}, (* Save the address of the argument and result area. *) moveRR{source=rsp, output=rcx, opSize=nativeWordOpSize}, (* Switch to the ML stack. *) storeMemory(rsp, rbp, memRegCStackPtr, nativeWordOpSize), loadMemory(rsp, rbp, memRegStackPtr, nativeWordOpSize), (* Load the ML heap pointer. *) loadMemory(r15, rbp, memRegLocalMPointer, nativeWordOpSize), (* Now move the function closure into the closure register ready for the call. *) moveRR{source=r13, output=rdx, opSize=polyWordOpSize} ] @ (* Reload the heap base address in 32-in-64. *) ( if targetArch = ObjectId32Bit then [loadMemory(rbx, rbp, memRegSavedRbx, nativeWordOpSize)] else [] ) @ boxRegAsSysWord(rcx, rax, []) @ ( (* If we're returning a struct by reference the address for the result will have been passed in the first argument. We use that as the result area. Otherwise point to the result area on the stack. *) if resultStructByRef then loadMemory(rcx, r10, ~64, nativeWordOpSize) else ArithToGenReg{opc=ADD, output=rcx, source=NonAddressConstArg(Word.toLargeInt resultOffset), opSize=nativeWordOpSize} ) :: boxRegAsSysWord(rcx, mlArg2Reg, [rax]) @ [ (* Call the ML function using the full closure call. *) CallAddress( if targetArch = ObjectId32Bit then MemoryArg{base=rbx, index=Index4 rdx, offset=0} else MemoryArg{base=rdx, index=NoIndex, offset=0}), (* Save the ML stack pointer because we may have grown the stack. Switch to the C stack. *) storeMemory(rsp, rbp, memRegStackPtr, nativeWordOpSize), loadMemory(rsp, rbp, memRegCStackPtr, nativeWordOpSize), storeMemory(r15, rbp, memRegLocalMPointer, nativeWordOpSize) ] @ loadResults @ [ (* Remove the stack space. *) ArithToGenReg{opc=ADD, output=rsp, source=NonAddressConstArg(LargeInt.fromInt stackToAllocate), opSize=nativeWordOpSize} ] @ ( if resultStructByRef then [PopR rax] else [] ) @ [ PopR r15, PopR r14, PopR r13, PopR r12, PopR rbx, PopR rbp, (* Restore callee-save registers. *) ReturnFromFunction 0 (* Caller removes any stack arguments. *) ] end end fun foreignCall(abi: ffiABI, args: cType list, result: cType): Address.machineWord = let val code = case abi of FFI_UNIX64 => callUnix64Bits(args, result) | FFI_WIN64 => callWindows64Bits(args, result) | abi => call32Bits(abi, args, result) val functionName = "foreignCall" val debugSwitches = [(*Universal.tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), Universal.tagInject DEBUG.assemblyCodeTag true*)] val profileObject = createProfileObject functionName val newCode = codeCreate (functionName, profileObject, debugSwitches) val closure = makeConstantClosure() val () = X86OPTIMISE.generateCode{code=newCode, labelCount=0, ops=code, resultClosure=closure} in closureAsAddress closure end (* Build a callback function. The arguments are the abi, the list of argument types and the result type. The result is an ML function that takes an ML function, f, as its argument, registers it as a callback and returns the C function as its result. 
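Viewed from ML, and only as a sketch, the use is roughly val makeCallback = buildCallBack(abi, argTypes, resultType) followed by val cFunction = makeCallback f, where abi, argTypes, resultType and f are placeholders for the caller's values.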
When the C function is called the arguments are copied into temporary memory and the vector passed to f along with the address of the memory for the result. "f" stores the result in it when it returns and the result is then passed back as the result of the callback. N.B. This returns a closure cell which contains the address of the code. It can be used as a SysWord.word value except that while it exists the code will not be GCed. *) fun buildCallBack(abi: ffiABI, args: cType list, result: cType): Address.machineWord = let val code = case abi of FFI_UNIX64 => closureUnix64Bits(args, result) | FFI_WIN64 => closureWindows64Bits(args, result) | abi => closure32Bits(abi, args, result) val functionName = "foreignCallBack(2)" val debugSwitches = [(*Universal.tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), Universal.tagInject DEBUG.assemblyCodeTag true*)] val profileObject = createProfileObject functionName val newCode = codeCreate (functionName, profileObject, debugSwitches) val closure = makeConstantClosure() val () = X86OPTIMISE.generateCode{code=newCode, labelCount=0, ops=code, resultClosure=closure} val stage2Code = closureAsAddress closure fun resultFunction f = let (* Generate a small function to load the address of f into rax/eax and then jump to stage2. The idea is that it should be possible to generate this eventually in a single RTS call. That could be done by using a version of this as a model. *) val codeAddress = (* In the native code versions we extract the code address from the closure. We don't do that in 32-in-64 and instead the RTS does it. *) if targetArch = ObjectId32Bit then stage2Code else Address.loadWord(Address.toAddress stage2Code, 0w0) val code = [ Move{source=AddressConstArg(Address.toMachineWord f), destination=RegisterArg rax, moveSize=opSizeToMove polyWordOpSize}, JumpAddress(AddressConstArg codeAddress) ] val functionName = "foreignCallBack(1)" val debugSwitches = [(*Universal.tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), Universal.tagInject DEBUG.assemblyCodeTag true*)] val profileObject = createProfileObject functionName val newCode = codeCreate (functionName, profileObject, debugSwitches) val closure = makeConstantClosure() val () = X86OPTIMISE.generateCode{code=newCode, labelCount=0, ops=code, resultClosure=closure} val res = closureAsAddress closure (*val _ = print("Address is " ^ (LargeWord.toString(RunCall.unsafeCast res)) ^ "\n")*) in res end in Address.toMachineWord resultFunction end end; diff --git a/mlsource/MLCompiler/FOREIGNCALLSIG.sml b/mlsource/MLCompiler/FOREIGNCALLSIG.sml index 4ed39ab9..455b686e 100644 --- a/mlsource/MLCompiler/FOREIGNCALLSIG.sml +++ b/mlsource/MLCompiler/FOREIGNCALLSIG.sml @@ -1,35 +1,35 @@ (* - Copyright (c) 2016, 2018-19 David C.J. Matthews + Copyright (c) 2016, 2018-20 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) signature FOREIGNCALLSIG = sig val rtsCallFast: string * int * Universal.universal list -> Address.machineWord val rtsCallFastRealtoReal: string * Universal.universal list -> Address.machineWord val rtsCallFastRealRealtoReal: string * Universal.universal list -> Address.machineWord val rtsCallFastGeneraltoReal: string * Universal.universal list -> Address.machineWord val rtsCallFastRealGeneraltoReal: string * Universal.universal list -> Address.machineWord val rtsCallFastFloattoFloat: string * Universal.universal list -> Address.machineWord val rtsCallFastFloatFloattoFloat: string * Universal.universal list -> Address.machineWord val rtsCallFastGeneraltoFloat: string * Universal.universal list -> Address.machineWord val rtsCallFastFloatGeneraltoFloat: string * Universal.universal list -> Address.machineWord type abi and cType - val abiList: (string * abi) list + val abiList: unit -> (string * abi) list val foreignCall: abi * cType list * cType -> Address.machineWord val buildCallBack: abi * cType list * cType -> Address.machineWord end; diff --git a/mlsource/MLCompiler/INITIALISE_.ML b/mlsource/MLCompiler/INITIALISE_.ML index 6fe4b644..4e49e79b 100644 --- a/mlsource/MLCompiler/INITIALISE_.ML +++ b/mlsource/MLCompiler/INITIALISE_.ML @@ -1,2064 +1,2065 @@ (* Copyright (c) 2000 Cambridge University Technical Services Limited - Updated David C.J. Matthews 2008-9, 2012, 2013, 2015-19 + Updated David C.J. Matthews 2008-9, 2012, 2013, 2015-20 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Title: Initialise ML Global Declarations. 
Author: Dave Matthews,Cambridge University Computer Laboratory Copyright Cambridge University 1985 *) functor INITIALISE_ ( structure LEX: LEXSIG structure TYPETREE : TYPETREESIG structure STRUCTVALS : STRUCTVALSIG structure VALUEOPS : VALUEOPSSIG structure CODETREE : CODETREESIG structure EXPORTTREE: EXPORTTREESIG structure DATATYPEREP: DATATYPEREPSIG structure TYPEIDCODE: TYPEIDCODESIG structure MAKE: MAKESIG structure ADDRESS : AddressSig structure DEBUG: DEBUGSIG structure DEBUGGER : DEBUGGERSIG structure PRETTY : PRETTYSIG structure PRINTTABLE: PRINTTABLESIG structure MISC : sig val unescapeString : string -> string exception Conversion of string; (* string to int conversion failure *) end structure VERSION: sig val compilerVersion: string val versionNumber: int end structure UNIVERSALTABLE: sig type universal = Universal.universal type univTable type 'a tag = 'a Universal.tag val univLookup: univTable * 'a tag * string -> 'a option val fold: (string * universal * 'a -> 'a) -> 'a -> univTable -> 'a end sharing STRUCTVALS.Sharing = VALUEOPS.Sharing = TYPETREE.Sharing = EXPORTTREE.Sharing = PRETTY.Sharing = CODETREE.Sharing = MAKE.Sharing = ADDRESS = DATATYPEREP.Sharing = TYPEIDCODE.Sharing = DEBUGGER.Sharing = LEX.Sharing = PRINTTABLE.Sharing sharing STRUCTVALS.Sharing = VALUEOPS.Sharing = TYPETREE.Sharing = EXPORTTREE.Sharing = PRETTY.Sharing = CODETREE.Sharing = MAKE.Sharing = ADDRESS = DATATYPEREP.Sharing = TYPEIDCODE.Sharing = DEBUGGER.Sharing = LEX.Sharing = PRINTTABLE.Sharing = UNIVERSALTABLE ) : sig type gEnv val initGlobalEnv : {globalTable : gEnv, intIsArbitraryPrecision: bool } -> unit end = struct open STRUCTVALS; open TYPETREE open VALUEOPS; open CODETREE; open ADDRESS; open MAKE; open MISC; open EXPORTTREE open DATATYPEREP val intInfType = mkTypeConstruction ("int", intInfConstr, [], []) and realType = mkTypeConstruction ("real", realConstr, [], []) and charType = mkTypeConstruction ("char", charConstr, [], []) and wordType = mkTypeConstruction ("word", wordConstr, [], []) val declInBasis = [DeclaredAt inBasis] fun applyList _ [] = () | applyList f (h :: t) = (f h : unit; applyList f t); fun initGlobalEnv{globalTable : gEnv, intIsArbitraryPrecision: bool } = let val Env globalEnv = MAKE.gEnvAsEnv globalTable val enterGlobalValue = #enterVal globalEnv; val enterGlobalType = #enterType globalEnv; (* Some routines to help make the types. *) local (* careful - STRUCTVALS.intType differs from TYPETREE.intType *) open TYPETREE; in (* Make some type variables *) fun makeEqTV () = mkTypeVar (generalisable, true, false, false) fun makeTV () = mkTypeVar (generalisable, false, false, false) fun makePrintTV() = mkTypeVar (generalisable, false, false, true) fun makeTypeVariable() = makeTv {value=emptyType, level=generalisable, equality=false, nonunifiable=false, printable=false} (* Make some functions *) infixr 5 ->> fun a ->> b = mkFunctionType (a, b); infix 7 **; fun a ** b = mkProductType [a, b]; (* Type identifiers for the types of the declarations. 
*) val Int = if intIsArbitraryPrecision then intInfType else fixedIntType val String = stringType; val Bool = boolType; val Unit = unitType; val Char = charType; val Word = wordType; val Real = realType val Exn = exnType val mkTypeConstruction = mkTypeConstruction; val () = setPreferredInt(if intIsArbitraryPrecision then intInfConstr else fixedIntConstr) end; fun makePolymorphic(tvs, c) = let open TYPEIDCODE val tvs = List.filter(fn TypeVar tv => not justForEqualityTypes orelse tvEquality tv | _ => false) tvs in if null tvs then c else mkInlproc(c, List.length tvs, "", [], 0) end (* Function to make a type identifier with a pretty printer that just prints "?". None of the types are equality types so the equality function is empty. *) local fun monotypePrinter _ = PRETTY.PrettyString "?" in fun defaultEqAndPrintCode () = let open TypeValue val code = createTypeValue{ eqCode = CodeZero, printCode = mkConst (toMachineWord (ref monotypePrinter)), boxedCode = boxedNever, sizeCode = singleWord } in Global (genCode(code, [], 0) ()) end end fun makeTypeAbbreviation(name, fullName, typeVars, typeResult, locations) = makeTypeConstructor( name, typeVars, makeTypeFunction(basisDescription fullName, (typeVars, typeResult)), locations) (* Make an opaque type and add it to an environment. *) fun makeAndDeclareOpaqueType(typeName, fullName, env) = let val typeconstr = makeTypeConstructor(typeName, [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription fullName), declInBasis); in #enterType env (typeName, TypeConstrSet(typeconstr, [])); mkTypeConstruction (typeName, typeconstr, [], declInBasis) end; (* List of something *) fun List (base : types) : types = mkTypeConstruction ("list", tsConstr listConstr, [base], declInBasis); (* ref something *) fun Ref (base : types) : types = mkTypeConstruction ("ref", refConstr, [base], declInBasis); fun Option (base : types) : types = mkTypeConstruction ("option", tsConstr optionConstr, [base], declInBasis); (* Type-dependent functions. *) fun mkSpecialFun (name:string, typeof:types, opn: typeDependent) : values = makeOverloaded (name, typeof, opn); (* Overloaded functions. *) fun mkOverloaded (name:string) (typeof: types) : values = mkSpecialFun(name, typeof, TypeDep) (* Make a structure. Returns the table as an environment so that entries can be added to the structure. 
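For example, makeStructure(globalEnv, "Bool") below creates the Bool structure and returns an environment whose enterVal and enterType functions are then used to add bool, true, false and not to it.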
*) fun makeStructure(parentEnv, name) = let val str as Struct{signat=Signatures{tab, ...}, ...} = makeEmptyGlobal name val () = #enterStruct parentEnv (name, str) val Env env = makeEnv tab in env end val () = enterGlobalType ("unit", TypeConstrSet(unitConstr, [])); local val falseCons = mkGconstr ("false", Bool, createNullaryConstructor(EnumForm{tag=0w0, maxTag=0w1}, [], "false"), true, 2, declInBasis) val trueCons = mkGconstr ("true", Bool, createNullaryConstructor(EnumForm{tag=0w1, maxTag=0w1}, [], "true"), true, 2, declInBasis) val boolEnv = makeStructure(globalEnv, "Bool") (* Bool structure *) val notFn = mkGvar("not", Bool ->> Bool, mkUnaryFn BuiltIns.NotBoolean, declInBasis) in val () = #enterType boolEnv ("bool", TypeConstrSet(boolConstr, [trueCons, falseCons])) val () = #enterVal boolEnv ("true", trueCons) val () = #enterVal boolEnv ("false", falseCons) val () = #enterVal boolEnv ("not", notFn) end; val () = enterGlobalType ("int", TypeConstrSet(if intIsArbitraryPrecision then intInfConstr else fixedIntConstr, [])) val () = enterGlobalType ("char", TypeConstrSet(charConstr, [])) val () = enterGlobalType ("string", TypeConstrSet(stringConstr, [])) val () = enterGlobalType ("real", TypeConstrSet(realConstr, [])) val () = (* Enter :: and nil. *) List.app(fn(tv as Value{name, ...}) => enterGlobalValue(name, tv)) (tsConstructors listConstr) val () = enterGlobalType ("list", listConstr); val () = (* Enter NONE and SOME. *) List.app(fn(tv as Value{name, ...}) => enterGlobalValue(name, tv)) (tsConstructors optionConstr) val () = enterGlobalType ("option", optionConstr); local val refCons = let val a = mkTypeVar(generalisable, false, false, false) in mkGconstr ("ref", a ->> Ref a, createUnaryConstructor(RefForm, [a], "ref"), false, 1, declInBasis) end in val () = enterGlobalType ("ref", TypeConstrSet(refConstr, [refCons])); val () = enterGlobalValue ("ref", refCons); end local open BuiltIns fun monoTypePrinter _ = PRETTY.PrettyString "?" val idCode = let open TypeValue val equalLongWordFn = mkInlproc( mkBinary(LargeWordComparison TestEqual, mkLoadArgument 0, mkLoadArgument 1), 2, "EqualLargeWord()", [], 0) val code = createTypeValue{ eqCode=equalLongWordFn, printCode=mkConst (toMachineWord (ref monoTypePrinter)), boxedCode = boxedNever, sizeCode = singleWord } in Global (genCode(code, [], 0) ()) end in val largeWordType = makeTypeConstructor("word", [], makeFreeId(0, idCode, true, basisDescription "LargeWord.word"), declInBasis) val LargeWord = mkTypeConstruction ("LargeWord.word", largeWordType, [], declInBasis) end val () = enterGlobalType ("exn", TypeConstrSet(exnConstr, [])); val () = enterGlobalType ("word", TypeConstrSet(wordConstr, [])); val runCallEnv = makeStructure(globalEnv, "RunCall") fun enterRunCall (name : string, entry : codetree, typ : types) : unit = let val value = mkGvar (name, typ, entry, declInBasis); in #enterVal runCallEnv (name, value) end local (* unsafeCast. Can be used to convert any value to any type. *) val a = makeTV () val b = makeTV () val unsafeCastEntry = mkInlproc (mkLoadArgument 0 (* just the parameter *), 1, "unsafeCast(1)", [], 0) in val () = enterRunCall ("unsafeCast", makePolymorphic([a, b], unsafeCastEntry), a ->> b) end local val a = makeTV() and b = makeTV() open BuiltIns in (* isShort - test if a value is tagged rather than being an address. 
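For example, a small integer such as 1 is held as a tagged value, so isShort returns true for it, whereas a ref cell is always a heap address, so isShort returns false there.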
*) val () = enterRunCall ("isShort", makePolymorphic([a], mkUnaryFn IsTaggedValue), a ->> Bool) (* pointer equality *) val () = enterRunCall ("pointerEq", makePolymorphic([a], mkBinaryFn(WordComparison{test=TestEqual, isSigned=false})), a ** a ->> Bool) (* load a word. The index is in words and is always zero or positive. *) val () = enterRunCall ("loadWord", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLWord{isImmutable=false})), a ** Word ->> b) (* Load a word from an immutable. *) val () = enterRunCall ("loadWordFromImmutable", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLWord{isImmutable=true})), a ** Word ->> b) (* load a byte. The index is in bytes and is always zero or positive. Probably the result should be a Word8.word value or a char. *) val () = enterRunCall ("loadByte", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLByte{isImmutable=false})), a ** Word ->> b) (* Load a byte from an immutable. *) val () = enterRunCall ("loadByteFromImmutable", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLByte{isImmutable=true})), a ** Word ->> b) (* Get the length of a heap cell. *) val () = enterRunCall ("memoryCellLength", makePolymorphic([a], mkUnaryFn MemoryCellLength), a ->> Word) (* Return the flags. Perhaps this could return a Word8.word value instead of a word. *) val () = enterRunCall ("memoryCellFlags", makePolymorphic([a], mkUnaryFn MemoryCellFlags), a ->> Word) (* Return the number of bytes per word. This is a constant since we have separate pre-built compilers for 32-bit and 64-bit. N.B. The byte order is not a constant since we only have a single pre-built compiler for little-endian and big-endian interpreted code. *) val () = enterRunCall ("bytesPerWord", mkConst(toMachineWord wordSize), Word) (* Store a word *) val () = enterRunCall ("storeWord", makePolymorphic([a, b], mkStoreOperationFn(LoadStoreMLWord{isImmutable=false})), mkProductType[a, Word, b] ->> Unit) (* Store a byte *) val () = enterRunCall ("storeByte", makePolymorphic([a, b], mkStoreOperationFn(LoadStoreMLByte{isImmutable=false})), mkProductType[a, Word, b] ->> Unit) (* Lock a mutable cell. *) val () = enterRunCall ("clearMutableBit", makePolymorphic([a], mkUnaryFn ClearMutableFlag), a ->> Unit) (* Allocate a byte cell. The second argument is the flags byte. It might be better if this were a Word8.word value. *) val () = enterRunCall ("allocateByteMemory", makePolymorphic([a], mkBinaryFn AllocateByteMemory), Word ** Word ->> a) (* Allocate a word cell. *) val () = enterRunCall ("allocateWordMemory", makePolymorphic([a, b], mkAllocateWordMemoryFn), mkProductType[Word, Word, a] ->> b) (* Byte vector operations. *) val () = enterRunCall ("byteVectorEqual", makePolymorphic([a], mkBlockOperationFn BlockOpEqualByte), mkProductType[a, a, Word, Word, Word] ->> Bool) val () = enterRunCall ("byteVectorCompare", makePolymorphic([a], mkBlockOperationFn BlockOpCompareByte), mkProductType[a, a, Word, Word, Word] ->> Int) (* Block moves. *) val () = enterRunCall ("moveBytes", makePolymorphic([a], mkBlockOperationFn (BlockOpMove{isByteMove=true})), mkProductType[a, a, Word, Word, Word] ->> Unit) val () = enterRunCall ("moveWords", makePolymorphic([a], mkBlockOperationFn (BlockOpMove{isByteMove=false})), mkProductType[a, a, Word, Word, Word] ->> Unit) (* Untagged loads and stores. 
*) val () = enterRunCall ("loadUntagged", mkLoadOperationFn LoadStoreUntaggedUnsigned, String ** Word ->> Word) val () = enterRunCall ("storeUntagged", mkStoreOperationFn LoadStoreUntaggedUnsigned, mkProductType[String, Word, Word] ->> Unit) val () = enterRunCall ("touch", makePolymorphic([a], mkUnaryFn TouchAddress), a ->> Unit) end local val debugOpts = [] (* Place to add debugging if necessary. *) (* [tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), tagInject assemblyCodeTag true] *) fun makeCall rtsCall n entryName = rtsCall (entryName, n, debugOpts) val makeFastCall = makeCall CODETREE.Foreign.rtsCallFast (* We need to wrap this so that the arguments are passed in registers. *) fun makeRunCallTupled (argTypes, resultType, callN) : codetree = let val width = List.length argTypes val name = "rtsCall" ^ Int.toString width; local val f = mkLoadClosure 0 (* first item from enclosing scope *) val tuple = mkLoadArgument 0 (* the inner parameter *) val args = case argTypes of [singleType] => [(tuple, singleType)] | argTypes => let val argVals = List.tabulate(width, fn n => mkInd (n, tuple)) in ListPair.zipEq(argVals, argTypes) end in val innerBody = mkCall (f, args, resultType) end local (* The closure contains the address of the RTS call. *) val f = mkEval(mkConst callN, [mkLoadArgument 0]) val innerLambda = mkInlproc (innerBody, 1, name ^ "(1)", [mkLoadLocal 0], 0) in val outerBody = mkEnv([mkDec (0, f)], innerLambda) end val outerLambda = mkInlproc (outerBody, 1, name, [], 1) in outerLambda end (* "Full" calls pass the thread Id as the first parameter. *) fun makeRunCallTupledFull (argTypes, resultType) = let val width = List.length argTypes val callN = toMachineWord(makeFastCall(width + 1)) val name = "rtsCall" ^ Int.toString width; local val f = mkLoadClosure 0 (* first item from enclosing scope *) val tuple = mkLoadArgument 0 (* the inner parameter *) val args = case argTypes of [singleType] => [(tuple, singleType)] | argTypes => let val argVals = List.tabulate(width, fn n => mkInd (n, tuple)) in ListPair.zipEq(argVals, argTypes) end in val innerBody = mkEnv( [ mkDec(0, mkCall (f, (getCurrentThreadId, GeneralType) :: args, resultType)), mkNullDec checkRTSException ], mkLoadLocal 0) end local (* The closure contains the address of the RTS call. *) val f = mkEval(mkConst callN, [mkLoadArgument 0]) (* This creates the actual call. 
*) val innerLambda = mkInlproc (innerBody, 1, name ^ "(1)", [mkLoadLocal 0], 1) in val outerBody = mkEnv([mkDec (0, f)], innerLambda) end val outerLambda = mkInlproc (outerBody, 1, name, [], 1) in outerLambda end local val a = makeTV () and b = makeTV () fun makeInlCode(makeCall, name) = let val call1 = toMachineWord(makeCall 1) val body = mkEval(mkConst call1, [mkLoadArgument 0]) val proc = mkInlproc (body, 1, name, [], 0) in makePolymorphic([a, b], proc) end in val rtsCallFast1Entry = makeInlCode(makeFastCall, "rtsCallFast1") end local val a = makeTV () and b = makeTV () and c = makeTV () and d = makeTV () and e = makeTV () and f = makeTV () fun makeRtsCall(n, makeCall) = makeRunCallTupled(List.tabulate(n, fn _ => GeneralType), GeneralType, toMachineWord(makeCall n)) fun makeFullRtsCall n = makeRunCallTupledFull(List.tabulate(n, fn _ => GeneralType), GeneralType) in val rtsCallFull0Entry = makePolymorphic([a], makeFullRtsCall 0) and rtsCallFast0Entry = makePolymorphic([a], makeRtsCall(0, makeFastCall)) val rtsCall0Type = String ->> Unit ->> a val rtsCall1Type = String ->> a ->> b val rtsCallFull1Entry = makePolymorphic([a, b], makeFullRtsCall 1) val rtsCallFull2Entry = makePolymorphic([a, b, c], makeFullRtsCall 2) and rtsCallFast2Entry = makePolymorphic([a, b, c], makeRtsCall(2, makeFastCall)) val rtsCall2Type = String ->> TYPETREE.mkProductType [a,b] ->> c val rtsCallFull3Entry = makePolymorphic([a, b, c, d], makeFullRtsCall 3) val rtsCallFast3Entry = makePolymorphic([a, b, c, d], makeRtsCall(3, makeFastCall)) val rtsCall3Type = String ->> TYPETREE.mkProductType [a,b,c] ->> d val rtsCallFull4Entry = makePolymorphic([a, b, c, d, e], makeFullRtsCall 4) val rtsCallFast4Entry = makePolymorphic([a, b, c, d, e], makeRtsCall(4, makeFastCall)) val rtsCall4Type = String ->> TYPETREE.mkProductType [a,b,c,d] ->> e val rtsCallFull5Entry = makePolymorphic([a, b, c, d, e, f], makeFullRtsCall 5) val rtsCall5Type = String ->> TYPETREE.mkProductType [a,b,c,d,e] ->> f end in val () = enterRunCall ("rtsCallFull0", rtsCallFull0Entry, rtsCall0Type) val () = enterRunCall ("rtsCallFast0", rtsCallFast0Entry, rtsCall0Type) val () = enterRunCall ("rtsCallFull1", rtsCallFull1Entry, rtsCall1Type) val () = enterRunCall ("rtsCallFast1", rtsCallFast1Entry, rtsCall1Type) val () = enterRunCall ("rtsCallFull2", rtsCallFull2Entry, rtsCall2Type) val () = enterRunCall ("rtsCallFast2", rtsCallFast2Entry, rtsCall2Type) val () = enterRunCall ("rtsCallFull3", rtsCallFull3Entry, rtsCall3Type) val () = enterRunCall ("rtsCallFast3", rtsCallFast3Entry, rtsCall3Type) val () = enterRunCall ("rtsCallFast4", rtsCallFast4Entry, rtsCall4Type) val () = enterRunCall ("rtsCallFull4", rtsCallFull4Entry, rtsCall4Type) val () = enterRunCall ("rtsCallFull5", rtsCallFull5Entry, rtsCall5Type) val makeRunCallTupled = makeRunCallTupled (* Needed for reals. *) end local (* Create nullary exception. *) fun makeException0(name, id) = let val exc = Value{ name = name, typeOf = TYPETREE.exnType, access = Global(mkConst(toMachineWord id)), class = Exception, locations = declInBasis, references = NONE, instanceTypes=NONE } in #enterVal runCallEnv (name, exc) end (* Create exception with parameter. *) and makeException1(name, id, exType) = let val exc = Value{ name = name, typeOf = exType ->> TYPETREE.exnType, access = Global(mkConst(toMachineWord id)), class = Exception, locations = declInBasis, references = NONE, instanceTypes=NONE } in #enterVal runCallEnv (name, exc) end (* Exception numbers. Most of these are hard-coded in the RTS. 
*) val EXC_interrupt = 1 val EXC_syserr = 2 val EXC_size = 4 val EXC_overflow = 5 val EXC_divide = 7 val EXC_conversion = 8 val EXC_XWindows = 10 val EXC_subscript = 11 val EXC_thread = 12 val EXC_Bind = 100 (* In Match compiler. *) val EXC_Match = 101 val EXC_Fail = 103 in val () = List.app makeException0 [ ("Interrupt", EXC_interrupt), ("Size", EXC_size), ("Bind", EXC_Bind), ("Div", EXC_divide), ("Match", EXC_Match), ("Overflow", EXC_overflow), ("Subscript", EXC_subscript) ] val () = List.app makeException1 [ ("Fail", EXC_Fail, String), ("Conversion", EXC_conversion, String), ("XWindows", EXC_XWindows, String), ("Thread", EXC_thread, String), ("SysErr", EXC_syserr, String ** Option LargeWord) ] end (* Standard Basis structures for basic types. These contain the definitions of the basic types and operations on them. The structures are extended in the basis library and overloaded functions are extracted from them. *) local val largeIntEnv = makeStructure(globalEnv, "LargeInt") (* The comparison operations take two arbitrary precision ints and a general "compare" function that returns a fixed precision int. *) val compareType = mkProductType[intInfType, intInfType, intInfType ** intInfType ->> fixedIntType] ->> Bool val arithType = mkProductType[intInfType, intInfType, intInfType ** intInfType ->> intInfType] ->> intInfType fun enterArbitrary(name, oper, typ) = let val value = mkGvar (name, typ, mkArbitraryFn oper, declInBasis) in #enterVal largeIntEnv (name, value) end in val () = #enterType largeIntEnv ("int", TypeConstrSet(intInfConstr, [])) (* These functions are used internally. *) val () = enterArbitrary("less", ArbCompare BuiltIns.TestLess, compareType) val () = enterArbitrary("greater", ArbCompare BuiltIns.TestGreater, compareType) val () = enterArbitrary("lessEq", ArbCompare BuiltIns.TestLessEqual, compareType) val () = enterArbitrary("greaterEq", ArbCompare BuiltIns.TestGreaterEqual, compareType) val () = enterArbitrary("add", ArbArith BuiltIns.ArithAdd, arithType) val () = enterArbitrary("subtract", ArbArith BuiltIns.ArithSub, arithType) val () = enterArbitrary("multiply", ArbArith BuiltIns.ArithMult, arithType) end local val fixedIntEnv = makeStructure(globalEnv, "FixedInt") open BuiltIns fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal fixedIntEnv (name, value) end val compareType = fixedIntType ** fixedIntType ->> Bool and binaryType = fixedIntType ** fixedIntType ->> fixedIntType fun enterComparison(name, test) = enterBinary(name, WordComparison{test=test, isSigned=true}, compareType) and enterBinaryOp(name, oper) = enterBinary(name, FixedPrecisionArith oper, binaryType) in val () = #enterType fixedIntEnv ("int", TypeConstrSet(fixedIntConstr, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("quot", ArithQuot) val () = enterBinaryOp("rem", ArithRem) end local open BuiltIns val largeWordEnv = makeStructure(globalEnv, "LargeWord") fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal largeWordEnv (name, value) end val compareType = LargeWord ** LargeWord ->> Bool and binaryType = LargeWord ** LargeWord ->> LargeWord and shiftType = LargeWord ** Word ->> LargeWord (* The shift amount is a 
Word. *) fun enterComparison(name, test) = enterBinary(name, LargeWordComparison test, compareType) and enterBinaryOp(name, oper) = enterBinary(name, LargeWordArith oper, binaryType) and enterBinaryLogical(name, oper) = enterBinary(name, LargeWordLogical oper, binaryType) and enterBinaryShift(name, oper) = enterBinary(name, LargeWordShift oper, shiftType) in val () = #enterType largeWordEnv ("word", TypeConstrSet(largeWordType, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("div", ArithDiv) val () = enterBinaryOp("mod", ArithMod) val () = enterBinaryLogical("orb", LogicalOr) val () = enterBinaryLogical("andb", LogicalAnd) val () = enterBinaryLogical("xorb", LogicalXor) val () = enterBinaryShift("<<", ShiftLeft) val () = enterBinaryShift(">>", ShiftRightLogical) val () = enterBinaryShift("~>>", ShiftRightArithmetic) val LargeWord = LargeWord end local val wordStructEnv = makeStructure(globalEnv, "Word") open BuiltIns fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal wordStructEnv (name, value) end val compareType = Word ** Word ->> Bool and binaryType = Word ** Word ->> Word fun enterComparison(name, test) = enterBinary(name, WordComparison{test=test, isSigned=false}, compareType) and enterBinaryOp(name, oper) = enterBinary(name, WordArith oper, binaryType) and enterBinaryLogical(name, oper) = enterBinary(name, WordLogical oper, binaryType) and enterBinaryShift(name, oper) = enterBinary(name, WordShift oper, binaryType) val toLargeWordFn = mkGvar ("toLargeWord", Word ->> LargeWord, mkUnaryFn UnsignedToLongWord, declInBasis) and toLargeWordXFn = mkGvar ("toLargeWordX", Word ->> LargeWord, mkUnaryFn SignedToLongWord, declInBasis) and fromLargeWordFn = mkGvar ("fromLargeWord", LargeWord ->> Word, mkUnaryFn LongWordToTagged, declInBasis) in val () = #enterType wordStructEnv ("word", TypeConstrSet(wordConstr, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("div", ArithDiv) val () = enterBinaryOp("mod", ArithMod) val () = enterBinaryLogical("orb", LogicalOr) val () = enterBinaryLogical("andb", LogicalAnd) val () = enterBinaryLogical("xorb", LogicalXor) val () = enterBinaryShift("<<", ShiftLeft) val () = enterBinaryShift(">>", ShiftRightLogical) val () = enterBinaryShift("~>>", ShiftRightArithmetic) val () = #enterVal wordStructEnv ("toLargeWord", toLargeWordFn) val () = #enterVal wordStructEnv ("toLargeWordX", toLargeWordXFn) val () = #enterVal wordStructEnv ("fromLargeWord", fromLargeWordFn) end local val charEnv = makeStructure(globalEnv, "Char") open BuiltIns (* Comparison functions are the same as Word. 
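A char is represented by its character code as a tagged word value, so the unsigned word comparisons give the expected ordering by code.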
*) fun enterComparison(name, test) = let val typ = Char ** Char ->> Bool val entry = mkBinaryFn(WordComparison{test=test, isSigned=false}) val value = mkGvar (name, typ, entry, declInBasis) in #enterVal charEnv (name, value) end in val () = #enterType charEnv ("char", TypeConstrSet(charConstr, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) end local val stringEnv = makeStructure(globalEnv, "String") in val () = #enterType stringEnv ("string", TypeConstrSet(stringConstr, [])) end local val realEnv = makeStructure(globalEnv, "Real") (* These are only used in Real so are included here rather than in RunCall. rtsCallFastRealtoReal is used for functions such as sqrt. rtsCallFastGeneraltoReal is used for Real.fromLargeInt. *) val debugOpts = [] (* Place to add debugging if necessary. *) fun makeFastRealRealCall entryName = CODETREE.Foreign.rtsCallFastRealtoReal (entryName, debugOpts) and makeFastRealRealRealCall entryName = CODETREE.Foreign.rtsCallFastRealRealtoReal (entryName, debugOpts) and makeFastIntInfRealCall entryName = CODETREE.Foreign.rtsCallFastGeneraltoReal (entryName, debugOpts) and makeFastRealGeneralRealCall entryName = CODETREE.Foreign.rtsCallFastRealGeneraltoReal (entryName, debugOpts) val rtsCallFastR_REntry = makeRunCallTupled([DoubleFloatType], DoubleFloatType, toMachineWord makeFastRealRealCall) (* This needs to be tupled. *) val rtsCallFastRR_REntry = makeRunCallTupled([DoubleFloatType, DoubleFloatType], DoubleFloatType, toMachineWord makeFastRealRealRealCall) and rtsCallFastRI_REntry = makeRunCallTupled([DoubleFloatType, GeneralType], DoubleFloatType, toMachineWord makeFastRealGeneralRealCall) val rtsCallFastI_REntry = makeRunCallTupled([GeneralType], DoubleFloatType, toMachineWord makeFastIntInfRealCall) val rtsCallFastF_F = mkGvar ("rtsCallFastR_R", String ->> Real ->> Real, rtsCallFastR_REntry, declInBasis) val rtsCallFastFF_F = mkGvar ("rtsCallFastRR_R", String ->> Real ** Real ->> Real, rtsCallFastRR_REntry, declInBasis) val rtsCallFastFG_F = mkGvar ("rtsCallFastRI_R", String ->> Real ** Int ->> Real, rtsCallFastRI_REntry, declInBasis) val rtsCallFastG_F = mkGvar ("rtsCallFastI_R", String ->> intInfType ->> Real, rtsCallFastI_REntry, declInBasis) fun enterUnary(name, oper, typ) = let val value = mkGvar (name, typ, mkUnaryFn oper, declInBasis) in #enterVal realEnv (name, value) end fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal realEnv (name, value) end val compareType = Real ** Real ->> Bool and binaryType = Real ** Real ->> Real and unaryType = Real ->> Real and realToFixType = Real ->> fixedIntType open BuiltIns IEEEReal fun enterComparison(name, test) = enterBinary(name, RealComparison(test, PrecDouble), compareType) and enterBinaryOp(name, oper) = enterBinary(name, RealArith(oper, PrecDouble), binaryType) in val () = #enterType realEnv ("real", TypeConstrSet(realConstr, [])) val () = #enterVal realEnv ("rtsCallFastR_R", rtsCallFastF_F) val () = #enterVal realEnv ("rtsCallFastRR_R", rtsCallFastFF_F) val () = #enterVal realEnv ("rtsCallFastRI_R", rtsCallFastFG_F) val () = #enterVal realEnv ("rtsCallFastI_R", rtsCallFastG_F) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterComparison("==", TestEqual) (* real is not an eqtype. 
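In particular == is IEEE equality: it returns false whenever either argument is a NaN, so it does not coincide with the structural equality an eqtype would provide.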
*) (* Included unordered mainly because it's easy to implement isNan. *) val () = enterComparison("unordered", TestUnordered) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("/", ArithDiv) val () = enterUnary("~", RealNeg PrecDouble, unaryType) val () = enterUnary("abs", RealAbs PrecDouble, unaryType) val () = enterUnary("fromFixedInt", RealFixedInt PrecDouble, fixedIntType ->> Real) val () = enterUnary("truncFix", RealToInt(PrecDouble, TO_ZERO), realToFixType) val () = enterUnary("roundFix", RealToInt(PrecDouble, TO_NEAREST), realToFixType) val () = enterUnary("ceilFix", RealToInt(PrecDouble, TO_POSINF), realToFixType) val () = enterUnary("floorFix", RealToInt(PrecDouble, TO_NEGINF), realToFixType) end local val real32Env = makeStructure(globalEnv, "Real32") val floatType = mkTypeConstruction ("real", floatConstr, [], []) val Float = floatType val debugOpts = [] (* Place to add debugging if necessary. *) fun makeFastFloatFloatCall entryName = CODETREE.Foreign.rtsCallFastFloattoFloat (entryName, debugOpts) and makeFastFloatFloatFloatCall entryName = CODETREE.Foreign.rtsCallFastFloatFloattoFloat (entryName, debugOpts) and makeFastIntInfFloatCall entryName = CODETREE.Foreign.rtsCallFastGeneraltoFloat (entryName, debugOpts) and makeFastFloatGeneralFloatCall entryName = CODETREE.Foreign.rtsCallFastFloatGeneraltoFloat (entryName, debugOpts) val rtsCallFastR_REntry = makeRunCallTupled([SingleFloatType], SingleFloatType, toMachineWord makeFastFloatFloatCall) (* This needs to be tupled. *) val rtsCallFastRR_REntry = makeRunCallTupled([SingleFloatType, SingleFloatType], SingleFloatType, toMachineWord makeFastFloatFloatFloatCall) and rtsCallFastRI_REntry = makeRunCallTupled([SingleFloatType, GeneralType], SingleFloatType, toMachineWord makeFastFloatGeneralFloatCall) val rtsCallFastI_REntry = makeRunCallTupled([GeneralType], SingleFloatType, toMachineWord makeFastIntInfFloatCall) val rtsCallFastF_F = mkGvar ("rtsCallFastF_F", String ->> Float ->> Float, rtsCallFastR_REntry, declInBasis) val rtsCallFastFF_F = mkGvar ("rtsCallFastFF_F", String ->> Float ** Float ->> Float, rtsCallFastRR_REntry, declInBasis) val rtsCallFastFG_F = mkGvar ("rtsCallFastFI_F", String ->> Float ** Int ->> Float, rtsCallFastRI_REntry, declInBasis) val rtsCallFastG_F = mkGvar ("rtsCallFastI_F", String ->> intInfType ->> Float, rtsCallFastI_REntry, declInBasis) fun enterUnary(name, oper, typ) = let val value = mkGvar (name, typ, mkUnaryFn oper, declInBasis) in #enterVal real32Env (name, value) end fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal real32Env (name, value) end val compareType = Float ** Float ->> Bool and binaryType = Float ** Float ->> Float and unaryType = Float ->> Float and floatToFixType = Float ->> fixedIntType open BuiltIns IEEEReal fun enterComparison(name, test) = enterBinary(name, RealComparison(test, PrecSingle), compareType) and enterBinaryOp(name, oper) = enterBinary(name, RealArith(oper, PrecSingle), binaryType) in val () = #enterType real32Env ("real", TypeConstrSet(floatConstr, [])) val () = enterUnary("toLarge", BuiltIns.FloatToDouble, floatType ->> Real) (* Conversion with the current rounding mode. *) and () = enterUnary("fromReal", BuiltIns.DoubleToFloat NONE, Real ->> floatType) (* There are various versions of this function for each of the rounding modes. 
*) and () = enterUnary("fromRealRound", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_NEAREST), Real ->> floatType) and () = enterUnary("fromRealTrunc", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_ZERO), Real ->> floatType) and () = enterUnary("fromRealCeil", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_POSINF), Real ->> floatType) and () = enterUnary("fromRealFloor", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_NEGINF), Real ->> floatType) val () = #enterVal real32Env ("rtsCallFastR_R", rtsCallFastF_F) val () = #enterVal real32Env ("rtsCallFastRR_R", rtsCallFastFF_F) val () = #enterVal real32Env ("rtsCallFastRI_R", rtsCallFastFG_F) val () = #enterVal real32Env ("rtsCallFastI_R", rtsCallFastG_F) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterComparison("==", TestEqual) (* Real32.real is not an eqtype. *) val () = enterComparison("unordered", TestUnordered) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("/", ArithDiv) val () = enterUnary("~", RealNeg PrecSingle, unaryType) val () = enterUnary("abs", RealAbs PrecSingle, unaryType) val () = enterUnary("truncFix", RealToInt(PrecSingle, TO_ZERO), floatToFixType) val () = enterUnary("roundFix", RealToInt(PrecSingle, TO_NEAREST), floatToFixType) val () = enterUnary("ceilFix", RealToInt(PrecSingle, TO_POSINF), floatToFixType) val () = enterUnary("floorFix", RealToInt(PrecSingle, TO_NEGINF), floatToFixType) end val bootstrapEnv = makeStructure(globalEnv, "Bootstrap") fun enterBootstrap (name : string, entry : codetree, typ : types) : unit = let val value = mkGvar (name, typ, entry, declInBasis) in #enterVal bootstrapEnv (name, value) end local val threadEnv = makeStructure(globalEnv, "Thread") open TypeValue fun monoTypePrinter _ = PRETTY.PrettyString "?" val code = createTypeValue{ eqCode=equalWordFn, printCode=mkConst (toMachineWord (ref monoTypePrinter)), boxedCode=boxedAlways, sizeCode=singleWord } (* Thread.thread type. This is an equality type with pointer equality. *) val threadConstr= makeTypeConstructor ( "thread", [], makeFreeId(0, Global (genCode(code, [], 0) ()), true, basisDescription "thread"), [DeclaredAt inBasis]) val threadType = mkTypeConstruction ("thread", threadConstr, [], []); val selfFunction = mkGvar ("self", Unit ->> threadType, getCurrentThreadIdFn, declInBasis) val atIncrFunction = mkGvar("atomicIncr", Ref Word ->> Word, mkUnaryFn BuiltIns.AtomicIncrement, declInBasis) val atDecrFunction = mkGvar("atomicDecr", Ref Word ->> Word, mkUnaryFn BuiltIns.AtomicDecrement, declInBasis) val atResetFunction = mkGvar("atomicReset", Ref Word ->> Unit, mkUnaryFn BuiltIns.AtomicReset, declInBasis) in val () = #enterType threadEnv ("thread", TypeConstrSet(threadConstr, [])) val () = #enterVal threadEnv ("self", selfFunction) val () = #enterVal threadEnv ("atomicIncr", atIncrFunction) val () = #enterVal threadEnv ("atomicDecr", atDecrFunction) val () = #enterVal threadEnv ("atomicReset", atResetFunction) end local val fmemEnv = makeStructure(globalEnv, "ForeignMemory") val a = makeTV() (* We don't have Word8.word or Word32.word at this point so the easiest way to deal with this is to make them polymorphic. 
*) val get8Function = mkGvar("get8", LargeWord ** Word ->> a, makePolymorphic([a], mkLoadOperationFn LoadStoreC8), declInBasis) val get16Function = mkGvar("get16", LargeWord ** Word ->> Word, mkLoadOperationFn LoadStoreC16, declInBasis) val get32Function = mkGvar("get32", LargeWord ** Word ->> a, makePolymorphic([a], mkLoadOperationFn LoadStoreC32), declInBasis) val get64Function = mkGvar("get64", LargeWord ** Word ->> LargeWord, mkLoadOperationFn LoadStoreC64, declInBasis) val getFloatFunction = mkGvar("getFloat", LargeWord ** Word ->> Real, mkLoadOperationFn LoadStoreCFloat, declInBasis) val getDoubleFunction = mkGvar("getDouble", LargeWord ** Word ->> Real, mkLoadOperationFn LoadStoreCDouble, declInBasis) val set8Function = mkGvar("set8", mkProductType[LargeWord, Word, a] ->> Unit, makePolymorphic([a], mkStoreOperationFn LoadStoreC8), declInBasis) val set16Function = mkGvar("set16", mkProductType[LargeWord, Word, Word] ->> Unit, mkStoreOperationFn LoadStoreC16, declInBasis) val set32Function = mkGvar("set32", mkProductType[LargeWord, Word, a] ->> Unit, makePolymorphic([a], mkStoreOperationFn LoadStoreC32), declInBasis) val set64Function = mkGvar("set64", mkProductType[LargeWord, Word, LargeWord] ->> Unit, mkStoreOperationFn LoadStoreC64, declInBasis) val setFloatFunction = mkGvar("setFloat", mkProductType[LargeWord, Word, Real] ->> Unit, mkStoreOperationFn LoadStoreCFloat, declInBasis) val setDoubleFunction = mkGvar("setDouble", mkProductType[LargeWord, Word, Real] ->> Unit, mkStoreOperationFn LoadStoreCDouble, declInBasis) val allocCStackFn = mkGvar("allocCStack", Word ->> LargeWord, mkUnaryFn BuiltIns.AllocCStack, declInBasis) val freeCStackFn = mkGvar("freeCStack", LargeWord ** Word ->> Unit, mkBinaryFn BuiltIns.FreeCStack, declInBasis) in val () = #enterVal fmemEnv ("get8", get8Function) val () = #enterVal fmemEnv ("get16", get16Function) val () = #enterVal fmemEnv ("get32", get32Function) val () = #enterVal fmemEnv ("get64", get64Function) val () = #enterVal fmemEnv ("getFloat", getFloatFunction) val () = #enterVal fmemEnv ("getDouble", getDoubleFunction) val () = #enterVal fmemEnv ("set8", set8Function) val () = #enterVal fmemEnv ("set16", set16Function) val () = #enterVal fmemEnv ("set32", set32Function) val () = #enterVal fmemEnv ("set64", set64Function) val () = #enterVal fmemEnv ("setFloat", setFloatFunction) val () = #enterVal fmemEnv ("setDouble", setDoubleFunction) val () = #enterVal fmemEnv ("allocCStack", allocCStackFn) (* Free is a binary operation that takes both the allocated address and the size. The size is used by the compiled code where this is implemented using the C-stack. The allocated address is intended for possible use by the interpreter, where it can be implemented as malloc/free.
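A minimal usage sketch, with the size and offset chosen purely for illustration: val addr = allocCStack 0w16; val () = set64(addr, 0w0, 0w0); val () = freeCStack(addr, 0w16).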
*) val () = #enterVal fmemEnv ("freeCStack", freeCStackFn) end local val foreignEnv = makeStructure(globalEnv, "Foreign") local val EXC_foreign = 23 val foreignException = Value{ name = "Foreign", typeOf = String ->> TYPETREE.exnType, access = Global(mkConst(toMachineWord EXC_foreign)), class = Exception, locations = declInBasis, references = NONE, instanceTypes=NONE } in val () = #enterVal foreignEnv ("Foreign", foreignException) end val arg0 = mkLoadArgument 0 val arg1 = mkLoadArgument 1 local val callForeignCall = mkEval(mkConst (toMachineWord CODETREE.Foreign.foreignCall), [arg0]) val innerBody = mkEval(mkLoadClosure 0, [mkInd(0, arg0), mkInd(1, arg0), mkInd(2, arg0)]) val outerBody = mkEnv([mkDec(0, callForeignCall)], mkInlproc(innerBody, 1, "foreignCall(1)(1)", [mkLoadLocal 0], 0)) in val foreignCallEntry = mkInlproc(outerBody, 1, "foreignCall(1)", [], 1) end local (* Build a callback. First apply the compiler to the abi/argtype/restype values. Then apply the result to a function to generate the final C callback code. The C callback code calls the function with two arguments. Here we have to pass it a function that expects a tuple and unwrap it. *) val innerMost = mkInlproc(mkEval(mkLoadClosure 0, [mkTuple[arg0, arg1]]), 2, "buildCallBack(1)(1)2", [mkLoadArgument 0], 0) val resultFn = mkInlproc(mkEval(mkLoadClosure 0, [innerMost]), 1, "buildCallBack(1)(1)", [mkLoadLocal 0], 0) val firstBuild = mkEval(mkConst (toMachineWord CODETREE.Foreign.buildCallBack), [arg0]) val outerBody = mkEnv([mkDec(0, firstBuild)], resultFn) in val buildCallBackEntry = mkInlproc(outerBody, 1, "buildCallBack(1)", [], 1) end (* Abi - an eqtype. *) local open TypeValue fun monotypePrinter _ = PRETTY.PrettyString "?" val code = createTypeValue{ eqCode = equalWordFn, printCode = mkConst (toMachineWord (ref monotypePrinter)), boxedCode = boxedEither (* Assume this for the moment *), sizeCode = singleWord } val abiEqAndPrint = Global (genCode(code, [], 0) ()) in val abiConstr = makeTypeConstructor("abi", [], makeFreeId(0, abiEqAndPrint, true, basisDescription "Foreign.LowLevel.abi"), declInBasis) end val () = #enterType foreignEnv ("abi", TypeConstrSet(abiConstr, [])) val abiType = mkTypeConstruction ("abi", abiConstr, [], declInBasis) (* It would be possible to put the definition of cType in here but it's complicated. It's easier to use an opaque type and put in a cast later. *) val ctypeConstr = makeTypeConstructor("ctype", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "Foreign.LowLevel.ctype"), declInBasis) val () = #enterType foreignEnv ("ctype", TypeConstrSet(ctypeConstr, [])) val ffiType = mkTypeConstruction ("ctype", ctypeConstr, [], declInBasis) val foreignCallType = mkProductType[abiType, List ffiType, ffiType] ->> mkProductType[LargeWord, LargeWord, LargeWord] ->> Unit val buildCallBackType = mkProductType[abiType, List ffiType, ffiType] ->> (mkProductType[LargeWord, LargeWord] ->> Unit) ->> LargeWord fun enterForeign (name, entry, typ) = #enterVal foreignEnv (name, mkGvar (name, typ, entry, declInBasis)) in val () = enterForeign("foreignCall", foreignCallEntry, foreignCallType) val () = enterForeign("buildCallBack", buildCallBackEntry, buildCallBackType) - val () = enterForeign("abiList", mkConst(toMachineWord CODETREE.Foreign.abiList), List (String ** abiType)) + (* Apply the abiList function here. The ABIs depend on the platform in the interpreted version. 
*) + val () = enterForeign("abiList", mkConst(toMachineWord(CODETREE.Foreign.abiList())), List (String ** abiType)) end local fun addVal (name : string, value : 'a, typ : types) : unit = enterBootstrap (name, mkConst (toMachineWord value), typ) (* These are only used during the bootstrap phase. Replacements are installed once the appropriate modules of the basis library are compiled. *) fun intOfString s = let val radix = if String.size s >= 3 andalso String.substring(s, 0, 2) = "0x" orelse String.size s >= 4 andalso String.substring(s, 0, 3) = "~0x" then StringCvt.HEX else StringCvt.DEC in case StringCvt.scanString (Int.scan radix) s of NONE => raise Conversion "Invalid integer constant" | SOME res => res end fun wordOfString s = let val radix = if String.size s > 2 andalso String.sub(s, 2) = #"x" then StringCvt.HEX else StringCvt.DEC in case StringCvt.scanString (Word.scan radix) s of NONE => raise Conversion "Invalid word constant" | SOME res => res end open PRINTTABLE val convstringCode = genCode(mkConst(toMachineWord unescapeString), [], 0) () val convintCode = genCode(mkConst(toMachineWord intOfString), [], 0) () val convwordCode = genCode(mkConst(toMachineWord wordOfString), [], 0) () in (* Conversion overloads used to be set by the ML bootstrap code. It's simpler to do that here but to maintain compatibility with the 5.6 compiler we need to define these. Once we've rebuilt the compiler this can be removed along with the code that uses it. *) val () = addVal ("convStringName", "convString": string, String) val () = addVal ("convInt", intOfString : string -> int, String ->> intInfType) val () = addVal ("convWord", wordOfString : string -> word, String ->> Word) (* Convert a string, recognising and converting the escape codes. *) val () = addVal ("convString", unescapeString: string -> string, String ->> String) (* Flag to indicate which version of Int to compile *) val () = addVal ("intIsArbitraryPrecision", intIsArbitraryPrecision, Bool) (* Install the overloads now. *) val () = addOverload("convString", stringConstr, convstringCode) val () = addOverload("convInt", fixedIntConstr, convintCode) val () = addOverload("convInt", intInfConstr, convintCode) val () = addOverload("convWord", wordConstr, convwordCode) end (* The only reason we have vector here is to get equality right. We need vector to be an equality type and to have a specific equality function. *) local fun polyTypePrinter _ _ = PRETTY.PrettyString "?" (* The equality function takes the base equality type as an argument. The inner function takes two arguments which are the two vectors to compare, checks the lengths and if they're equal applies the base equality to each field. *) val eqCode = mkInlproc( mkProc( mkEnv([ (* Length of the items. *) mkDec(0, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 0)), mkDec(1, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 1)), mkMutualDecs[(2, (* Loop function. *) mkProc( mkIf( (* Finished? *) mkEqualWord(mkLoadClosure 0, mkLoadArgument 0), CodeTrue, (* Yes, all equal. *) mkIf( mkEval( TypeValue.extractEquality(mkLoadClosure 2), (* Base equality fn *) [ mkLoadOperation(LoadStoreMLWord{isImmutable=true}, mkLoadClosure 3, mkLoadArgument 0), mkLoadOperation(LoadStoreMLWord{isImmutable=true}, mkLoadClosure 4, mkLoadArgument 0) ]), mkEval(mkLoadClosure 1, (* Recursive call with index+1. 
*) [ mkBinary(BuiltIns.WordArith BuiltIns.ArithAdd, mkLoadArgument 0, mkConst(toMachineWord 1)) ]), CodeFalse (* Not equal elements - result false *) ) ), 1, "vector-loop", [mkLoadLocal 0 (* Length *), mkLoadLocal 2 (* Loop function *), mkLoadClosure 0 (* Base equality function *), mkLoadArgument 0 (* Vector 0 *), mkLoadArgument 1 (* Vector 1 *)], 0))] ], mkIf( (* Test the lengths. *) mkEqualWord(mkLoadLocal 0, mkLoadLocal 1), (* Equal - test the contents. *) mkEval(mkLoadLocal 2, [CodeZero]), CodeFalse (* Not same length- result false *) ) ), 2, "vector-eq", [mkLoadArgument 0], 3), 1, "vector-eq()", [], 0) val idCode = (* Polytype *) let open TypeValue val code = createTypeValue{ eqCode=eqCode, printCode=mkConst (toMachineWord (ref polyTypePrinter)), boxedCode=mkInlproc(boxedAlways, 1, "boxed-vector", [], 0), sizeCode=mkInlproc(singleWord, 1, "size-vector", [], 0)} in Global (genCode(code, [], 0) ()) end in val vectorType = makeTypeConstructor("vector", [makeTypeVariable()], makeFreeId(1, idCode, true, basisDescription "vector"), declInBasis) val () = enterGlobalType ("vector", TypeConstrSet(vectorType, [])) end (* We also need a type with byte-wise equality. *) local fun monoTypePrinter _ = PRETTY.PrettyString "?" (* This is a monotype equality function that takes two byte vectors and compares them byte-by-byte for equality. Because they are vectors of bytes it's unsafe to load the whole words which could look like addresses if the bottom bit happens to be zero. *) val eqCode = mkProc( mkEnv([ (* Length of the items. *) mkDec(0, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 0)), mkDec(1, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 1)) ], mkIf( (* Test the lengths. *) mkEqualWord(mkLoadLocal 0, mkLoadLocal 1), (* Equal - test the contents. *) mkEnv([ (* ByteVecEqual takes a byte length so we have to multiply by the number of bytes per word. *) mkDec(2, mkBinary(BuiltIns.WordArith BuiltIns.ArithMult, mkConst(toMachineWord RunCall.bytesPerWord), mkLoadLocal 0)) ], mkBlockOperation{kind=BlockOpEqualByte, leftBase=mkLoadArgument 0, rightBase=mkLoadArgument 1, leftIndex=CodeZero, rightIndex=CodeZero, length=mkLoadLocal 2}), CodeFalse (* Not same length- result false *) ) ), 2, "byteVector-eq", [], 3) val idCode = (* Polytype *) let open TypeValue val code = createTypeValue{ eqCode=eqCode, printCode=mkConst (toMachineWord (ref monoTypePrinter)), boxedCode=boxedAlways, sizeCode=singleWord} in Global (genCode(code, [], 0) ()) end in val byteVectorType = makeTypeConstructor("byteVector", [], makeFreeId(0, idCode, true, basisDescription "byteVector"), declInBasis) val () = #enterType bootstrapEnv ("byteVector", TypeConstrSet(byteVectorType, [])) end (* We also need array and Array2.array to be passed through here so that they have the special property of being eqtypes even if their argument is not. "array" is defined to be in the global environment. *) val () = enterGlobalType ("array", TypeConstrSet(arrayConstr, [])) val () = #enterType bootstrapEnv ("array", TypeConstrSet(array2Constr, [])) val () = #enterType bootstrapEnv ("byteArray", TypeConstrSet(byteArrayConstr, [])) (* "=', '<>', PolyML.print etc are type-specific function which appear to be polymorphic. The compiler recognises these and treats them specially. For (in)equality that means generating type-specific versions of the equality operations; for print etc that means printing in a type-specific way. They can become true polymorphic functions and lose their type-specificity. 
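For instance, rebinding one of them, as in the purely illustrative
       val p = PolyML.print
   makes p an ordinary polymorphic function rather than the special identifier.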
For (in)equality that means defaulting to structure equality which is normal and expected behaviour. For print etc that means losing the ability to print and just printing "?" so it's important to avoid that happening. "open" treats type-specific functions specially and retains the type-specificity. That's important to allow the prelude code to expand the PolyML structure. *) local val eqType = let val a = makeEqTV () in a ** a ->> Bool end val eqVal = mkSpecialFun("=", eqType, Equal) in val () = enterGlobalValue ("=", eqVal) end local val neqType = let val a = makeEqTV () in a ** a ->> Bool end val neqVal = mkSpecialFun("<>", neqType, NotEqual) in val () = enterGlobalValue ("<>", neqVal) end val polyMLEnv = makeStructure(globalEnv, "PolyML") val enterPolyMLVal = #enterVal polyMLEnv local (* This version of the environment must match that used in the NameSpace structure. *) open TYPETREE (* Create a new structure for them. *) val nameSpaceEnv = makeStructure(polyMLEnv, "NameSpace") (* Substructures. *) val valuesEnv = makeStructure(nameSpaceEnv, "Values") and typesEnv = makeStructure(nameSpaceEnv, "TypeConstrs") and fixesEnv = makeStructure(nameSpaceEnv, "Infixes") and structsEnv = makeStructure(nameSpaceEnv, "Structures") and sigsEnv = makeStructure(nameSpaceEnv, "Signatures") and functsEnv = makeStructure(nameSpaceEnv, "Functors") (* Types for the basic values. These are opaque. *) val valueVal = makeAndDeclareOpaqueType("value", "PolyML.NameSpace.Values.value", valuesEnv) (* Representation of the type of a value. *) val Types = makeAndDeclareOpaqueType("typeExpression", "PolyML.NameSpace.Values.typeExpression", valuesEnv) val typeVal = makeAndDeclareOpaqueType("typeConstr", "PolyML.NameSpace.TypeConstrs.typeConstr", typesEnv) val fixityVal = makeAndDeclareOpaqueType("fixity", "PolyML.NameSpace.Infixes.fixity", fixesEnv) val signatureVal = makeAndDeclareOpaqueType("signatureVal", "PolyML.NameSpace.Signatures.signatureVal", sigsEnv) val structureVal = makeAndDeclareOpaqueType("structureVal", "PolyML.NameSpace.Structures.structureVal", structsEnv) val functorVal = makeAndDeclareOpaqueType("functorVal", "PolyML.NameSpace.Functors.functorVal", functsEnv) (* nameSpace type. Labelled record. *) fun createFields(name, vType): { name: string, typeof: types} list = let val enterFun = String ** vType ->> Unit val lookupFun = String ->> Option vType val allFun = Unit ->> List (String ** vType) in [mkLabelEntry("enter" ^ name, enterFun), mkLabelEntry("lookup" ^ name, lookupFun), mkLabelEntry("all" ^ name, allFun)] end (* We have to use the same names as we use in the env type because we're passing "env" values through the bootstrap. *) val valTypes = [("Val", valueVal), ("Type", typeVal), ("Fix", fixityVal), ("Struct", structureVal), ("Sig", signatureVal), ("Funct", functorVal)] val fields = List.foldl (fn (p,l) => createFields p @ l) [] valTypes val recordType = makeTypeAbbreviation("nameSpace", "PolyML.NameSpace.nameSpace", [], mkLabelled(sortLabels fields, true), declInBasis); val () = #enterType nameSpaceEnv ("nameSpace", TypeConstrSet(recordType, [])); (* The result type of the compiler includes valueVal etc. 
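Concretely it is the labelled record built from resultFields below, roughly { fixes: (string * fixity) list, functors: ..., signatures: ..., structures: ..., types: ..., values: (string * value) list }, using the opaque types declared above.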
*) val resultFields = List.map TYPETREE.mkLabelEntry [("values", List(String ** valueVal)), ("fixes", List(String ** fixityVal)), ("types", List(String ** typeVal)), ("structures", List(String ** structureVal)), ("signatures", List(String ** signatureVal)), ("functors", List(String ** functorVal))] in val nameSpaceType = mkTypeConstruction ("nameSpace", recordType, [], declInBasis) val execResult = mkLabelled(sortLabels resultFields, true) type execResult = { fixes: (string * fixStatus) list, values: (string * values) list, structures: (string * structVals) list, signatures: (string * signatures) list, functors: (string * functors) list, types: (string * typeConstrSet) list } val valueVal = valueVal val typeVal = typeVal val fixityVal = fixityVal val signatureVal = signatureVal val structureVal = structureVal val functorVal = functorVal val Types = Types val valuesEnv = valuesEnv and typesEnv = typesEnv and fixesEnv = fixesEnv and structsEnv = structsEnv and sigsEnv = sigsEnv and functsEnv = functsEnv end local val typeconstr = locationConstr val () = #enterType polyMLEnv ("location", typeconstr); in val Location = mkTypeConstruction ("location", tsConstr typeconstr, [], declInBasis) end (* Interface to the debugger. *) local open TYPETREE val debuggerEnv = makeStructure(polyMLEnv, "DebuggerInterface") (* Make these opaque at this level. *) val locationPropList = makeAndDeclareOpaqueType("locationPropList", "PolyML.DebuggerInterface.locationPropList", debuggerEnv) val typeId = makeAndDeclareOpaqueType("typeId", "PolyML.DebuggerInterface.typeId", debuggerEnv) val machineWordType = makeAndDeclareOpaqueType("machineWord", "PolyML.DebuggerInterface.machineWord", debuggerEnv) (* For long term security keep these as different from global types and sigs. Values in the static environment need to be copied before they are global. *) val localType = makeAndDeclareOpaqueType("localType", "PolyML.DebuggerInterface.localType", debuggerEnv) val localTypeConstr = makeAndDeclareOpaqueType("localTypeConstr", "PolyML.DebuggerInterface.localTypeConstr", debuggerEnv) val localSig = makeAndDeclareOpaqueType("localSig", "PolyML.DebuggerInterface.localSig", debuggerEnv) open DEBUGGER (* Entries in the static list. This type is only used within the implementation of DebuggerInterface in the basis library and does not appear in the final signature. *) val environEntryConstr = makeTypeConstructor("environEntry", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "PolyML.DebuggerInterface.environEntry"), declInBasis) val environEntryType = mkTypeConstruction ("environEntry", environEntryConstr, [], declInBasis) val constrs = (* Order is significant. *) [ ("EnvEndFunction", mkProductType[String, Location, localType]), ("EnvException", mkProductType[String, localType, locationPropList]), ("EnvStartFunction", mkProductType[String, Location, localType]), ("EnvStructure", mkProductType[String, localSig, locationPropList]), ("EnvTConstr", String ** localTypeConstr), ("EnvTypeid", typeId ** typeId), ("EnvVConstr", mkProductType[String, localType, Bool, Int, locationPropList]), ("EnvValue", mkProductType[String, localType, locationPropList]) ] (* This representation must match the representation defined in DEBUGGER_.sml. 
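Presumably the two must list the constructors in the same order, since chooseConstrRepr below assigns representations positionally; adding or reordering a constructor on one side without mirroring it on the other would make the debugger decode entries with the wrong tags.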
*) val numConstrs = List.length constrs val {constrs=constrReps, ...} = chooseConstrRepr(constrs, []) val constructors = ListPair.map (fn ((s,t), code) => mkGconstr(s, t ->> environEntryType, code, false, numConstrs, declInBasis)) (constrs, constrReps) val () = List.app (fn c => #enterVal debuggerEnv(valName c, c)) constructors (* Put these constructors onto the type. *) val () = #enterType debuggerEnv ("environEntry", TypeConstrSet(environEntryConstr, constructors)) (* Debug state type. *) val debugStateConstr = makeTypeAbbreviation("debugState", "PolyML.DebuggerInterface.debugState", [], mkProductType[List environEntryType, List machineWordType, Location], declInBasis) val () = #enterType debuggerEnv ("debugState", TypeConstrSet(debugStateConstr, [])) val debugStateType = mkTypeConstruction ("debugState", debugStateConstr, [], declInBasis) in val () = applyList (fn (name, v, t) => #enterVal debuggerEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("makeValue", toMachineWord(makeValue: debugState -> string * types * locationProp list * machineWord -> values), debugStateType ->> mkProductType[String, localType, locationPropList, machineWordType] ->> valueVal), ("makeException", toMachineWord(makeException: debugState -> string * types * locationProp list * machineWord -> values), debugStateType ->> mkProductType[String, localType, locationPropList, machineWordType] ->> valueVal), ("makeConstructor", toMachineWord(makeConstructor: debugState -> string * types * bool * int * locationProp list * machineWord -> values), debugStateType ->> mkProductType[String, localType, Bool, Int, locationPropList, machineWordType] ->> valueVal), ("makeAnonymousValue", toMachineWord(makeAnonymousValue: debugState -> types * machineWord -> values), debugStateType ->> mkProductType[localType, machineWordType] ->> valueVal), ("makeStructure", toMachineWord(makeStructure: debugState -> string * signatures * locationProp list * machineWord -> structVals), debugStateType ->> mkProductType[String, localSig, locationPropList, machineWordType] ->> structureVal), ("makeTypeConstr", toMachineWord(makeTypeConstr: debugState -> typeConstrSet -> typeConstrSet), debugStateType ->> localTypeConstr ->> typeVal), ("unitValue", toMachineWord(mkGvar("", unitType, CodeZero, []): values), valueVal), (* Used as a default *) ("setOnEntry", toMachineWord(setOnEntry: (string * PolyML.location -> unit) option -> unit), Option (String ** Location ->> Unit) ->> Unit), ("setOnExit", toMachineWord(setOnExit: (string * PolyML.location -> unit) option -> unit), Option (String ** Location ->> Unit) ->> Unit), ("setOnExitException", toMachineWord(setOnExitException: (string * PolyML.location -> exn -> unit) option -> unit), Option (String ** Location ->> Exn ->> Unit) ->> Unit), ("setOnBreakPoint", toMachineWord(setOnBreakPoint: (PolyML.location * bool ref -> unit) option -> unit), Option (Location ** Ref Bool ->> Unit) ->> Unit) ] end local val typeconstr = contextConstr in val () = #enterType polyMLEnv ("context", typeconstr); val () = List.app(fn(tv as Value{name, ...}) => #enterVal polyMLEnv(name, tv)) (tsConstructors typeconstr) end local val typeconstr = prettyConstr in val () = #enterType polyMLEnv ("pretty", typeconstr); val () = List.app(fn(tv as Value{name, ...}) => #enterVal polyMLEnv(name, tv)) (tsConstructors typeconstr) val PrettyType = mkTypeConstruction ("pretty", tsConstr typeconstr, [], declInBasis) end local val printType = let val a = makePrintTV () in a ->> a end; val printVal = mkSpecialFun("print", printType, 
Print); in val () = enterPolyMLVal ("print", printVal); end; local val makeStringType = let val a = makePrintTV () in a ->> String end; val makeStringVal = mkSpecialFun("makestring", makeStringType, MakeString); in val () = enterPolyMLVal ("makestring", makeStringVal); end; local val prettyType = let val a = makePrintTV () in a ** fixedIntType ->> PrettyType end; val prettyVal = mkSpecialFun("prettyRepresentation", prettyType, GetPretty); in val () = enterPolyMLVal ("prettyRepresentation", prettyVal); end; local (* addPrettyPrinter is the new function to install a pretty printer. *) val a = makeTV () val b = makeTV () val addPrettyType = (TYPETREE.fixedIntType ->> b ->> a ->> PrettyType) ->> Unit; val addPrettyVal = mkSpecialFun("addPrettyPrinter", addPrettyType, AddPretty); in val () = enterPolyMLVal ("addPrettyPrinter", addPrettyVal); end; (* This goes in RunCall since it's only for the basis library. *) local val addOverloadType = let val a = makeTV () and b = makeTV () in (a ->> b) ->> String ->> Unit end; val addOverloadVal = mkSpecialFun("addOverload", addOverloadType, AddOverload); in val () = #enterVal runCallEnv ("addOverload", addOverloadVal); end local (* Add a function to switch the default integer type. *) fun setType isArbitrary = setPreferredInt(if isArbitrary then intInfConstr else fixedIntConstr) in val () = #enterVal runCallEnv ("setDefaultIntTypeArbitrary", mkGvar ("setDefaultIntTypeArbitrary", Bool ->> Unit, mkConst (toMachineWord setType), declInBasis)) end local val sourceLocVal = mkSpecialFun("sourceLocation", Unit ->> Location, GetLocation); in val () = enterPolyMLVal ("sourceLocation", sourceLocVal); end; local (* This is used as one of the arguments to the compiler function. *) open TYPETREE val uniStructEnv = makeStructure(bootstrapEnv, "Universal") fun enterUniversal (name : string, entry : codetree, typ : types) : unit = let val value = mkGvar (name, typ, entry, declInBasis); in #enterVal uniStructEnv (name, value) end; local fun polyTypePrinter _ _ = PRETTY.PrettyString "?" open TypeValue val idCode = let val code = createTypeValue{ eqCode=CodeZero, (* Not an equality type *) printCode=mkConst (toMachineWord (ref polyTypePrinter)), boxedCode=mkInlproc(boxedEither(* Assume worst case *), 1, "boxed-tag", [], 0), sizeCode=mkInlproc(singleWord, 1, "size-tag", [], 0)} in Global (genCode(code, [], 0) ()) end in (* type 'a tag *) val tagConstr = makeTypeConstructor("tag", [makeTypeVariable()], makeFreeId(1, idCode, false, basisDescription "tag"), declInBasis); val () = #enterType uniStructEnv ("tag", TypeConstrSet(tagConstr, [])) end (* type universal *) val univConstr = makeTypeConstructor("universal", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "universal"), declInBasis); val () = #enterType uniStructEnv ("universal", TypeConstrSet(univConstr, [])); fun Tag base = mkTypeConstruction ("tag", tagConstr, [base], declInBasis) val Universal = mkTypeConstruction ("universal", univConstr, [], declInBasis) val a = makeTV() (* val tagInject : 'a tag -> 'a -> universal *) val injectType = Tag a ->> a ->> Universal val () = enterUniversal ("tagInject", makePolymorphic([a], mkConst (toMachineWord (Universal.tagInject: 'a Universal.tag -> 'a -> Universal.universal))), injectType) (* We don't actually need tagIs and tagProject since this is only used for the compiler. Universal is redefined in the basis library. 
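An illustrative sketch of the underlying Universal operations being wrapped here, with hypothetical bindings; note that tag creation itself is not entered into this structure:
       val t: int Universal.tag = Universal.tag ()
       val u = Universal.tagInject t 42
   Then Universal.tagIs t u is true and Universal.tagProject t u returns 42.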
*) val projectType = Tag a ->> Universal ->> a val () = enterUniversal ("tagProject", makePolymorphic([a], mkConst (toMachineWord(Universal.tagProject: 'a Universal.tag -> Universal.universal -> 'a))), projectType) val testType = Tag a ->> Universal ->> Bool val () = enterUniversal ("tagIs", makePolymorphic([a], mkConst (toMachineWord(Universal.tagIs: 'a Universal.tag -> Universal.universal -> bool))), testType) in val Tag = Tag and Universal = Universal end local open TYPETREE (* Parsetree properties datatype. *) val propConstr = makeTypeConstructor("ptProperties", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "PolyML.ptProperties"), declInBasis); val PtProperties = mkTypeConstruction ("ptProperties", propConstr, [], declInBasis) (* Parsetree type. *) val parseTreeConstr = makeTypeAbbreviation("parseTree", "PolyML.parseTree", [], Location ** List PtProperties, declInBasis); val ParseTree = mkTypeConstruction ("parseTree", parseTreeConstr, [], declInBasis) val () = #enterType polyMLEnv ("parseTree", TypeConstrSet(parseTreeConstr, [])); val constrs = (* Order is significant. *) [ ("PTbreakPoint", Ref Bool), ("PTcompletions", List String), ("PTdeclaredAt", Location), ("PTdefId", fixedIntType), ("PTfirstChild", Unit ->> ParseTree), ("PTnextSibling", Unit ->> ParseTree), ("PTopenedAt", Location), ("PTparent", Unit ->> ParseTree), ("PTpreviousSibling", Unit ->> ParseTree), ("PTprint", fixedIntType ->> PrettyType), ("PTreferences", Bool ** List Location), ("PTrefId", fixedIntType), ("PTstructureAt", Location), ("PTtype", Types) ]; (* This representation must match the representation defined in ExportTree.sml. *) val numConstrs = List.length constrs val {constrs=constrReps, ...} = chooseConstrRepr(constrs, []) val constructors = ListPair.map (fn ((s,t), code) => mkGconstr(s, t ->> PtProperties, code, false, numConstrs, declInBasis)) (constrs, constrReps) val () = List.app (fn c => #enterVal polyMLEnv(valName c, c)) constructors (* Put these constructors onto the type. 
*) val () = #enterType polyMLEnv ("ptProperties", TypeConstrSet(propConstr, constructors)); in val ParseTree = ParseTree and PtProperties = PtProperties end local open TYPETREE val compilerType : types = mkProductType[nameSpaceType, Unit ->> Option Char, List Universal] ->> mkProductType[Option ParseTree, Option (Unit ->> execResult)] type compilerType = nameSpace * (unit -> char option) * Universal.universal list -> exportTree option * (unit->execResult) option in val () = enterBootstrap ("use", mkConst (toMachineWord ((useIntoEnv globalTable []): string -> unit)), String ->> Unit) val () = enterBootstrap ("useWithParms", mkConst (toMachineWord ((useIntoEnv globalTable): Universal.universal list -> string -> unit)), List Universal ->> String ->> Unit) val () = enterPolyMLVal("compiler", mkGvar ("compiler", compilerType, mkConst (toMachineWord (compiler: compilerType)), declInBasis)); val () = enterBootstrap("globalSpace", mkConst (toMachineWord(gEnvAsNameSpace globalTable: nameSpace)), nameSpaceType) end; local val ty = TYPETREE.mkOverloadSet[] val addType = ty ** ty ->> ty; val negType = ty ->> ty; val cmpType = ty ** ty ->> Bool; in val () = enterGlobalValue ("+", mkOverloaded "+" addType); val () = enterGlobalValue ("-", mkOverloaded "-" addType); val () = enterGlobalValue ("*", mkOverloaded "*" addType); val () = enterGlobalValue ("~", mkOverloaded "~" negType); val () = enterGlobalValue ("abs", mkOverloaded "abs" negType); val () = enterGlobalValue (">=", mkOverloaded ">=" cmpType); val () = enterGlobalValue ("<=", mkOverloaded "<=" cmpType); val () = enterGlobalValue (">", mkOverloaded ">" cmpType); val () = enterGlobalValue ("<", mkOverloaded "<" cmpType); (* The following overloads are added in ML97 *) val () = enterGlobalValue ("div", mkOverloaded "div" addType); val () = enterGlobalValue ("mod", mkOverloaded "mod" addType); val () = enterGlobalValue ("/", mkOverloaded "/" addType); end; local open DEBUG; local open TYPETREE val fields = [ mkLabelEntry("location", Location), mkLabelEntry("hard", Bool), mkLabelEntry("message", PrettyType), mkLabelEntry("context", Option PrettyType) ] in val errorMessageProcType = mkLabelled(sortLabels fields, true) ->> Unit type errorMessageProcType = { location: location, hard: bool, message: pretty, context: pretty option } -> unit end local open TYPETREE val optNav = Option(Unit->>ParseTree) val fields = [ mkLabelEntry("parent", optNav), mkLabelEntry("next", optNav), mkLabelEntry("previous", optNav) ] in val navigationType = mkLabelled(sortLabels fields, true) type navigationType = { parent: (unit->exportTree) option, next: (unit->exportTree) option, previous: (unit->exportTree) option } end type 'a tag = 'a Universal.tag in val () = applyList (fn (name, v, t) => enterBootstrap(name, mkConst v, t)) [ ("compilerVersion", toMachineWord (VERSION.compilerVersion: string), String), ("compilerVersionNumber", toMachineWord (VERSION.versionNumber: int), Int), ("lineNumberTag", toMachineWord (lineNumberTag : (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("offsetTag", toMachineWord (offsetTag: (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("fileNameTag", toMachineWord (fileNameTag: string tag), Tag String), ("bindingCounterTag", toMachineWord (bindingCounterTag: (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("maxInlineSizeTag", toMachineWord (maxInlineSizeTag: FixedInt.int tag), Tag fixedIntType), ("assemblyCodeTag", toMachineWord (assemblyCodeTag: bool tag), Tag Bool), ("parsetreeTag", toMachineWord (parsetreeTag: bool tag), 
Tag Bool), ("codetreeTag", toMachineWord (codetreeTag: bool tag), Tag Bool), ("icodeTag", toMachineWord (icodeTag: bool tag), Tag Bool), ("lowlevelOptimiseTag", toMachineWord (lowlevelOptimiseTag: bool tag), Tag Bool), ("codetreeAfterOptTag", toMachineWord (codetreeAfterOptTag: bool tag), Tag Bool), ("inlineFunctorsTag", toMachineWord (inlineFunctorsTag: bool tag), Tag Bool), ("debugTag", toMachineWord (debugTag: bool tag), Tag Bool), ("printDepthFunTag", toMachineWord (DEBUG.printDepthFunTag: (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("errorDepthTag", toMachineWord (DEBUG.errorDepthTag: FixedInt.int tag), Tag fixedIntType), ("lineLengthTag", toMachineWord (DEBUG.lineLengthTag: FixedInt.int tag), Tag fixedIntType), ("profileAllocationTag", toMachineWord (DEBUG.profileAllocationTag: FixedInt.int tag), Tag fixedIntType), ("printOutputTag", toMachineWord (PRETTY.printOutputTag: (pretty->unit) tag), Tag (PrettyType->>Unit)) , ("compilerOutputTag", toMachineWord (PRETTY.compilerOutputTag: (pretty->unit) tag), Tag (PrettyType->>Unit)), ("errorMessageProcTag", toMachineWord (LEX.errorMessageProcTag: errorMessageProcType tag), Tag errorMessageProcType), ("rootTreeTag", toMachineWord (EXPORTTREE.rootTreeTag: navigation tag), Tag navigationType), ("reportUnreferencedIdsTag", toMachineWord (reportUnreferencedIdsTag: bool tag), Tag Bool), ("reportExhaustiveHandlersTag", toMachineWord (reportExhaustiveHandlersTag: bool tag), Tag Bool), ("narrowOverloadFlexRecordTag", toMachineWord (narrowOverloadFlexRecordTag: bool tag), Tag Bool), ("createPrintFunctionsTag", toMachineWord (createPrintFunctionsTag: bool tag), Tag Bool), ("reportDiscardedValuesTag", toMachineWord (reportDiscardedValuesTag: FixedInt.int tag), Tag fixedIntType) ] end; (* PolyML.CodeTree structure. This exports the CodeTree structure into the ML space. *) local open CODETREE val codetreeEnv = makeStructure(polyMLEnv, "CodeTree") fun createType typeName = makeAndDeclareOpaqueType(typeName, "PolyML.CodeTree." ^ typeName, codetreeEnv) val CodeTree = createType "codetree" and MachineWord = createType "machineWord" and CodeBinding = createType "codeBinding" (* For the moment export these only for the general argument and result types. 
*) fun simpleFn (code, nArgs, name, closure, nLocals) = mkFunction{body=code, argTypes=List.tabulate(nArgs, fn _ => GeneralType), resultType=GeneralType, name=name, closure=closure, numLocals=nLocals} and simpleInlineFn (code, nArgs, name, closure, nLocals) = mkInlineFunction{body=code, argTypes=List.tabulate(nArgs, fn _ => GeneralType), resultType=GeneralType, name=name, closure=closure, numLocals=nLocals} and simpleCall(func, args) = mkCall(func, List.map (fn c => (c, GeneralType)) args, GeneralType) in val CodeTree = CodeTree val () = applyList (fn (name, v, t) => #enterVal codetreeEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("pretty", toMachineWord (CODETREE.pretty: codetree -> pretty), CodeTree ->> PrettyType), ("mkConstant", toMachineWord(mkConst: machineWord -> codetree), MachineWord ->> CodeTree), ("genCode", toMachineWord (genCode: codetree * Universal.universal list * int -> (unit->codetree)), mkProductType[CodeTree, List Universal, Int] ->> (Unit ->> CodeTree)), ("evalue", toMachineWord (evalue: codetree -> machineWord option), CodeTree ->> Option MachineWord), ("mkFunction", toMachineWord (simpleFn: codetree * int * string * codetree list * int -> codetree), mkProductType[CodeTree, Int, String, List CodeTree, Int] ->> CodeTree), ("mkInlineFunction", toMachineWord (simpleInlineFn: codetree * int * string * codetree list * int -> codetree), mkProductType[CodeTree, Int, String, List CodeTree, Int] ->> CodeTree), ("mkCall", toMachineWord (simpleCall: codetree * codetree list -> codetree), CodeTree ** List CodeTree ->> CodeTree), ("mkLoadLocal", toMachineWord (mkLoadLocal: int -> codetree), Int ->> CodeTree), ("mkLoadArgument", toMachineWord (mkLoadArgument: int -> codetree), Int ->> CodeTree), ("mkLoadClosure", toMachineWord (mkLoadClosure: int -> codetree), Int ->> CodeTree), ("mkDec", toMachineWord (mkDec: int * codetree -> codeBinding), Int ** CodeTree ->> CodeBinding), ("mkInd", toMachineWord (mkInd: int * codetree -> codetree), Int ** CodeTree ->> CodeTree), ("mkIf", toMachineWord (mkIf: codetree * codetree * codetree -> codetree), mkProductType[CodeTree, CodeTree, CodeTree] ->> CodeTree), ("mkWhile", toMachineWord (mkWhile: codetree * codetree -> codetree), CodeTree ** CodeTree ->> CodeTree), ("mkLoop", toMachineWord (mkLoop: codetree list -> codetree), List CodeTree ->> CodeTree), ("mkBeginLoop", toMachineWord (mkBeginLoop: codetree * (int * codetree) list -> codetree), CodeTree ** List(Int ** CodeTree) ->> CodeTree), ("mkEnv", toMachineWord (mkEnv: codeBinding list * codetree -> codetree), List CodeBinding ** CodeTree ->> CodeTree), ("mkMutualDecs", toMachineWord (mkMutualDecs: (int * codetree) list -> codeBinding), List(Int ** CodeTree) ->> CodeBinding), ("mkTuple", toMachineWord (mkTuple: codetree list -> codetree), List CodeTree ->> CodeTree), ("mkRaise", toMachineWord (mkRaise: codetree -> codetree), CodeTree ->> CodeTree), ("mkHandle", toMachineWord (mkHandle: codetree * codetree * int -> codetree), mkProductType[CodeTree, CodeTree, Int] ->> CodeTree), ("mkNullDec", toMachineWord (mkNullDec: codetree -> codeBinding), CodeTree ->> CodeBinding) ] end local (* Finish off the NameSpace structure now we have types such as pretty. *) open TYPETREE (* The exported versions expect full name spaces as arguments. Because we convert the exported versions to machineWord and give them types as data structures the compiler can't actually check that the type we give matched the internal type. 
*) fun makeTypeEnv NONE = { lookupType = fn _ => NONE, lookupStruct = fn _ => NONE } | makeTypeEnv(SOME(nameSpace: nameSpace)): printTypeEnv = { lookupType = fn s => case #lookupType nameSpace s of NONE => NONE | SOME t => SOME(t, NONE), lookupStruct = fn s => case #lookupStruct nameSpace s of NONE => NONE | SOME t => SOME(t, NONE) } local (* Values substructure. This also has operations related to type expressions. *) fun codeForValue (Value{access = Global code, class = ValBound, ...}) = code | codeForValue _ = raise Fail "Not a global value" and exportedDisplayTypeExp(ty, depth, nameSpace: nameSpace option) = TYPETREE.display(ty, depth, makeTypeEnv nameSpace) and exportedDisplayValues(valu, depth, nameSpace: nameSpace option) = displayValues(valu, depth, makeTypeEnv nameSpace) and propsForValue (Value {locations, typeOf, ...}) = PTtype typeOf :: mapLocationProps locations fun isConstructor (Value{class = Exception, ...}) = true | isConstructor (Value{class = Constructor _, ...}) = true | isConstructor _ = false fun isException (Value{class = Exception, ...}) = true | isException _ = false in val () = applyList (fn (name, v, t) => #enterVal valuesEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord (valName: values -> string), valueVal ->> String), ("print", toMachineWord (printValues: values * FixedInt.int -> pretty), mkProductType[valueVal, fixedIntType] ->> PrettyType), ("printWithType", toMachineWord (exportedDisplayValues: values * FixedInt.int * nameSpace option -> pretty), mkProductType[valueVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("printType", toMachineWord(exportedDisplayTypeExp: types * FixedInt.int * nameSpace option -> pretty), mkProductType[Types, fixedIntType, Option nameSpaceType] ->> PrettyType), ("typeof", toMachineWord (valTypeOf: values -> types), valueVal ->> Types), ("code", toMachineWord (codeForValue: values -> codetree), valueVal ->> CodeTree), ("properties", toMachineWord (propsForValue: values ->ptProperties list), valueVal ->> List PtProperties), ("isConstructor", toMachineWord(isConstructor: values -> bool), valueVal ->> Bool), ("isException", toMachineWord(isException: values -> bool), valueVal ->> Bool) ] end local (* TypeConstrs substructure. 
*) fun exportedDisplayTypeConstr(tyCons, depth, nameSpace: nameSpace option) = TYPETREE.displayTypeConstrs(tyCons, depth, makeTypeEnv nameSpace) and propsForTypeConstr (TypeConstrSet(TypeConstrs {locations, ...}, _)) = mapLocationProps locations and nameForType (TypeConstrSet(TypeConstrs{name, ...}, _)) = name in val () = applyList (fn (name, v, t) => #enterVal typesEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord(nameForType: typeConstrSet -> string), typeVal ->> String), ("print", toMachineWord (exportedDisplayTypeConstr: typeConstrSet * FixedInt.int * nameSpace option -> pretty), mkProductType[typeVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("properties", toMachineWord (propsForTypeConstr: typeConstrSet ->ptProperties list), typeVal ->> List PtProperties) ] end local (* Structures substructure *) fun exportedDisplayStructs(str, depth, nameSpace: nameSpace option) = displayStructures(str, depth, makeTypeEnv nameSpace) and codeForStruct (Struct{access = Global code, ...}) = code | codeForStruct _ = raise Fail "Not a global structure" and propsForStruct (Struct {locations, ...}) = mapLocationProps locations and nameForStruct (Struct{name, ...}) = name fun nameSpaceForStruct(baseStruct as Struct{signat=Signatures { tab, ...}, ...}): nameSpace = let open UNIVERSALTABLE fun lookupVal s = case univLookup (tab, valueVar, s) of NONE => NONE | SOME v => SOME(makeSelectedValue(v, baseStruct)) and lookupType s = case univLookup (tab, typeConstrVar, s) of NONE => NONE | SOME t => SOME(makeSelectedType(t, baseStruct)) and lookupStruct s = case univLookup (tab, structVar, s) of NONE => NONE | SOME s => SOME(makeSelectedStructure(s, baseStruct)) local fun extractItems t tab = UNIVERSALTABLE.fold (fn (s, u, l) => if Universal.tagIs t u then (s, Universal.tagProject t u) :: l else l ) [] tab in fun allValues() = map(fn (s, v) => (s, makeSelectedValue(v, baseStruct))) (extractItems valueVar tab) and allTypes() = map(fn (s, t) => (s, makeSelectedType(t, baseStruct))) (extractItems typeConstrVar tab) and allStructs() = map(fn (s, v) => (s, makeSelectedStructure(v, baseStruct))) (extractItems structVar tab) end fun enterFunction _ = raise Fail "updating a structure is not possible." (* Raise an exception for any attempt to enter a new value. Return empty for the classes that can't exist in a structure. 
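So for the nameSpace built below, illustratively, where str is some global structVals value: #lookupVal (nameSpaceForStruct str) "x" searches the structure's signature table, #allFix (nameSpaceForStruct str) () always returns the empty list, and any #enterVal application raises Fail because a structure cannot be updated.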
*) in { lookupVal = lookupVal, lookupType = lookupType, lookupStruct = lookupStruct, lookupFix = fn _ => NONE, lookupSig = fn _ => NONE, lookupFunct = fn _ => NONE, enterVal = enterFunction, enterType = enterFunction, enterFix = enterFunction, enterStruct = enterFunction, enterSig = enterFunction, enterFunct = enterFunction, allVal = allValues, allType = allTypes, allStruct = allStructs, allFix = fn () => [], allSig = fn () => [], allFunct = fn () => [] } end in val () = applyList (fn (name, v, t) => #enterVal structsEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord(nameForStruct: structVals -> string), structureVal ->> String), ("print", toMachineWord (exportedDisplayStructs: structVals * FixedInt.int * nameSpace option -> pretty), mkProductType[structureVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("code", toMachineWord (codeForStruct: structVals -> codetree), structureVal ->> CodeTree), ("properties", toMachineWord (propsForStruct: structVals ->ptProperties list), structureVal ->> List PtProperties), ("contents", toMachineWord(nameSpaceForStruct: structVals -> nameSpace), structureVal ->> nameSpaceType) ] end local (* Signatures substructure *) fun exportedDisplaySigs(sign, depth, nameSpace: nameSpace option) = displaySignatures(sign, depth, makeTypeEnv nameSpace) and propsForSig (Signatures {locations, ...}) = mapLocationProps locations and nameForSig (Signatures{name, ...}) = name in val () = applyList (fn (name, v, t) => #enterVal sigsEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord(nameForSig: signatures -> string), signatureVal ->> String), ("print", toMachineWord (exportedDisplaySigs: signatures * FixedInt.int * nameSpace option -> pretty), mkProductType[signatureVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("properties", toMachineWord (propsForSig: signatures ->ptProperties list), signatureVal ->> List PtProperties) ] end local (* Functors substructure *) fun exportedDisplayFunctors(funct, depth, nameSpace: nameSpace option) = displayFunctors(funct, depth, makeTypeEnv nameSpace) and codeForFunct (Functor{access = Global code, ...}) = code | codeForFunct _ = raise Fail "Not a global functor" and propsForFunctor (Functor {locations, ...}) = mapLocationProps locations and nameForFunctor (Functor{name, ...}) = name in val () = applyList (fn (name, v, t) => #enterVal functsEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord(nameForFunctor: functors -> string), functorVal ->> String), ("print", toMachineWord (exportedDisplayFunctors: functors * FixedInt.int * nameSpace option -> pretty), mkProductType[functorVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("code", toMachineWord (codeForFunct: functors -> codetree), functorVal ->> CodeTree), ("properties", toMachineWord (propsForFunctor: functors ->ptProperties list), functorVal ->> List PtProperties) ] end local (* Infixes substructure *) fun nameForFix(FixStatus(s, _)) = s in val () = applyList (fn (name, v, t) => #enterVal fixesEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord(nameForFix: fixStatus -> string), fixityVal ->> String), ("print", toMachineWord (displayFixStatus: fixStatus -> pretty), fixityVal ->> PrettyType) ] end in end in () end (* initGlobalEnv *); end;