diff --git a/Tests/Succeed/Test163.ML b/Tests/Succeed/Test163.ML index 17066634..1db79f8b 100644 --- a/Tests/Succeed/Test163.ML +++ b/Tests/Succeed/Test163.ML @@ -1,60 +1,77 @@ (* Some tests on C memory operations. *) fun check f a b = if f a = b then () else raise Fail "Mismatch"; (* 64-bit operations are not implemented in 32-bit mode. *) fun checkEx f a b = (if f a = b then () else raise Fail "Mismatch") handle Foreign.Foreign _ => (); open Foreign.Memory; val m = malloc 0w32; +val e = ++(m, 0w32); val r = ref 0w0; set32(m, 0w0, 0wx12343421); set32(m, 0w1, 0w0); set32(m, 0w2, 0w0); set32(m, 0w3, 0w0); set32(m, 0w4, 0w0); set32(m, 0w5, 0w0); set32(m, 0w6, 0wxabccccab); set32(m, 0w7, 0wx12211221); (* These are often handled differently depending on whether the offset is a compile-time constant. *) check get32 (m, 0w0) 0wx12343421; check get32 (m, !r) 0wx12343421; +check get32 (e, ~ 0w8) 0wx12343421; +check get32 (e, !r - 0w8) 0wx12343421; + check get32 (m, 0w6) 0wxabccccab; +check get32 (e, ~ 0w2) 0wxabccccab; r := 0w6; check get32 (m, !r) 0wxabccccab; +check get32 (e, !r - 0w8) 0wxabccccab; check get16 (m, 0w14) 0wx1221; +check get16 (e, ~ 0w2) 0wx1221; r := 0w14; check get16 (m, !r) 0wx1221; +check get16 (e, !r - 0w16) 0wx1221; checkEx get64 (m, 0w1) 0w0; +checkEx get64 (e, ~ 0w7) 0w0; r := 0w1; checkEx get64 (m, !r) 0w0; +checkEx get64 (e, !r - 0w8) 0w0; check get8 (m, 0w24) 0wxab; +check get8 (e, ~ 0w8) 0wxab; r := 0w24; check get8 (m, !r) 0wxab; +check get8 (e, !r - 0w32) 0wxab; set8(m, !r, 0wx88); check get8 (m, 0w24) 0wx88; +set8(e, !r - 0w32, 0wx77); +check get8 (e, ~ 0w8) 0wx77; set64(m, 0w1, 0wx123456) handle Foreign.Foreign _ => (); checkEx get64 (m, 0w1) 0wx123456; set16(m, 0w4, 0wxffee); check get16 (m, 0w4) 0wxffee; setFloat(m, 0w2, 1.0); if Real.==(getFloat(m, 0w2), 1.0) then () else raise Fail "Mismatch"; +if Real.==(getFloat(e, ~ 0w6), 1.0) then () else raise Fail "Mismatch"; r := 0w2; if Real.==(getFloat(m, !r), 1.0) then () else raise Fail "Mismatch"; setDouble(m, 0w2, 2.0); if Real.==(getDouble(m, 0w2), 2.0) then () else raise Fail "Mismatch"; +if Real.==(getDouble(e, ~ 0w2), 2.0) then () else raise Fail "Mismatch"; if Real.==(getDouble(m, !r), 2.0) then () else raise Fail "Mismatch"; + free m; diff --git a/basis/ForeignMemory.sml b/basis/ForeignMemory.sml index c1730e6e..528aa103 100644 --- a/basis/ForeignMemory.sml +++ b/basis/ForeignMemory.sml @@ -1,230 +1,237 @@ (* Title: Foreign Function Interface: memory operations Author: David Matthews - Copyright David Matthews 2015, 2017 + Copyright David Matthews 2015, 2017, 2020 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) structure ForeignMemory :> sig eqtype volatileRef val volatileRef: SysWord.word -> volatileRef val setVolatileRef: volatileRef * SysWord.word -> unit val getVolatileRef: volatileRef -> SysWord.word eqtype voidStar val voidStar2Sysword: voidStar -> SysWord.word val sysWord2VoidStar: SysWord.word -> voidStar val null: voidStar + (* Helper functions to add a word value to an address. + From 5.8.2 the word value is treated as signed. *) val ++ : voidStar * word -> voidStar val -- : voidStar * word -> voidStar (* Remember an address except across loads. *) val memoise: ('a -> voidStar) ->'a -> unit -> voidStar exception Memory (* malloc - allocate memory. N.B. argument is the number of bytes. Raises Memory exception if it cannot allocate. *) val malloc: word -> voidStar (* free - free allocated memory. *) val free: voidStar -> unit + (* Load and store a value. From 5.8.2 the offset is + treated as signed. *) val get8: voidStar * Word.word -> Word8.word val get16: voidStar * Word.word -> Word.word val get32: voidStar * Word.word -> Word32.word val get64: voidStar * Word.word -> SysWord.word val set8: voidStar * Word.word * Word8.word -> unit val set16: voidStar * Word.word * Word.word -> unit val set32: voidStar * Word.word * Word32.word -> unit val set64: voidStar * Word.word * SysWord.word -> unit val getFloat: voidStar * Word.word -> real val getDouble: voidStar * Word.word -> real val setFloat: voidStar * Word.word * real -> unit val setDouble: voidStar * Word.word * real -> unit val getAddress: voidStar * Word.word -> voidStar val setAddress: voidStar * Word.word * voidStar -> unit end = struct open ForeignConstants open ForeignMemory exception Foreign = RunCall.Foreign fun id x = x (* Internal utility function. *) fun alignUp(s, align) = Word.andb(s + align-0w1, ~ align) (* Both volatileRef and SysWord.word are the ADDRESSes of the actual value. *) type volatileRef = word ref val memMove: SysWord.word * SysWord.word * word * word* word -> unit = RunCall.moveBytes fun volatileRef init = let (* Allocate a single word marked as mutable, weak, no-overwrite, byte. *) (* A weak byte cell is cleared to zero when it is read in either from the executable or from a saved state. Using the no-overwrite bit ensures that if it is contained in the executable it won't be changed by loading a saved state but there's a problem if it is contained in a parent state. Then loading a child state will clear it because we reload all the parents when we load a child. *) val v = RunCall.allocateWordMemory(sysWordSize div wordSize, 0wx69, 0w0) (* Copy the SysWord into it. *) val () = memMove(init, RunCall.unsafeCast v, 0w0, 0w0, sysWordSize) in v end fun setVolatileRef(v, i) = memMove(i, RunCall.unsafeCast v, 0w0, 0w0, sysWordSize) fun getVolatileRef var = let (* Allocate a single word marked as mutable, byte. *) val v = RunCall.allocateByteMemory(sysWordSize div wordSize, 0wx41) val () = memMove(RunCall.unsafeCast var, v, 0w0, 0w0, sysWordSize) val () = RunCall.clearMutableBit v in v end type voidStar = SysWord.word val voidStar2Sysword = id and sysWord2VoidStar = id (* Exported conversions *) val null: voidStar = 0w0 infix 6 ++ -- - fun s ++ w = s + SysWord.fromLarge(Word.toLarge w) - and s -- w = s - SysWord.fromLarge(Word.toLarge w) + (* Helper operations to add a constant to an address. 
+ These now treat the offset as signed so that adding ~1 is + the same as subtracting 1. *) + fun s ++ w = s + SysWord.fromLarge(Word.toLargeX w) + and s -- w = s - SysWord.fromLarge(Word.toLargeX w) fun 'a memoise(f: 'a -> voidStar) (a: 'a) : unit -> voidStar = let (* Initialise to zero. That means the function won't be executed until we actually want the result. *) val v = volatileRef 0w0 in (* If we've reloaded the volatile ref it will have been reset to zero. We need to execute the function and set it. *) fn () => (case getVolatileRef v of 0w0 => let val r = f a in setVolatileRef(v, r); r end | r => r) end exception Memory (* Get and set addresses. This is a bit messy because it has to compile on 64-bits as well as 32-bits. *) val getAddress: voidStar * Word.word -> voidStar = if sysWordSize = 0w4 then Word32.toLargeWord o get32 else get64 val setAddress: voidStar * Word.word * voidStar -> unit = if sysWordSize = 0w4 then fn (s, i, v) => set32(s, i, Word32.fromLargeWord v) else set64 local local val ffiGeneralCall = RunCall.rtsCallFull2 "PolyFFIGeneral" in fun ffiGeneral(code: int, arg: 'a): 'b = RunCall.unsafeCast(ffiGeneralCall(RunCall.unsafeCast(code, arg))) end fun systemMalloc (s: word): voidStar = ffiGeneral (0, s) (*fun systemFree (s: voidStar): unit = ffiGeneral (1, s)*) (* Simple malloc/free implementation to reduce the number of RTS calls needed. *) val lock = Thread.Mutex.mutex() (* It would be possible to chain the free list in the C memory itself. For the moment we don't do that. The free list is the list of chunks ordered by increasing address. That allows us to merge adjacent free blocks. *) val freeList: {address: SysWord.word, size: word} list ref = LibrarySupport.noOverwriteRef nil (* Clear it once on entry. *) val () = PolyML.onEntry (fn _ => freeList := nil) (* Assume that if we align to the maximum of these we're all right. *) val maxAlign = Word.max(#align saDouble, Word.max(#align saPointer, #align saSint64)) (* We need a length word in each object we allocate but we need enough padding to align the result. *) val overhead = alignUp(sysWordSize, maxAlign) val chunkSize = 0w4096 (* Configure this. *) fun addFree(entry, []) = [entry] | addFree(entry, this :: rest) = if #address entry < #address this then ( if #address entry ++ #size entry = #address this then (* New entry is immediately before old one - merge. *) {address= #address entry, size = #size entry + #size this } :: rest else entry :: this :: rest ) else if #address this ++ #size this = #address entry then (* New entry is immediately after this - merge. Continue because it could also merge with an entry after this as well. *) addFree({address= #address this, size= #size entry + #size this}, rest) else this :: addFree(entry, rest) (* Search on. *) (* Find free space. 
*) fun findFree (_, []) = (NONE, []) | findFree (space, (this as {size, address}) :: tl) = if space = size then (SOME address, tl) else if space < size then (SOME address, {size=size-space, address=address ++ space} :: tl) else let val (res, rest) = findFree(space, tl) in (res, this :: rest) end fun freeMem s = let val addr = s -- overhead val size = Word.fromLarge(SysWord.toLarge(getAddress(addr, 0w0))) in freeList := addFree({address=addr, size=size}, !freeList) end fun allocMem s = let val space = alignUp(s + overhead, maxAlign) val (found, newList) = findFree(space, !freeList) in case found of NONE => let (* Need more memory *) val requestSpace = Word.max(chunkSize, space) val newSpace = systemMalloc requestSpace val _ = newSpace <> null orelse raise Memory in (* Add the space to the free list in the appropriate place. *) freeList := addFree({address=newSpace, size=requestSpace}, !freeList); allocMem s (* Repeat - should succeed now. *) end | SOME address => let val () = freeList := newList (* Update the free list *) (* Store the length in the first word. *) val () = setAddress(address, 0w0, SysWord.fromLarge(Word.toLarge space)) in address ++ overhead end end in val malloc: word -> voidStar = ThreadLib.protect lock allocMem fun free v = if v = null then () else ThreadLib.protect lock freeMem v end end; diff --git a/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml b/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml index 534b3364..0c7db185 100644 --- a/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml +++ b/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml @@ -1,728 +1,728 @@ (* Copyright (c) 2012, 2016-20 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Intermediate code tree for the back end of the compiler. *) structure BackendIntermediateCode: BackendIntermediateCodeSig = struct open Address structure BuiltIns = struct datatype testConditions = TestEqual | TestLess | TestLessEqual | TestGreater | TestGreaterEqual | TestUnordered (* Reals only. 
*) datatype arithmeticOperations = ArithAdd | ArithSub | ArithMult | ArithQuot | ArithRem | ArithDiv | ArithMod datatype logicalOperations = LogicalAnd | LogicalOr | LogicalXor datatype shiftOperations = ShiftLeft | ShiftRightLogical | ShiftRightArithmetic datatype unaryOps = NotBoolean | IsTaggedValue | MemoryCellLength | MemoryCellFlags | ClearMutableFlag | AtomicIncrement | AtomicDecrement | AtomicReset | LongWordToTagged | SignedToLongWord | UnsignedToLongWord | RealAbs of precision | RealNeg of precision | RealFixedInt of precision | FloatToDouble | DoubleToFloat of IEEEReal.rounding_mode option | RealToInt of precision * IEEEReal.rounding_mode | TouchAddress and precision = PrecSingle | PrecDouble and binaryOps = WordComparison of { test: testConditions, isSigned: bool } | FixedPrecisionArith of arithmeticOperations | WordArith of arithmeticOperations | WordLogical of logicalOperations | WordShift of shiftOperations | AllocateByteMemory | LargeWordComparison of testConditions | LargeWordArith of arithmeticOperations | LargeWordLogical of logicalOperations | LargeWordShift of shiftOperations | RealComparison of testConditions * precision | RealArith of arithmeticOperations * precision | PointerEq fun unaryRepr NotBoolean = "NotBoolean" | unaryRepr IsTaggedValue = "IsTaggedValue" | unaryRepr MemoryCellLength = "MemoryCellLength" | unaryRepr MemoryCellFlags = "MemoryCellFlags" | unaryRepr ClearMutableFlag = "ClearMutableFlag" | unaryRepr AtomicIncrement = "AtomicIncrement" | unaryRepr AtomicDecrement = "AtomicDecrement" | unaryRepr AtomicReset = "AtomicReset" | unaryRepr LongWordToTagged = "LongWordToTagged" | unaryRepr SignedToLongWord = "SignedToLongWord" | unaryRepr UnsignedToLongWord = "UnsignedToLongWord" | unaryRepr (RealAbs prec) = "RealAbs" ^ precRepr prec | unaryRepr (RealNeg prec) = "RealNeg" ^ precRepr prec | unaryRepr (RealFixedInt prec) = "RealFixedInt" ^ precRepr prec | unaryRepr FloatToDouble = "FloatToDouble" | unaryRepr (DoubleToFloat NONE) = "DoubleToFloat" | unaryRepr (DoubleToFloat (SOME mode)) = "DoubleToFloat" ^ rndModeRepr mode | unaryRepr (RealToInt (prec, mode)) = "RealToInt" ^ precRepr prec ^ rndModeRepr mode | unaryRepr TouchAddress = "TouchAddress" and binaryRepr (WordComparison{test, isSigned}) = "Test" ^ (testRepr test) ^ (if isSigned then "Signed" else "Unsigned") | binaryRepr (FixedPrecisionArith arithOp) = (arithRepr arithOp) ^ "Fixed" | binaryRepr (WordArith arithOp) = (arithRepr arithOp) ^ "Word" | binaryRepr (WordLogical logOp) = (logicRepr logOp) ^ "Word" | binaryRepr (WordShift shiftOp) = (shiftRepr shiftOp) ^ "Word" | binaryRepr AllocateByteMemory = "AllocateByteMemory" | binaryRepr (LargeWordComparison test) = "Test" ^ (testRepr test) ^ "LargeWord" | binaryRepr (LargeWordArith arithOp) = (arithRepr arithOp) ^ "LargeWord" | binaryRepr (LargeWordLogical logOp) = (logicRepr logOp) ^ "LargeWord" | binaryRepr (LargeWordShift shiftOp) = (shiftRepr shiftOp) ^ "LargeWord" | binaryRepr (RealComparison (test, prec)) = "Test" ^ testRepr test ^ precRepr prec | binaryRepr (RealArith (arithOp, prec)) = arithRepr arithOp ^ precRepr prec | binaryRepr PointerEq = "PointerEq" and testRepr TestEqual = "Equal" | testRepr TestLess = "Less" | testRepr TestLessEqual = "LessEqual" | testRepr TestGreater = "Greater" | testRepr TestGreaterEqual = "GreaterEqual" | testRepr TestUnordered = "Unordered" and arithRepr ArithAdd = "Add" | arithRepr ArithSub = "Sub" | arithRepr ArithMult = "Mult" | arithRepr ArithQuot = "Quot" | arithRepr ArithRem = "Rem" | arithRepr ArithDiv = 
"Div" | arithRepr ArithMod = "Mod" and logicRepr LogicalAnd = "And" | logicRepr LogicalOr = "Or" | logicRepr LogicalXor = "Xor" and shiftRepr ShiftLeft = "Left" | shiftRepr ShiftRightLogical = "RightLogical" | shiftRepr ShiftRightArithmetic = "RightArithmetic" and precRepr PrecSingle = "Single" | precRepr PrecDouble = "Double" and rndModeRepr IEEEReal.TO_NEAREST = "Round" | rndModeRepr IEEEReal.TO_NEGINF = "Down" | rndModeRepr IEEEReal.TO_POSINF = "Up" | rndModeRepr IEEEReal.TO_ZERO = "Trunc" end datatype argumentType = GeneralType | DoubleFloatType | SingleFloatType datatype backendIC = BICNewenv of bicCodeBinding list * backendIC (* Set of bindings with an expression. *) | BICConstnt of machineWord * Universal.universal list (* Load a constant *) | BICExtract of bicLoadForm (* Get a local variable, an argument or a closure value *) | BICField of {base: backendIC, offset: int } (* Load a field from a tuple or record *) | BICEval of (* Evaluate a function with an argument list. *) { function: backendIC, argList: (backendIC * argumentType) list, resultType: argumentType } (* Built-in functions. *) | BICUnary of {oper: BuiltIns.unaryOps, arg1: backendIC} | BICBinary of {oper: BuiltIns.binaryOps, arg1: backendIC, arg2: backendIC} | BICArbitrary of {oper: BuiltIns.arithmeticOperations, shortCond: backendIC, arg1: backendIC, arg2: backendIC, longCall: backendIC} | BICLambda of bicLambdaForm (* Lambda expressions. *) | BICCond of backendIC * backendIC * backendIC (* If-then-else expression *) | BICCase of (* Case expressions *) { cases : backendIC option list, (* NONE means "jump to the default". *) test : backendIC, default : backendIC, isExhaustive: bool, firstIndex: word } | BICBeginLoop of (* Start of tail-recursive inline function. *) { loop: backendIC, arguments: (bicSimpleBinding * argumentType) list } | BICLoop of (backendIC * argumentType) list (* Jump back to start of tail-recursive function. *) | BICRaise of backendIC (* Raise an exception *) | BICHandle of (* Exception handler. *) { exp: backendIC, handler: backendIC, exPacketAddr: int } | BICTuple of backendIC list (* Tuple *) | BICSetContainer of (* Copy a tuple to a container. *) { container: backendIC, tuple: backendIC, filter: BoolVector.vector } | BICLoadContainer of {base: backendIC, offset: int } | BICTagTest of { test: backendIC, tag: word, maxTag: word } | BICLoadOperation of { kind: loadStoreKind, address: bicAddress } | BICStoreOperation of { kind: loadStoreKind, address: bicAddress, value: backendIC } | BICBlockOperation of { kind: blockOpKind, sourceLeft: bicAddress, destRight: bicAddress, length: backendIC } | BICGetThreadId | BICAllocateWordMemory of {numWords: backendIC, flags: backendIC, initial: backendIC} and bicCodeBinding = BICDeclar of bicSimpleBinding (* Make a local declaration or push an argument *) | BICRecDecs of { addr: int, lambda: bicLambdaForm } list (* Set of mutually recursive declarations. *) | BICNullBinding of backendIC (* Just evaluate the expression and discard the result. *) | BICDecContainer of { addr: int, size: int } (* Create a container for a tuple on the stack. *) and caseType = CaseWord (* Word or fixed-precision integer. *) | CaseTag of word and bicLoadForm = BICLoadLocal of int (* Local binding *) | BICLoadArgument of int (* Argument - 0 is first arg etc.*) | BICLoadClosure of int (* Closure - 0 is first closure item etc *) | BICLoadRecursive (* Recursive call *) and loadStoreKind = LoadStoreMLWord of {isImmutable: bool} (* Load/Store an ML word in an ML word cell. 
*) | LoadStoreMLByte of {isImmutable: bool} (* Load/Store a byte, tagging and untagging as appropriate, in an ML byte cell. *) | LoadStoreC8 (* Load/Store C values - The base address is a boxed SysWord.word value. *) | LoadStoreC16 | LoadStoreC32 | LoadStoreC64 | LoadStoreCFloat | LoadStoreCDouble | LoadStoreUntaggedUnsigned and blockOpKind = BlockOpMove of {isByteMove: bool} | BlockOpEqualByte | BlockOpCompareByte withtype bicSimpleBinding = { (* Declare a value or push an argument. *) value: backendIC, addr: int } and bicLambdaForm = { (* Lambda expressions. *) body : backendIC, name : string, closure : bicLoadForm list, argTypes : argumentType list, resultType : argumentType, localCount : int, heapClosure : bool } and bicAddress = (* Address form used in loads, store and block operations. The base is an ML address if this is to/from ML memory or a (boxed) SysWord.word if it is to/from C memory. The index is a value in units of the size of the item being loaded/stored and the offset is always in bytes. *) - {base: backendIC, index: backendIC option, offset: word} + {base: backendIC, index: backendIC option, offset: int} structure CodeTags = struct open Universal val tupleTag: universal list list tag = tag() fun splitProps _ [] = (NONE, []) | splitProps tag (hd::tl) = if Universal.tagIs tag hd then (SOME hd, tl) else let val (p, l) = splitProps tag tl in (p, hd :: l) end fun mergeTupleProps(p, []) = p | mergeTupleProps([], p) = p | mergeTupleProps(m, n) = ( case (splitProps tupleTag m, splitProps tupleTag n) of ((SOME mp, ml), (SOME np, nl)) => let val mpl = Universal.tagProject tupleTag mp and npl = Universal.tagProject tupleTag np val merge = ListPair.mapEq mergeTupleProps (mpl, npl) in Universal.tagInject tupleTag merge :: (ml @ nl) end | _ => m @ n ) end fun loadStoreKindRepr(LoadStoreMLWord {isImmutable=true}) = "MLWordImmutable" | loadStoreKindRepr(LoadStoreMLWord {isImmutable=false}) = "MLWord" | loadStoreKindRepr(LoadStoreMLByte {isImmutable=true}) = "MLByteImmutable" | loadStoreKindRepr(LoadStoreMLByte {isImmutable=false}) = "MLByte" | loadStoreKindRepr LoadStoreC8 = "C8Bit" | loadStoreKindRepr LoadStoreC16 = "C16Bit" | loadStoreKindRepr LoadStoreC32 = "C32Bit" | loadStoreKindRepr LoadStoreC64 = "C64Bit" | loadStoreKindRepr LoadStoreCFloat = "CFloat" | loadStoreKindRepr LoadStoreCDouble = "CDouble" | loadStoreKindRepr LoadStoreUntaggedUnsigned = "MLWordUntagged" fun blockOpKindRepr (BlockOpMove{isByteMove=false}) = "MoveWord" | blockOpKindRepr (BlockOpMove{isByteMove=true}) = "MoveByte" | blockOpKindRepr BlockOpEqualByte = "EqualByte" | blockOpKindRepr BlockOpCompareByte = "CompareByte" open Pretty fun pList ([]: 'b list, _: string, _: 'b->pretty) = [] | pList ([h], _, disp) = [disp h] | pList (h::t, sep, disp) = PrettyBlock (0, false, [], [ disp h, PrettyBreak (0, 0), PrettyString sep ] ) :: PrettyBreak (1, 0) :: pList (t, sep, disp) fun pretty (pt : backendIC) : pretty = let fun printList(start, lst, sep) : pretty = PrettyBlock (1, true, [], PrettyString (start ^ "(") :: pList(lst, sep, pretty) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) fun prettyArgType GeneralType = PrettyString "G" | prettyArgType DoubleFloatType = PrettyString "D" | prettyArgType SingleFloatType = PrettyString "F" fun prettyArg (c, t) = PrettyBlock(1, false, [], [pretty c, PrettyBreak (1, 0), prettyArgType t]) fun prettyArgs(start, lst, sep) : pretty = PrettyBlock (1, true, [], PrettyString (start ^ "(") :: pList(lst, sep, prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) fun 
prettyAddress({base, index, offset}: bicAddress): pretty = let in PrettyBlock (1, true, [], [ PrettyString "[", PrettyBreak (0, 3), pretty base, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), case index of NONE => PrettyString "-" | SOME i => pretty i, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), - PrettyString(Word.toString offset), PrettyBreak (0, 0), PrettyString "]" + PrettyString(Int.toString offset), PrettyBreak (0, 0), PrettyString "]" ]) end in case pt of BICEval {function, argList, resultType} => let val prettyArgs = PrettyBlock (1, true, [], PrettyString ("$(") :: pList(argList, ",", prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) in PrettyBlock (3, false, [], [ pretty function, PrettyBreak(1, 0), prettyArgType resultType, PrettyBreak(1, 0), prettyArgs ] ) end | BICGetThreadId => PrettyString "GetThreadId" | BICUnary { oper, arg1 } => PrettyBlock (3, false, [], [ PrettyString(BuiltIns.unaryRepr oper), PrettyBreak(1, 0), printList("", [arg1], ",") ] ) | BICBinary { oper, arg1, arg2 } => PrettyBlock (3, false, [], [ PrettyString(BuiltIns.binaryRepr oper), PrettyBreak(1, 0), printList("", [arg1, arg2], ",") ] ) | BICArbitrary { oper, shortCond, arg1, arg2, longCall } => PrettyBlock (3, false, [], [ PrettyString(BuiltIns.arithRepr oper), PrettyBreak(1, 0), printList("", [shortCond, arg1, arg2, longCall], ",") ] ) | BICAllocateWordMemory { numWords, flags, initial } => PrettyBlock (3, false, [], [ PrettyString "AllocateWordMemory", PrettyBreak(1, 0), printList("", [numWords, flags, initial], ",") ] ) | BICExtract (BICLoadLocal addr) => let val str : string = concat ["LOCAL(", Int.toString addr, ")"] in PrettyString str end | BICExtract (BICLoadArgument addr) => let val str : string = concat ["PARAM(", Int.toString addr, ")"] in PrettyString str end | BICExtract (BICLoadClosure addr) => let val str : string = concat ["CLOS(", Int.toString addr, ")"] in PrettyString str end | BICExtract (BICLoadRecursive) => let val str : string = concat ["RECURSIVE(", ")"] in PrettyString str end | BICField {base, offset} => let val str = "INDIRECT(" ^ Int.toString offset ^ ", "; in PrettyBlock(0, false, [], [ PrettyString str, pretty base, PrettyString ")" ] ) end | BICLambda {body, name, closure, argTypes, heapClosure, resultType, localCount} => let fun prettyArgTypes [] = [] | prettyArgTypes [last] = [prettyArgType last] | prettyArgTypes (hd::tl) = prettyArgType hd :: PrettyBreak(1, 0) :: prettyArgTypes tl in PrettyBlock (1, true, [], [ PrettyString ("LAMBDA("), PrettyBreak (1, 0), PrettyString name, PrettyBreak (1, 0), PrettyString ( "CL=" ^ Bool.toString heapClosure), PrettyString (" LOCALS=" ^ Int.toString localCount), PrettyBreak(1, 0), PrettyBlock (1, false, [], PrettyString "ARGS=" :: prettyArgTypes argTypes), PrettyBreak(1, 0), PrettyBlock (1, false, [], [PrettyString "RES=", prettyArgType resultType]), printList (" CLOS=", map BICExtract closure, ","), PrettyBreak (1, 0), pretty body, PrettyString "){LAMBDA}" ] ) end | BICConstnt (w, _) => PrettyString (stringOfWord w) | BICCond (f, s, t) => PrettyBlock (1, true, [], [ PrettyString "IF(", pretty f, PrettyString ", ", PrettyBreak (0, 0), pretty s, PrettyString ", ", PrettyBreak (0, 0), pretty t, PrettyBreak (0, 0), PrettyString (")") ] ) | BICNewenv(decs, final) => PrettyBlock (1, true, [], PrettyString ("BLOCK" ^ "(") :: pList(decs, ";", prettyBinding) @ [ PrettyBreak (1, 0), pretty final, PrettyBreak (0, 0), PrettyString (")") ] ) | BICBeginLoop{loop=loopExp, arguments=args } => let fun prettyArg (c, t) = 
PrettyBlock(1, false, [], [prettySimpleBinding c, PrettyBreak (1, 0), prettyArgType t]) in PrettyBlock (3, false, [], [ PrettyBlock (1, true, [], PrettyString ("BEGINLOOP(") :: pList(args, ",", prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ), PrettyBreak (0, 0), PrettyString "(", PrettyBreak (0, 0), pretty loopExp, PrettyBreak (0, 0), PrettyString ")" ] ) end | BICLoop ptl => prettyArgs("LOOP", ptl, ",") | BICRaise c => PrettyBlock (1, true, [], [ PrettyString "RAISE(", pretty c, PrettyBreak (0, 0), PrettyString (")") ] ) | BICHandle {exp, handler, exPacketAddr} => PrettyBlock (3, false, [], [ PrettyString "HANDLE(", pretty exp, PrettyString ("WITH exid=" ^ Int.toString exPacketAddr), PrettyBreak (1, 0), pretty handler, PrettyString ")" ] ) | BICCase {cases, test, default, isExhaustive, firstIndex, ...} => PrettyBlock (1, true, [], PrettyString "CASE (" :: pretty test :: PrettyBreak (1, 0) :: PrettyString ("( from " ^ Word.toString firstIndex ^ (if isExhaustive then " exhaustive" else "")) :: PrettyBreak (1, 0) :: pList(cases, ",", fn (SOME exp) => PrettyBlock (1, true, [], [ PrettyString "=>", PrettyBreak (1, 0), pretty exp ]) | NONE => PrettyString "=> default" ) @ [ PrettyBreak (1, 0), PrettyBlock (1, false, [], [ PrettyString "ELSE:", PrettyBreak (1, 0), pretty default ] ), PrettyBreak (1, 0), PrettyString (") {"^"CASE"^"}") ] ) | BICTuple ptl => printList("RECCONSTR", ptl, ",") | BICSetContainer{container, tuple, filter} => let val source = BoolVector.length filter val dest = BoolVector.foldl(fn (true, n) => n+1 | (false, n) => n) 0 filter in PrettyBlock (3, false, [], [ PrettyString (concat["SETCONTAINER(", Int.toString dest, "/", Int.toString source, ", "]), pretty container, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), pretty tuple, PrettyBreak (0, 0), PrettyString ")" ] ) end | BICLoadContainer {base, offset} => let val str = "INDIRECTCONTAINER(" ^ Int.toString offset ^ ", "; in PrettyBlock(0, false, [], [ PrettyString str, pretty base, PrettyString ")" ] ) end | BICTagTest { test, tag, maxTag } => PrettyBlock (3, false, [], [ PrettyString (concat["TAGTEST(", Word.toString tag, ", ", Word.toString maxTag, ","]), PrettyBreak (1, 0), pretty test, PrettyBreak (0, 0), PrettyString ")" ] ) | BICLoadOperation{ kind, address } => PrettyBlock (3, false, [], [ PrettyString("Load" ^ loadStoreKindRepr kind), PrettyBreak (1, 0), prettyAddress address ] ) | BICStoreOperation{ kind, address, value } => PrettyBlock (3, false, [], [ PrettyString("Store" ^ loadStoreKindRepr kind), PrettyBreak (1, 0), prettyAddress address, PrettyBreak (1, 0), PrettyString "<=", PrettyBreak (1, 0), pretty value ] ) | BICBlockOperation{ kind, sourceLeft, destRight, length } => PrettyBlock (3, false, [], [ PrettyString(blockOpKindRepr kind ^ "("), PrettyBreak (1, 0), prettyAddress sourceLeft, PrettyBreak (1, 0), PrettyString ",", prettyAddress destRight, PrettyBreak (1, 0), PrettyString ",", pretty length, PrettyBreak (1, 0), PrettyString ")" ] ) (* That list should be exhaustive! 
*) end (* pretty *) and prettyBinding(BICDeclar dec) = prettySimpleBinding dec | prettyBinding(BICRecDecs ptl) = let fun prettyRDec {lambda, addr} = PrettyBlock (1, false, [], [ PrettyString (concat ["DECL #", Int.toString addr, "="]), PrettyBreak (1, 0), pretty(BICLambda lambda) ] ) in PrettyBlock (1, true, [], PrettyString ("MUTUAL" ^ "(") :: pList(ptl, " AND ", prettyRDec) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) end | prettyBinding(BICNullBinding c) = pretty c | prettyBinding(BICDecContainer{addr, size}) = PrettyString (concat ["CONTAINER #", Int.toString addr, "=", Int.toString size]) and prettySimpleBinding{value, addr} = PrettyBlock (1, false, [], [ PrettyString (concat ["DECL #", Int.toString addr, "="]), PrettyBreak (1, 0), pretty value ] ) structure Sharing = struct type backendIC = backendIC and bicLoadForm = bicLoadForm and caseType = caseType and pretty = pretty and argumentType = argumentType and bicCodeBinding = bicCodeBinding and bicSimpleBinding = bicSimpleBinding and loadStoreKind = loadStoreKind and blockOpKind = blockOpKind and unaryOps = BuiltIns.unaryOps and binaryOps = BuiltIns.binaryOps and testConditions = BuiltIns.testConditions and arithmeticOperations = BuiltIns.arithmeticOperations end end; diff --git a/mlsource/MLCompiler/CodeTree/BackendIntermediateCodeSig.sml b/mlsource/MLCompiler/CodeTree/BackendIntermediateCodeSig.sml index 48fbeb6d..0d37a439 100644 --- a/mlsource/MLCompiler/CodeTree/BackendIntermediateCodeSig.sml +++ b/mlsource/MLCompiler/CodeTree/BackendIntermediateCodeSig.sml @@ -1,187 +1,189 @@ (* - Copyright (c) 2012, 2016-18 David C.J. Matthews + Copyright (c) 2012, 2016-18, 2020 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Intermediate code tree for the back end of the compiler. *) signature BackendIntermediateCodeSig = sig type machineWord = Address.machineWord datatype argumentType = GeneralType | DoubleFloatType | SingleFloatType structure BuiltIns: BUILTINS datatype backendIC = BICNewenv of bicCodeBinding list * backendIC (* Set of bindings with an expression. *) | BICConstnt of machineWord * Universal.universal list (* Load a constant *) | BICExtract of bicLoadForm (* Get a local variable, an argument or a closure value *) | BICField of {base: backendIC, offset: int } (* Load a field from a tuple or record *) | BICEval of (* Evaluate a function with an argument list. *) { function: backendIC, argList: (backendIC * argumentType) list, resultType: argumentType } (* Built-in functions. *) | BICUnary of {oper: BuiltIns.unaryOps, arg1: backendIC} | BICBinary of {oper: BuiltIns.binaryOps, arg1: backendIC, arg2: backendIC} | BICArbitrary of {oper: BuiltIns.arithmeticOperations, shortCond: backendIC, arg1: backendIC, arg2: backendIC, longCall: backendIC} | BICLambda of bicLambdaForm (* Lambda expressions. 
*) | BICCond of backendIC * backendIC * backendIC (* If-then-else expression *) | BICCase of (* Case expressions *) { cases : backendIC option list, (* NONE means "jump to the default". *) test : backendIC, default : backendIC, isExhaustive: bool, firstIndex: word } | BICBeginLoop of (* Start of tail-recursive inline function. *) { loop: backendIC, arguments: (bicSimpleBinding * argumentType) list } | BICLoop of (backendIC * argumentType) list (* Jump back to start of tail-recursive function. *) | BICRaise of backendIC (* Raise an exception *) | BICHandle of (* Exception handler. *) { exp: backendIC, handler: backendIC, exPacketAddr: int } | BICTuple of backendIC list (* Tuple *) | BICSetContainer of (* Copy a tuple to a container. *) { container: backendIC, tuple: backendIC, filter: BoolVector.vector } | BICLoadContainer of {base: backendIC, offset: int } | BICTagTest of { test: backendIC, tag: word, maxTag: word } | BICLoadOperation of { kind: loadStoreKind, address: bicAddress } | BICStoreOperation of { kind: loadStoreKind, address: bicAddress, value: backendIC } | BICBlockOperation of { kind: blockOpKind, sourceLeft: bicAddress, destRight: bicAddress, length: backendIC } | BICGetThreadId | BICAllocateWordMemory of {numWords: backendIC, flags: backendIC, initial: backendIC} and bicCodeBinding = BICDeclar of bicSimpleBinding (* Make a local declaration or push an argument *) | BICRecDecs of { addr: int, lambda: bicLambdaForm } list (* Set of mutually recursive declarations. *) | BICNullBinding of backendIC (* Just evaluate the expression and discard the result. *) | BICDecContainer of { addr: int, size: int } (* Create a container for a tuple on the stack. *) and caseType = CaseWord (* Word or fixed-precision integer. *) | CaseTag of word and bicLoadForm = BICLoadLocal of int (* Local binding *) | BICLoadArgument of int (* Argument - 0 is first arg etc.*) | BICLoadClosure of int (* Closure - 0 is first closure item etc *) | BICLoadRecursive (* Recursive call *) and loadStoreKind = LoadStoreMLWord of {isImmutable: bool} (* Load/Store an ML word in an ML word cell. *) | LoadStoreMLByte of {isImmutable: bool} (* Load/Store a byte, tagging and untagging as appropriate, in an ML byte cell. *) | LoadStoreC8 (* Load/Store C values - The base address is a boxed SysWord.word value. *) | LoadStoreC16 | LoadStoreC32 | LoadStoreC64 | LoadStoreCFloat | LoadStoreCDouble | LoadStoreUntaggedUnsigned and blockOpKind = BlockOpMove of {isByteMove: bool} | BlockOpEqualByte | BlockOpCompareByte withtype bicSimpleBinding = { (* Declare a value or push an argument. *) value: backendIC, addr: int } and bicLambdaForm = { (* Lambda expressions. *) body : backendIC, name : string, closure : bicLoadForm list, argTypes : argumentType list, resultType : argumentType, localCount : int, heapClosure : bool } and bicAddress = (* Address form used in loads, store and block operations. The base is an ML address if this is to/from ML memory or a (boxed) SysWord.word if it is to/from C memory. The index is a value in units of the size of the item - being loaded/stored and the offset is always in bytes. *) - {base: backendIC, index: backendIC option, offset: word} + being loaded/stored and the offset is always in bytes. + For ML memory accesses the index and offset are unsigned; for C values + they are signed. 
*) + {base: backendIC, index: backendIC option, offset: int} type pretty val pretty : backendIC -> pretty val loadStoreKindRepr: loadStoreKind -> string and blockOpKindRepr: blockOpKind -> string structure CodeTags: sig val tupleTag: Universal.universal list list Universal.tag val mergeTupleProps: Universal.universal list * Universal.universal list -> Universal.universal list end structure Sharing: sig type backendIC = backendIC and bicLoadForm = bicLoadForm and caseType = caseType and pretty = pretty and argumentType = argumentType and bicCodeBinding = bicCodeBinding and bicSimpleBinding = bicSimpleBinding and loadStoreKind = loadStoreKind and blockOpKind = blockOpKind and unaryOps = BuiltIns.unaryOps and binaryOps = BuiltIns.binaryOps and testConditions = BuiltIns.testConditions and arithmeticOperations = BuiltIns.arithmeticOperations end end; diff --git a/mlsource/MLCompiler/CodeTree/BaseCodeTree.sml b/mlsource/MLCompiler/CodeTree/BaseCodeTree.sml index 7bfa7568..485d930b 100644 --- a/mlsource/MLCompiler/CodeTree/BaseCodeTree.sml +++ b/mlsource/MLCompiler/CodeTree/BaseCodeTree.sml @@ -1,777 +1,777 @@ (* Copyright (c) 2000 Cambridge University Technical Services Limited Modified David C. J. Matthews 2008-2010, 2013, 2015, 2017, 2020 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Basic code-tree data structure. This was previously partly in GCODE.ML and partly in CODETREE.ML. *) structure BaseCodeTree: BaseCodeTreeSig = struct open Address datatype argumentType = datatype BackendIntermediateCode.argumentType datatype loadStoreKind = datatype BackendIntermediateCode.loadStoreKind datatype blockOpKind = datatype BackendIntermediateCode.blockOpKind structure BuiltIns = BackendIntermediateCode.BuiltIns datatype arbPrecisionOps = ArbCompare of BuiltIns.testConditions | ArbArith of BuiltIns.arithmeticOperations datatype inlineStatus = DontInline | InlineAlways | SmallInline (* How variables are used. Added and examined by the optimisation pass. *) datatype codeUse = UseGeneral (* Used in some other context. *) | UseExport (* Exported i.e. the result of a top-level binding. *) | UseApply of codeUse list * codetree list (* Applied as a function - the list is where the result goes, the codetree list is the code that was used for each argument. *) | UseField of int * codeUse list (* Selected as a field - the list is where the result goes *) and codetree = Newenv of codeBinding list * codetree (* Set of bindings with an expression. *) | Constnt of machineWord * Universal.universal list (* Load a constant *) | Extract of loadForm (* Get a local variable, an argument or a closure value *) | Indirect of {base: codetree, offset: int, indKind: indKind } (* Load a value from the heap or the stack. *) | Eval of (* Evaluate a function with an argument list. *) { function: codetree, argList: (codetree * argumentType) list, resultType: argumentType } (* Built-in functions. 
*) | Unary of {oper: BuiltIns.unaryOps, arg1: codetree} | Binary of {oper: BuiltIns.binaryOps, arg1: codetree, arg2: codetree} (* Arbitrary precision operations. This combines some conditionals with the operation. shortCond is the condition that must be satisfied for the short precision operation to be executed. longCall is called if either argument is long or the evaluation overflows. *) | Arbitrary of {oper: arbPrecisionOps, shortCond: codetree, arg1: codetree, arg2: codetree, longCall: codetree} | Lambda of lambdaForm (* Lambda expressions. *) | Cond of codetree * codetree * codetree (* If-statement *) | BeginLoop of (* Start of tail-recursive inline function. *) { loop: codetree, arguments: (simpleBinding * argumentType) list } | Loop of (codetree * argumentType) list (* Jump back to start of tail-recursive function. *) | Raise of codetree (* Raise an exception *) | Handle of (* Exception handler. *) { exp: codetree, handler: codetree, exPacketAddr: int } | Tuple of { fields: codetree list, isVariant: bool } (* Tuples and datatypes *) | SetContainer of (* Copy a tuple to a container. *) { container: codetree, tuple: codetree, filter: BoolVector.vector } | TagTest of { test: codetree, tag: word, maxTag: word } | LoadOperation of { kind: loadStoreKind, address: codeAddress } | StoreOperation of { kind: loadStoreKind, address: codeAddress, value: codetree } | BlockOperation of { kind: blockOpKind, sourceLeft: codeAddress, destRight: codeAddress, length: codetree } | GetThreadId | AllocateWordMemory of {numWords: codetree, flags: codetree, initial: codetree} and codeBinding = Declar of simpleBinding (* Make a local declaration or push an argument *) | RecDecs of { addr: int, lambda: lambdaForm, use: codeUse list } list (* Set of mutually recursive declarations. *) | NullBinding of codetree (* Just evaluate the expression and discard the result. *) | Container of { addr: int, use: codeUse list, size: int, setter: codetree } (* Container: allocate a piece of stack space and set it to the values from a tuple. *) and loadForm = LoadArgument of int | LoadLocal of int | LoadClosure of int | LoadRecursive (* When we look up an entry in the environment we get a pair of a "general" value, which is either a constant or a load, and an optional special value, which is either a tuple or an inline function. Tuple entries are functions from an integer offset to one of these pairs; inline function entries are a lambda together with a map for the free variables. *) and envGeneral = EnvGenLoad of loadForm | EnvGenConst of machineWord * Universal.universal list (* Special entries. The type of both EnvSpecTuple and EnvSpecInlineFunction includes a function from int, the index, to the (general, special) pair rather than a list of either fields or closure entries. The main reason is that if we have a function that contains a reference to, say a tuple, in its closure we can pass in a EnvSpecTuple entry with a function that only adds a field to the closure if the field is actually used. Passing a list would require adding all the fields to the closure at the time the EnvSpecTuple was passed. EnvSpecBuiltInX are used for a small number of built-in functions which can be simplied if they occur in combination with others. *) and envSpecial = EnvSpecNone | EnvSpecTuple of int * (int -> envGeneral * envSpecial) | EnvSpecInlineFunction of lambdaForm * (int -> envGeneral * envSpecial) | EnvSpecUnary of BuiltIns.unaryOps * codetree | EnvSpecBinary of BuiltIns.binaryOps * codetree * codetree (* Indirection types. 
IndTuple is from a tuple so the field will always be present; IndVariant is from a datatype which may have other variants that do not have the field; IndContainer is from a container (a set of words on the stack). *) and indKind = IndTuple | IndVariant | IndContainer withtype simpleBinding = { (* Declare a value or push an argument. *) value: codetree, addr: int, use: codeUse list } and lambdaForm = { (* Lambda expressions. *) body : codetree, isInline : inlineStatus, name : string, closure : loadForm list, argTypes : (argumentType * codeUse list) list, resultType : argumentType, localCount : int, recUse : codeUse list } - and codeAddress = {base: codetree, index: codetree option, offset: word} + and codeAddress = {base: codetree, index: codetree option, offset: int} structure CodeTags = struct open Universal (* Import tags from back end *) open BackendIntermediateCode.CodeTags val inlineCodeTag: envSpecial tag = tag() end open Pretty (* Common cases. *) val space = PrettyBreak (1, 0) fun block l = PrettyBlock (0, false, [], l) val string = PrettyString fun pList ([]: 'b list, _: string, _: 'b->pretty) = [] | pList ([h], _, disp) = [disp h] | pList (h::t, sep, disp) = PrettyBlock (0, false, [], [ disp h, PrettyBreak (0, 0), PrettyString sep ] ) :: PrettyBreak (1, 0) :: pList (t, sep, disp) fun pretty (pt : codetree) : pretty = let fun printList(start, lst, sep) : pretty = PrettyBlock (1, true, [], PrettyString (start ^ "(") :: pList(lst, sep, pretty) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) fun prettyArg (c, _) = pretty c fun prettyArgs(start, lst, sep) : pretty = PrettyBlock (1, true, [], PrettyString (start ^ "(") :: pList(lst, sep, prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) fun prettyBuiltin(opers, arglist) = PrettyBlock (2, false, [], [ PrettyString opers, PrettyBreak(1, 2), PrettyBlock(2, true, [], [ printList("", arglist, ","), PrettyBreak (0, 0), PrettyString (")") ] ) ] ) fun prettyAddress({base, index, offset}: codeAddress): pretty = let in PrettyBlock (1, true, [], [ PrettyString "[", PrettyBreak (0, 3), pretty base, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), case index of NONE => PrettyString "-" | SOME i => pretty i, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), - PrettyString(Word.toString offset), PrettyBreak (0, 0), PrettyString "]" + PrettyString(Int.toString offset), PrettyBreak (0, 0), PrettyString "]" ]) end in case pt of Eval {function, argList, ...} => PrettyBlock (2, false, [], [ case function of Extract _ => pretty function | Constnt _ => pretty function | _ => PrettyBlock(2, true, [], [ string "(", PrettyBreak(0, 0), pretty function, PrettyBreak(0, 0), string ")" ] ) , PrettyBreak(1, 2), PrettyBlock(2, true, [], ( string "(" :: PrettyBreak(0, 0) :: pList(argList, ",", prettyArg) @ [PrettyBreak (0, 0), PrettyString (")")] ) ) ] ) | GetThreadId => prettyBuiltin("GetThreadId", []) | Unary { oper, arg1 } => prettyBuiltin(BuiltIns.unaryRepr oper, [arg1]) | Binary { oper, arg1, arg2 } => prettyBuiltin(BuiltIns.binaryRepr oper, [arg1, arg2]) | Arbitrary { oper, shortCond, arg1, arg2, longCall } => let val operName = case oper of ArbCompare test => BuiltIns.testRepr test | ArbArith arith => BuiltIns.arithRepr arith in prettyBuiltin(operName ^ "Arbitrary", [shortCond, arg1, arg2, longCall]) end | AllocateWordMemory { numWords, flags, initial } => prettyBuiltin("AllocateWordMemory", [numWords, flags, initial]) | Extract(LoadArgument addr) => string ("Arg" ^ Int.toString addr) | Extract(LoadLocal addr) => string ("Local" ^ 
Int.toString addr) | Extract(LoadClosure addr) => string ("Closure" ^ Int.toString addr) | Extract LoadRecursive => string "Recursive" | Indirect {base, offset, indKind} => PrettyBlock(2, false, [], [ pretty base, PrettyBreak(0, 2), string(concat["[", Int.toString offset, "]", case indKind of IndTuple => "" | IndVariant => "(*V*)" | IndContainer => "(*C*)"]) ] ) | Lambda {body, isInline, name, closure, argTypes, localCount, recUse, resultType, ...} => let val inl = case isInline of DontInline => "" | InlineAlways => "inline," | SmallInline => "small," fun genType GeneralType = [] | genType DoubleFloatType = [ space, string ":double" ] | genType SingleFloatType = [ space, string ":float" ] fun printArgs(n, (t, u) :: rest) = PrettyBlock(4, false, [], [ string("Arg"^Int.toString n), space, prettyUses "" u ] @ genType t @ ( if null rest then [] else [PrettyBreak(0,0), string ",", space] ) ) :: printArgs(n+1, rest) | printArgs(_, []) = [] in PrettyBlock(2, true, [], [ PrettyBlock(4, false, [], [ string "fn(", space, block(printArgs(0, argTypes)), space, string ")"] @ genType resultType @ [ space, string "(*", space, string("\"" ^ name ^ "\""), space, string inl, space, string(Int.toString localCount ^ " locals,"), space, printList ("closure=", map Extract closure, ","), space, prettyUses "recursive use=" recUse, space, string "*)" ]), PrettyBreak(1, 2), pretty body ]) end | Constnt(w, m) => if isShort w andalso toShort w = 0w0 then ( case List.find (Universal.tagIs CodeTags.inlineCodeTag) m of SOME h => ( case Universal.tagProject CodeTags.inlineCodeTag h of EnvSpecInlineFunction(lambda, _) => pretty(Lambda lambda) | _ => PrettyString (stringOfWord w) ) | NONE => PrettyString (stringOfWord w) ) else PrettyString (stringOfWord w) | Cond (f, s, t) => PrettyBlock (0, true, [], [ PrettyBlock(2, false, [], [string "if", space, pretty f]), space, PrettyBlock(2, false, [], [string "then", space, pretty s]), space, PrettyBlock(2, false, [], [string "else", space, pretty t]) ] ) | Newenv(decs, final) => PrettyBlock (0, true, [], [ string "let", PrettyBreak (1, 2), PrettyBlock(2, true, [], pList(decs, ";", prettyBinding)), space, string "in", PrettyBreak(1, 2), PrettyBlock(2, true, [], [pretty final]), space, string "end" ] ) | BeginLoop{loop=loopExp, arguments=args } => let fun prettyArg (c, _) = prettySimpleBinding c in PrettyBlock (3, false, [], [ PrettyBlock (1, true, [], PrettyString ("BEGINLOOP(") :: pList(args, ",", prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ), PrettyBreak (0, 0), PrettyString "(", PrettyBreak (0, 0), pretty loopExp, PrettyBreak (0, 0), PrettyString ")" ] ) end | Loop ptl => prettyArgs("LOOP", ptl, ",") | Raise c => PrettyBlock (1, true, [], [ PrettyString "RAISE(", pretty c, PrettyBreak (0, 0), PrettyString (")") ] ) | Handle {exp, handler, exPacketAddr} => PrettyBlock (3, false, [], [ PrettyString "HANDLE(", pretty exp, PrettyString ("WITH exid=" ^ Int.toString exPacketAddr), PrettyBreak (1, 0), pretty handler, PrettyString ")" ] ) | Tuple { fields, isVariant } => printList(if isVariant then "DATATYPE" else "TUPLE", fields, ",") | SetContainer{container, tuple, filter} => let val source = BoolVector.length filter val dest = BoolVector.foldl(fn (true, n) => n+1 | (false, n) => n) 0 filter in PrettyBlock (3, false, [], [ string (concat["SETCONTAINER(", Int.toString dest, "/", Int.toString source, ", "]), pretty container, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), pretty tuple, PrettyBreak (0, 0), PrettyString ")" ] ) end | TagTest { test, tag, maxTag } 
=> PrettyBlock (3, false, [], [ PrettyString (concat["TAGTEST(", Word.toString tag, ", ", Word.toString maxTag, ","]), PrettyBreak (1, 0), pretty test, PrettyBreak (0, 0), PrettyString ")" ] ) | LoadOperation{ kind, address } => PrettyBlock (3, false, [], [ PrettyString("Load" ^ BackendIntermediateCode.loadStoreKindRepr kind), PrettyBreak (1, 0), prettyAddress address ] ) | StoreOperation{ kind, address, value } => PrettyBlock (3, false, [], [ PrettyString("Store" ^ BackendIntermediateCode.loadStoreKindRepr kind), PrettyBreak (1, 0), prettyAddress address, PrettyBreak (1, 0), PrettyString "<=", PrettyBreak (1, 0), pretty value ] ) | BlockOperation{ kind, sourceLeft, destRight, length } => PrettyBlock (3, false, [], [ PrettyString(BackendIntermediateCode.blockOpKindRepr kind ^ "("), PrettyBreak (1, 0), prettyAddress sourceLeft, PrettyBreak (1, 0), PrettyString ",", prettyAddress destRight, PrettyBreak (1, 0), PrettyString ",", pretty length, PrettyBreak (1, 0), PrettyString ")" ] ) (* That list should be exhaustive! *) end (* pretty *) and prettyBinding(Declar dec) = prettySimpleBinding dec | prettyBinding(RecDecs ptl) = let fun prettyRDec {lambda, addr, use, ...} = block [ string ("Local" ^ Int.toString addr), space, string "(*", prettyUses "" use, space, string "*)", space, string "=", PrettyBreak (1, 2), PrettyBlock (2, false, [], [pretty(Lambda lambda)]) ] in PrettyBlock(0, true, [], string "val rec " :: pList(ptl, " and ", prettyRDec) ) end | prettyBinding(NullBinding c) = pretty c | prettyBinding(Container{addr, use, size, setter}) = PrettyBlock(1, false, [], [ string ("val Local" ^ Int.toString addr), space, string "(*", string "", space, prettyUses "" use, space, string "*)", space, string ("= Container " ^ Int.toString size), space, string "with", space, pretty setter ] ) and prettySimpleBinding{value, addr, use, ...} = PrettyBlock (1, false, [], [ string ("val Local" ^ Int.toString addr), space, string "(*", string "", space, prettyUses "" use, space, string "*)", space, string "=", PrettyBreak (1, 2), PrettyBlock (2, false, [], [pretty value]) ] ) and prettyUses prefix cl = PrettyBlock (1, true, [], PrettyString (prefix ^ "[") :: pList(cl, ",", prettyUsage) @ [ PrettyBreak (0, 0), PrettyString ("]") ] ) and prettyUsage UseGeneral = PrettyString "_" | prettyUsage UseExport = PrettyString "Export" | prettyUsage (UseApply (cl, al)) = PrettyBlock (1, true, [], string "(" :: pList(al, "|", fn _ => string "-") @ string ")" :: space :: string "->" :: space :: string "(" :: pList(cl, "|", prettyUsage) @ [ PrettyBreak (0, 0), string ")" ] ) | prettyUsage (UseField (n, cl)) = PrettyBlock (1, true, [], string ("UseField"^ Int.toString n ^ "[") :: pList(cl, ",", prettyUsage) @ [ PrettyBreak (0, 0), string "]" ] ) (* Mapping function to enable parts of the tree to be replaced. *) fun mapCodetree f code = let (* We use these functions to allow all nodes to be processed even if they are not full codetree nodes. 
*) fun deExtract(Extract l) = l | deExtract _ = raise Misc.InternalError "deExtract" fun deLambda (Lambda l) = l | deLambda _ = raise Misc.InternalError "deLambda" fun mapt (Newenv(decs, exp)) = let fun mapbinding(Declar{value, addr, use}) = Declar{value=mapCodetree f value, addr=addr, use=use} | mapbinding(RecDecs l) = RecDecs(map(fn {addr, lambda, use} => {addr=addr, use = use, lambda = deLambda(mapCodetree f (Lambda lambda))}) l) | mapbinding(NullBinding exp) = NullBinding(mapCodetree f exp) | mapbinding(Container{addr, use, size, setter}) = Container{addr=addr, use=use, size=size, setter=mapCodetree f setter} in Newenv(map mapbinding decs, mapCodetree f exp) end | mapt (c as Constnt _) = c | mapt (e as Extract _) = e | mapt (Indirect { base, offset, indKind }) = Indirect{ base = mapCodetree f base, offset = offset, indKind = indKind } | mapt (Eval { function, argList, resultType }) = Eval { function = mapCodetree f function, argList = map (fn(c, a) => (mapCodetree f c, a)) argList, resultType = resultType } | mapt GetThreadId = GetThreadId | mapt(Unary { oper, arg1 }) = Unary { oper = oper, arg1 = mapCodetree f arg1 } | mapt(Binary { oper, arg1, arg2 }) = Binary { oper = oper, arg1 = mapCodetree f arg1, arg2 = mapCodetree f arg2 } | mapt(Arbitrary { oper, shortCond, arg1, arg2, longCall }) = Arbitrary { oper = oper, shortCond = mapCodetree f shortCond, arg1 = mapCodetree f arg1, arg2 = mapCodetree f arg2, longCall = mapCodetree f longCall } | mapt(AllocateWordMemory { numWords, flags, initial }) = AllocateWordMemory { numWords = mapCodetree f numWords, flags = mapCodetree f flags, initial = mapCodetree f initial } | mapt (Lambda { body, isInline, name, closure, argTypes, resultType, localCount, recUse }) = Lambda { body = mapCodetree f body, isInline = isInline, name = name, closure = map (deExtract o (mapCodetree f) o Extract) closure, argTypes = argTypes, resultType = resultType, localCount = localCount, recUse = recUse } | mapt (Cond(i, t, e)) = Cond(mapCodetree f i, mapCodetree f t, mapCodetree f e) | mapt (BeginLoop{loop, arguments}) = BeginLoop { loop = mapCodetree f loop, arguments = map(fn({value, addr, use}, t) => ({value=mapCodetree f value, addr=addr, use=use}, t)) arguments } | mapt (Loop l) = Loop (map(fn(c, t) => (mapCodetree f c, t)) l) | mapt (Raise r) = Raise(mapCodetree f r) | mapt (Handle{exp, handler, exPacketAddr}) = Handle{exp=mapCodetree f exp, handler=mapCodetree f handler, exPacketAddr=exPacketAddr } | mapt (Tuple { fields, isVariant} ) = Tuple { fields = map (mapCodetree f) fields, isVariant = isVariant } | mapt (SetContainer{container, tuple, filter}) = SetContainer{ container = mapCodetree f container, tuple = mapCodetree f tuple, filter = filter } | mapt (TagTest{test, tag, maxTag}) = TagTest{test = mapCodetree f test, tag = tag, maxTag = maxTag } | mapt (LoadOperation{kind, address}) = LoadOperation{kind = kind, address = maptAddress address } | mapt (StoreOperation{kind, address, value}) = StoreOperation{kind = kind, address = maptAddress address, value=mapCodetree f value } | mapt (BlockOperation{kind, sourceLeft, destRight, length}) = BlockOperation{kind = kind, sourceLeft = maptAddress sourceLeft, destRight = maptAddress destRight, length=mapCodetree f length } and maptAddress({base, index, offset}: codeAddress): codeAddress = {base=mapCodetree f base, index=case index of NONE => NONE | SOME i => SOME(mapCodetree f i), offset=offset} in (* Apply f to node. If it returns SOME c use that otherwise traverse the tree. 
*) case f code of SOME c => c | NONE => mapt code end (* Fold a function over the tree. f is applied to the node and the input value and returns an output and a flag. If the flag is FOLD_DONT_DESCEND the output value is used and the code tree is not examined further. Otherwise this function descends into the tree and folds over the subtree. *) datatype foldControl = FOLD_DESCEND | FOLD_DONT_DESCEND fun foldtree (f: codetree * 'a -> 'a * foldControl) (input: 'a) code = let fun ftree (Newenv(decs, exp), v) = let fun foldbinding(Declar{value, ...}, w) = foldtree f w value | foldbinding(RecDecs l, w) = foldl(fn ({lambda, ...}, x) => foldtree f x (Lambda lambda)) w l | foldbinding(NullBinding exp, w) = foldtree f w exp | foldbinding(Container{setter, ...}, w) = foldtree f w setter in foldtree f (foldl foldbinding v decs) exp end | ftree (Constnt _, v) = v | ftree (Extract _, v) = v | ftree (Indirect{base, ...}, v) = foldtree f v base | ftree (Eval { function, argList, ...}, v) = foldl(fn((c, _), w) => foldtree f w c) (foldtree f v function) argList | ftree (GetThreadId, v) = v | ftree (Unary {arg1, ...}, v) = foldtree f v arg1 | ftree (Binary {arg1, arg2, ...}, v) = foldtree f (foldtree f v arg1) arg2 | ftree (Arbitrary {shortCond, arg1, arg2, longCall, ...}, v) = foldtree f (foldtree f (foldtree f (foldtree f v shortCond) arg1) arg2) longCall | ftree (AllocateWordMemory {numWords, flags, initial}, v) = foldtree f (foldtree f (foldtree f v numWords) flags) initial | ftree (Lambda { body, closure, ...}, v) = foldtree f (foldl (fn (c, w) => foldtree f w (Extract c)) v closure) body | ftree (Cond(i, t, e), v) = foldtree f (foldtree f (foldtree f v i) t) e | ftree (BeginLoop{loop, arguments, ...}, v) = foldtree f (foldl (fn (({value, ...}, _), w) => foldtree f w value) v arguments) loop | ftree (Loop l, v) = foldl (fn ((c, _), w) => foldtree f w c) v l | ftree (Raise r, v) = foldtree f v r | ftree (Handle{exp, handler, ...}, v) = foldtree f (foldtree f v exp) handler | ftree (Tuple { fields, ...}, v) = foldl (fn (c, w) => foldtree f w c) v fields | ftree (SetContainer { container, tuple, ...}, v) = foldtree f (foldtree f v container) tuple | ftree (TagTest{test, ...}, v) = foldtree f v test | ftree (LoadOperation{address, ...}, v) = fAddress address v | ftree (StoreOperation{address, value, ...}, v) = foldtree f (fAddress address v) value | ftree (BlockOperation{sourceLeft, destRight, length, ...}, v) = foldtree f (fAddress sourceLeft (fAddress destRight v)) length and fAddress {base, index=NONE, ...} v = foldtree f v base | fAddress {base, index=SOME index, ...} v = foldtree f (foldtree f v base) index in case f (code, input) of (v, FOLD_DONT_DESCEND) => v | (v, FOLD_DESCEND) => ftree(code, v) end structure Sharing = struct type codetree = codetree and pretty = pretty and inlineStatus = inlineStatus and argumentType = argumentType and loadStoreKind = loadStoreKind and blockOpKind = blockOpKind and codeBinding = codeBinding and simpleBinding = simpleBinding and loadForm = loadForm and envGeneral = envGeneral and envSpecial = envSpecial and codeUse = codeUse and foldControl = foldControl and unaryOps = BuiltIns.unaryOps and binaryOps = BuiltIns.binaryOps and arbPrecisionOps = arbPrecisionOps and testConditions = BuiltIns.testConditions and arithmeticOperations = BuiltIns.arithmeticOperations end end; diff --git a/mlsource/MLCompiler/CodeTree/BaseCodeTreeSig.sml b/mlsource/MLCompiler/CodeTree/BaseCodeTreeSig.sml index f718d9aa..96cdd5ca 100644 --- a/mlsource/MLCompiler/CodeTree/BaseCodeTreeSig.sml 
+++ b/mlsource/MLCompiler/CodeTree/BaseCodeTreeSig.sml @@ -1,220 +1,223 @@ (* Copyright (c) 2000 Cambridge University Technical Services Limited Modified David C. J. Matthews 2008-2010, 2013, 2016-18, 2020 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Signature for the basic codetree types and operations. *) signature BaseCodeTreeSig = sig type machineWord = Address.machineWord datatype inlineStatus = DontInline | InlineAlways | SmallInline datatype argumentType = GeneralType | DoubleFloatType | SingleFloatType datatype loadStoreKind = LoadStoreMLWord of {isImmutable: bool} (* Load/Store an ML word in an ML word cell. *) | LoadStoreMLByte of {isImmutable: bool} (* Load/Store a byte, tagging and untagging as appropriate, in an ML byte cell. *) | LoadStoreC8 (* Load/Store C values - The base address is a boxed SysWord.word value. *) | LoadStoreC16 | LoadStoreC32 | LoadStoreC64 | LoadStoreCFloat | LoadStoreCDouble | LoadStoreUntaggedUnsigned datatype blockOpKind = BlockOpMove of {isByteMove: bool} | BlockOpEqualByte | BlockOpCompareByte structure BuiltIns: BUILTINS datatype arbPrecisionOps = ArbCompare of BuiltIns.testConditions | ArbArith of BuiltIns.arithmeticOperations (* How variables are used. Added and examined by the optimisation pass. *) datatype codeUse = UseGeneral (* Used in some other context. *) | UseExport (* Exported i.e. the result of a top-level binding. *) | UseApply of codeUse list * codetree list (* Applied as a function - the list is where the result goes, the codetree list is the code that was used for each argument. *) | UseField of int * codeUse list (* Selected as a field - the list is where the result goes *) and codetree = Newenv of codeBinding list * codetree (* Set of bindings with an expression. *) | Constnt of machineWord * Universal.universal list (* Load a constant *) | Extract of loadForm | Indirect of {base: codetree, offset: int, indKind: indKind } (* Load a value from the heap or the stack. *) | Eval of (* Evaluate a function with an argument list. *) { function: codetree, argList: (codetree * argumentType) list, resultType: argumentType } (* Built-in functions. *) | Unary of {oper: BuiltIns.unaryOps, arg1: codetree} | Binary of {oper: BuiltIns.binaryOps, arg1: codetree, arg2: codetree} (* Arbitrary precision operations. This combines some conditionals with the operation. shortCond is the condition that must be satisfied for the short precision operation to be executed. longCall is called if either argument is long or the evaluation overflows. *) | Arbitrary of {oper: arbPrecisionOps, shortCond: codetree, arg1: codetree, arg2: codetree, longCall: codetree} | Lambda of lambdaForm (* Lambda expressions. *) | Cond of codetree * codetree * codetree (* If-statement *) | BeginLoop of (* Start of tail-recursive inline function. 
*) { loop: codetree, arguments: (simpleBinding * argumentType) list } | Loop of (codetree * argumentType) list (* Jump back to start of tail-recursive function. *) | Raise of codetree (* Raise an exception *) | Handle of (* Exception handler. *) { exp: codetree, handler: codetree, exPacketAddr: int } | Tuple of { fields: codetree list, isVariant: bool } (* Tuples and datatypes *) | SetContainer of { container: codetree, tuple: codetree, filter: BoolVector.vector} (* Copy a tuple to a container. *) | TagTest of { test: codetree, tag: word, maxTag: word } | LoadOperation of { kind: loadStoreKind, address: codeAddress } | StoreOperation of { kind: loadStoreKind, address: codeAddress, value: codetree } | BlockOperation of { kind: blockOpKind, sourceLeft: codeAddress, destRight: codeAddress, length: codetree } | GetThreadId | AllocateWordMemory of {numWords: codetree, flags: codetree, initial: codetree} and codeBinding = Declar of simpleBinding (* Make a local declaration or push an argument *) | RecDecs of { addr: int, lambda: lambdaForm, use: codeUse list } list (* Set of mutually recursive declarations. *) | NullBinding of codetree (* Just evaluate the expression and discard the result. *) | Container of { addr: int, use: codeUse list, size: int, setter: codetree } and loadForm = LoadArgument of int | LoadLocal of int | LoadClosure of int | LoadRecursive (* When we look up an entry in the environment we get a pair of a "general" value, which is either a constant or a load, and an optional special value, which is either a tuple or an inline function. Tuple entries are functions from an integer offset to one of these pairs; inline function entries are a lambda together with a map for the free variables. *) and envGeneral = EnvGenLoad of loadForm | EnvGenConst of machineWord * Universal.universal list and envSpecial = EnvSpecNone | EnvSpecTuple of int * (int -> envGeneral * envSpecial) | EnvSpecInlineFunction of lambdaForm * (int -> envGeneral * envSpecial) | EnvSpecUnary of BuiltIns.unaryOps * codetree | EnvSpecBinary of BuiltIns.binaryOps * codetree * codetree (* Indirection types. IndTuple is from a tuple so the field will always be present; IndVariant is from a datatype which may have other variants that do not have the field; IndContainer is from a container (a set of words on the stack). *) and indKind = IndTuple | IndVariant | IndContainer withtype simpleBinding = { (* Declare a value or push an argument. *) value: codetree, addr: int, use: codeUse list } and lambdaForm = { (* Lambda expressions. *) body : codetree, (* The body of the function. *) isInline : inlineStatus, (* Whether it's inline - modified by optimiser *) name : string, (* Text name for profiling etc. *) closure : loadForm list, (* List of items for closure. *) argTypes : (argumentType * codeUse list) list, (* "Types" and usage of arguments. *) resultType : argumentType, (* Result "type" of the function. *) localCount : int, (* Maximum (+1) declaration address for locals. *) recUse : codeUse list (* Recursive use of the function *) } - and codeAddress = {base: codetree, index: codetree option, offset: word} + (* Code address. The base is either a Poly address or a SysWord value for C + loads and stores. For Poly addresses the index and offset are unsigned + values; for C operations they are signed. 
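   Informally the byte address referred to is
       base + (index, if present) * element size + offset
   so a signed offset lets a C access legitimately address bytes before the
   base pointer; a Poly address never needs that because an ML object is only
   ever indexed forwards from its start. Purely illustrative arithmetic (the
   function name is invented for this note):

       (* Signed byte displacement from the base of a C access. *)
       fun displacement (index: int option, offset: int, elemSize: int) =
           (case index of NONE => 0 | SOME i => i * elemSize) + offset

       val _ = displacement (SOME (~1), ~4, 4)   (* ~8: eight bytes before the base *)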
*) + and codeAddress = {base: codetree, index: codetree option, offset: int} type pretty val pretty : codetree -> pretty val mapCodetree: (codetree -> codetree option) -> codetree -> codetree datatype foldControl = FOLD_DESCEND | FOLD_DONT_DESCEND val foldtree: (codetree * 'a -> 'a * foldControl) -> 'a -> codetree -> 'a structure CodeTags: sig val tupleTag: Universal.universal list list Universal.tag val inlineCodeTag: envSpecial Universal.tag val mergeTupleProps: Universal.universal list * Universal.universal list -> Universal.universal list end structure Sharing: sig type codetree = codetree and pretty = pretty and inlineStatus = inlineStatus and argumentType = argumentType and loadStoreKind = loadStoreKind and blockOpKind = blockOpKind and codeBinding = codeBinding and simpleBinding = simpleBinding and loadForm = loadForm and envGeneral = envGeneral and envSpecial = envSpecial and codeUse = codeUse and foldControl = foldControl and unaryOps = BuiltIns.unaryOps and binaryOps = BuiltIns.binaryOps and arbPrecisionOps = arbPrecisionOps and testConditions = BuiltIns.testConditions and arithmeticOperations = BuiltIns.arithmeticOperations end end; diff --git a/mlsource/MLCompiler/CodeTree/CODETREE.ML b/mlsource/MLCompiler/CodeTree/CODETREE.ML index fe1f8696..8c7e1c07 100644 --- a/mlsource/MLCompiler/CodeTree/CODETREE.ML +++ b/mlsource/MLCompiler/CodeTree/CODETREE.ML @@ -1,604 +1,604 @@ (* Copyright (c) 2012,13,15-17, 20 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor CODETREE ( structure DEBUG: DEBUGSIG structure PRETTY : PRETTYSIG structure BASECODETREE: BaseCodeTreeSig structure CODETREE_FUNCTIONS: CodetreeFunctionsSig structure BACKEND: sig type codetree type machineWord = Address.machineWord val codeGenerate: codetree * int * Universal.universal list -> (unit -> machineWord) * Universal.universal list structure Foreign: FOREIGNCALLSIG structure Sharing : sig type codetree = codetree end end structure OPTIMISER: sig type codetree and envSpecial and codeBinding val codetreeOptimiser: codetree * Universal.universal list * int -> { numLocals: int, general: codetree, bindings: codeBinding list, special: envSpecial } structure Sharing: sig type codetree = codetree and envSpecial = envSpecial and codeBinding = codeBinding end end sharing type PRETTY.pretty = BASECODETREE.pretty sharing BASECODETREE.Sharing = CODETREE_FUNCTIONS.Sharing = BACKEND.Sharing = OPTIMISER.Sharing ) : CODETREESIG = struct open Address; open StretchArray; open BASECODETREE; open PRETTY; open CODETREE_FUNCTIONS exception InternalError = Misc.InternalError and Interrupt = Thread.Thread.Interrupt infix 9 sub; fun mkDec (laddr, res) = Declar{value = res, addr = laddr, use=[]} fun deExtract(Extract ext) = ext | deExtract _ = raise InternalError "deExtract" datatype level = Level of { lev: int, closure: createClosure, lookup: int * int * bool -> loadForm } local (* We can have locals at the outer level. 
*) fun bottomLevel(addr, 0, false) = if addr < 0 then raise InternalError "load: negative" else LoadLocal addr | bottomLevel _ = (* Either the level is wrong or it's a parameter. *) raise InternalError "bottom level" in val baseLevel = Level { lev = 0, closure = makeClosure(), lookup = bottomLevel } end fun newLevel (Level{ lev, lookup, ...}) = let val closureList = makeClosure() val makeClosure = addToClosure closureList fun check n = if n < 0 then raise InternalError "load: negative" else n fun thisLevel(addr, level, isParam) = if level < 0 then raise InternalError "mkLoad: level must be non-negative" else if level > 0 then makeClosure(lookup(addr, level-1, isParam)) else (* This level *) if isParam then LoadArgument(check addr) else LoadLocal(check addr) in Level { lev = lev+1, closure = closureList, lookup = thisLevel } end fun getClosure(Level{ closure, ...}) = List.map Extract (extractClosure closure) fun mkLoad (addr, Level { lev = newLevel, lookup, ... } , Level { lev = oldLevel, ... }) = Extract(lookup(addr, newLevel - oldLevel, false)) and mkLoadParam(addr, Level { lev = newLevel, lookup, ... } , Level { lev = oldLevel, ... }) = Extract(lookup(addr, newLevel - oldLevel, true)) (* Transform a function so that free variables are converted to closure form. Returns the maximum local address used. *) fun genCode(pt, debugSwitches, numLocals) = let val printCodeTree = DEBUG.getParameter DEBUG.codetreeTag debugSwitches and compilerOut = PRETTY.getCompilerOutput debugSwitches (* val printCodeTree = true and compilerOut = PRETTY.prettyPrint(TextIO.print, 70) *) (* If required, print it first. This is the code that the front-end has produced. *) val () = if printCodeTree then compilerOut(pretty pt) else () (* This ensures that everything is printed just before it is code-generated. *) fun codeAndPrint(code, localCount) = let val () = if printCodeTree then compilerOut (BASECODETREE.pretty code) else (); in BACKEND.codeGenerate(code, localCount, debugSwitches) end (* Optimise it. *) val { numLocals = localCount, general = gen, bindings = decs, special = spec } = OPTIMISER.codetreeOptimiser(pt, debugSwitches, numLocals) (* At this stage we have a "general" value and also, possibly a "special" value. We could simply create mkEnv(decs, gen) and run preCode and genCode on that. However, we would lose the ability to insert any inline functions from this code into subsequent top-level expressions. We can't simply retain the "special" entry either because that may refer to values that have to be created once when the code is run. Such values will be referenced by "load" entries which refer to entries in the "decs". We construct a tuple which will contain the actual values after the code is run. Then if we want the value at some time in the future when we use something from the "special" entry we can extract the corresponding value from this tuple. Previously, this code always generated a tuple containing every declaration. That led to some very long compilation times because the back-end has some code which is quadratic in the number of entries on the stack. We now try to prune bindings by only generating the tuple if we have an inline function somewhere and only generating bindings we actually need. *) fun simplifySpec (EnvSpecTuple(size, env)) = let (* Get all the field entries. 
*) fun simpPair (gen, spec) = (gen, simplifySpec spec) val fields = List.tabulate(size, simpPair o env) in if List.all(fn (_, EnvSpecNone) => true | _ => false) fields then EnvSpecNone else EnvSpecTuple(size, fn n => List.nth(fields, n)) end | simplifySpec s = s (* None or inline function. *) in case simplifySpec spec of EnvSpecNone => let val (code, props) = codeAndPrint (mkEnv(decs, gen), localCount) in fn () => Constnt(code(), props) end | simpleSpec => let (* The bindings are marked using a three-valued mark. A binding is needed if it is referenced in any way. During the scan to find the references we need to avoid processing an entry that has already been processed but it is possible that a binding may be referenced as a general value only (e.g. from a function closure) and separately as a special value. See Test148.ML *) datatype visit = UnVisited | VisitedGeneral | VisitedSpecial local val refArray = Array.array(localCount, UnVisited) fun findDecs EnvSpecNone = () | findDecs (EnvSpecTuple(size, env)) = let val fields = List.tabulate(size, env) in List.app processGenAndSpec fields end | findDecs (EnvSpecInlineFunction({closure, ...}, env)) = let val closureItems = List.tabulate(List.length closure, env) in List.app processGenAndSpec closureItems end | findDecs (EnvSpecUnary _) = () | findDecs (EnvSpecBinary _) = () and processGenAndSpec (gen, spec) = (* The spec part needs only to be processed if this entry has not yet been visited, *) case gen of EnvGenLoad(LoadLocal addr) => let val previous = Array.sub(refArray, addr) in case (previous, spec) of (VisitedSpecial, _) => () (* Fully done *) | (VisitedGeneral, EnvSpecNone) => () (* Nothing useful *) | (_, EnvSpecNone) => (* We need this entry but we don't have any special entry to process. We could find another reference with a special entry. *) Array.update(refArray, addr, VisitedGeneral) | (_, _) => ( (* This has a special entry. Mark it and process. *) Array.update(refArray, addr, VisitedSpecial); findDecs spec ) end | EnvGenConst _ => () | _ => raise InternalError "doGeneral: not LoadLocal or Constant" val () = findDecs simpleSpec in (* Convert to an immutable data structure. This will continue to be referenced in any inline function after the code has run. *) val refVector = Array.vector refArray end val decArray = Array.array(localCount, CodeZero) fun addDec(addr, dec) = if Vector.sub(refVector, addr) <> UnVisited then Array.update(decArray, addr, dec) else () fun addDecs(Declar{addr, ...}) = addDec(addr, mkLoadLocal addr) | addDecs(RecDecs decs) = List.app(fn {addr, ...} => addDec(addr, mkLoadLocal addr)) decs | addDecs(NullBinding _) = () | addDecs(Container{addr, size, ...}) = addDec(addr, mkTupleFromContainer(addr, size)) val () = List.app addDecs decs (* Construct the tuple and add the "general" value at the start. *) val resultTuple = mkTuple(gen :: Array.foldr(op ::) nil decArray) (* Now generate the machine code and return it as a function that can be called. *) val (code, codeProps) = codeAndPrint (mkEnv (decs, resultTuple), localCount) in (* Return a function that executes the compiled code and then creates the final "global" value as the result. *) fn () => let local (* Execute the code. This will perform any side-effects the user has programmed and may raise an exception if that is required. *) val resVector = code () (* The result is a vector containing the "general" value as the first word and the evaluated bindings for any "special" entries in subsequent words. 
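   Concretely, word 0 of that vector is the "general" value and word a+1 holds
   the run-time value of local binding a (or zero if the binding was not
   marked as needed), which is why newEnviron below reads entry
   Word.fromInt addr + 0w1 and property field addr+1 for a LoadLocal addr.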
*) val decVals : address = if isShort resVector then raise InternalError "Result vector is not an address" else toAddress resVector in fun resultWordN n = loadWord (decVals, n) (* Get the general value, the zero'th entry in the vector. *) val generalVal = resultWordN 0w0 (* Get the properties for a field in the tuple. Because the result is a tuple all the properties should be contained in a tupleTag entry. *) val fieldProps = case Option.map (Universal.tagProject CodeTags.tupleTag) (List.find(Universal.tagIs CodeTags.tupleTag) codeProps) of NONE => (fn _ => []) | SOME p => (fn n => List.nth(p, n)) val generalProps = fieldProps 0 end (* Construct a new environment so that when an entry is looked up the corresponding constant is returned. *) fun newEnviron (oldEnv) args = let val (oldGeneral, oldSpecial) = oldEnv args val genPair = case oldGeneral of EnvGenLoad(LoadLocal addr) => ( (* For the moment retain this check. It's better to have an assertion failure than a segfault. *) Vector.sub(refVector, addr) <> UnVisited orelse raise InternalError "Reference to non-existent binding"; (resultWordN(Word.fromInt addr+0w1), fieldProps(addr+1)) ) | EnvGenConst c => c | _ => raise InternalError "codetree newEnviron: Not Extract or Constnt" val specVal = mapSpec oldSpecial in (EnvGenConst genPair, specVal) end and mapSpec EnvSpecNone = EnvSpecNone | mapSpec (EnvSpecTuple(size, env)) = EnvSpecTuple(size, newEnviron env) | mapSpec (EnvSpecInlineFunction(spec, env)) = EnvSpecInlineFunction(spec, (newEnviron env)) | mapSpec (EnvSpecUnary _) = EnvSpecNone | mapSpec (EnvSpecBinary _) = EnvSpecNone in (* and return the whole lot as a global value. *) Constnt(generalVal, setInline(mapSpec simpleSpec) generalProps) end end end (* genCode *) (* Constructor functions for the front-end of the compiler. *) local fun mkSimpleFunction inlineType (lval, args, name, closure, numLocals) = { body = lval, isInline = inlineType, name = if name = "" then "" else name, closure = map deExtract closure, argTypes = List.tabulate(args, fn _ => (GeneralType, [])), resultType = GeneralType, localCount = numLocals, recUse = [] } in val mkProc = Lambda o mkSimpleFunction DontInline (* Normal function *) and mkInlproc = Lambda o mkSimpleFunction InlineAlways (* Explicitly inlined by the front-end *) (* Unless Compiler.inlineFunctor is false functors are treated as macros and expanded when they are applied. Unlike core-language functions they are not first-class values so if they are inline the "value" returned in the initial binding can just be zero except if there is something in the closure. Almost always the closure will be empty since free variables will come from previous topdecs and will be constants, The exception is if a structure and a functor using the structure appear in the same topdec (no semicolon between them). In that case we can't leave it. We would have to update the closure even if we leave the body untouched but we could have closure entries that are constants. e.g. 
structure S = struct val x = 1 end functor F() = struct open S end *) fun mkMacroProc (args as (_, _, _, [], _)) = Constnt(toMachineWord 0, setInline ( EnvSpecInlineFunction(mkSimpleFunction InlineAlways args, fn _ => raise InternalError "mkMacroProc: closure")) []) | mkMacroProc args = Lambda(mkSimpleFunction InlineAlways args) end local fun mkFunWithTypes inlineType { body, argTypes=argsAndTypes, resultType, name, closure, numLocals } = Lambda { body = body, isInline = inlineType, name = if name = "" then "" else name, closure = map deExtract closure, argTypes = map (fn t => (t, [])) argsAndTypes, resultType = resultType, localCount = numLocals, recUse = [] } in val mkFunction = mkFunWithTypes DontInline and mkInlineFunction = mkFunWithTypes InlineAlways end fun mkEval (ct, clist) = Eval { function = ct, argList = List.map(fn c => (c, GeneralType)) clist, resultType=GeneralType } fun mkCall(func, argsAndTypes, resultType) = Eval { function = func, argList = argsAndTypes, resultType=resultType } (* Basic built-in operations. *) fun mkUnary (oper, arg1) = Unary { oper = oper, arg1 = arg1 } and mkBinary (oper, arg1, arg2) = Binary { oper = oper, arg1 = arg1, arg2 = arg2 } val getCurrentThreadId = GetThreadId and getCurrentThreadIdFn = mkInlproc(GetThreadId, 1 (* Ignores argument *), "GetThreadId()", [], 0) fun mkAllocateWordMemory (numWords, flags, initial) = AllocateWordMemory { numWords = numWords, flags = flags, initial = initial } val mkAllocateWordMemoryFn = mkInlproc( mkAllocateWordMemory(mkInd(0, mkLoadArgument 0), mkInd(1, mkLoadArgument 0), mkInd(2, mkLoadArgument 0)), 1, "AllocateWordMemory()", [], 0) (* Builtins wrapped as functions. N.B. These all take a single argument which may be a tuple. *) fun mkUnaryFn oper = mkInlproc(mkUnary(oper, mkLoadArgument 0), 1, BuiltIns.unaryRepr oper ^ "()", [], 0) and mkBinaryFn oper = mkInlproc(mkBinary(oper, mkInd(0, mkLoadArgument 0), mkInd(1, mkLoadArgument 0)), 1, BuiltIns.binaryRepr oper ^ "()", [], 0) local open BuiltIns (* Word equality. The value of isSigned doesn't matter. *) val eqWord = WordComparison{test=TestEqual, isSigned=false} in fun mkNot arg = Unary{oper=NotBoolean, arg1=arg} and mkIsShort arg = Unary{oper=IsTaggedValue, arg1=arg} and mkEqualTaggedWord (arg1, arg2) = Binary{oper=eqWord, arg1=arg1, arg2=arg2} and mkEqualPointerOrWord (arg1, arg2) = Binary{oper=PointerEq, arg1=arg1, arg2=arg2} val equalTaggedWordFn = (* This takes two words, not a tuple. *) mkInlproc(mkBinary(eqWord, mkLoadArgument 0, mkLoadArgument 1), 2, "EqualWord()", [], 0) and equalPointerOrWordFn = (* This takes two words, not a tuple. 
*) mkInlproc(mkBinary(PointerEq, mkLoadArgument 0, mkLoadArgument 1), 2, "EqualWord()", [], 0) end fun mkLoadOperation(oper, base, index) = - LoadOperation{kind=oper, address={base=base, index=SOME index, offset=0w0}} + LoadOperation{kind=oper, address={base=base, index=SOME index, offset=0}} fun mkLoadOperationFn oper = mkInlproc(mkLoadOperation(oper, mkInd(0, mkLoadArgument 0), mkInd(1, mkLoadArgument 0)), 1, "loadOperation()", [], 0) fun mkStoreOperation(oper, base, index, value) = - StoreOperation{kind=oper, address={base=base, index=SOME index, offset=0w0}, value=value} + StoreOperation{kind=oper, address={base=base, index=SOME index, offset=0}, value=value} fun mkStoreOperationFn oper = mkInlproc(mkStoreOperation(oper, mkInd(0, mkLoadArgument 0), mkInd(1, mkLoadArgument 0), mkInd(2, mkLoadArgument 0)), 1, "storeOperation()", [], 0) fun mkBlockOperation {kind, leftBase, leftIndex, rightBase, rightIndex, length } = BlockOperation { kind = kind, - sourceLeft={base=leftBase, index=SOME leftIndex, offset=0w0}, - destRight={base=rightBase, index=SOME rightIndex, offset=0w0}, length=length} + sourceLeft={base=leftBase, index=SOME leftIndex, offset=0}, + destRight={base=rightBase, index=SOME rightIndex, offset=0}, length=length} (* Construct a function that takes five arguments. The order is left-base, right-base, left-index, right-index, length. *) fun mkBlockOperationFn kind = mkInlproc( mkBlockOperation{kind=kind, leftBase=mkInd(0, mkLoadArgument 0), rightBase=mkInd(1, mkLoadArgument 0), leftIndex=mkInd(2, mkLoadArgument 0), rightIndex=mkInd(3, mkLoadArgument 0), length=mkInd(4, mkLoadArgument 0)}, 1, "blockOperation()", [], 0) fun identityFunction (name : string) : codetree = mkInlproc (mkLoadArgument 0, 1, name, [], 0) (* Returns its argument. *); (* Test a tag value. *) fun mkTagTest(test: codetree, tagValue: word, maxTag: word) = TagTest {test=test, tag=tagValue, maxTag=maxTag } fun mkHandle (exp, handler, exId) = Handle {exp = exp, handler = handler, exPacketAddr = exId} fun mkStr (strbuff:string) = Constnt (toMachineWord strbuff, []) (* If we have multiple references to a piece of code we may have to save it in a temporary and then use it from there. If the code has side-effects we certainly must do that to ensure that the side-effects are done exactly once and in the correct order, however if the code is just a constant or a load we can reduce the amount of code we generate by simply returning the original code. *) fun multipleUses (code as Constnt _, _, _) = {load = (fn _ => code), dec = []} (* | multipleUses (code as Extract(LoadLegacy{addr, level=loadLevel, ...}), _, level) = let (* May have to adjust the level. *) fun loadFn lev = if lev = level then code else mkLoad (addr, loadLevel + lev, level)) in {load = loadFn, dec = []} end | multipleUses (code as Extract(LoadLocal addr), _, level) = let (* May have to adjust the level. *) fun loadFn lev = if lev = level then code else mkLoad (addr, lev - level) in {load = loadFn, dec = []} end | multipleUses (code as Extract(LoadArgument _), _, level) = let (* May have to adjust the level. 
*) fun loadFn lev = if lev = level then code else raise InternalError "multipleUses: different level" (*else mkLoad (addr, lev - level)*) in {load = loadFn, dec = []} end | multipleUses (Extract _, _, _) = raise InternalError "multipleUses: TODO" *) | multipleUses (code, nextAddress, level) = let val addr = nextAddress(); fun loadFn lev = mkLoad (addr, lev, level); in {load = loadFn, dec = [mkDec (addr, code)]} end (* multipleUses *); fun mkMutualDecs [] = raise InternalError "mkMutualDecs: empty declaration list" | mkMutualDecs l = let fun convertDec(a, Lambda lam) = {lambda = lam, addr = a, use=[]} | convertDec _ = raise InternalError "mkMutualDecs: Recursive declaration is not a function" in RecDecs(List.map convertDec l) end val mkNullDec = NullBinding fun mkContainer(addr, size, setter) = Container{addr=addr, size=size, use=[], setter=setter} val mkIf = Cond and mkRaise = Raise fun mkConst v = Constnt(v, []) (* For the moment limit these to general arguments. *) fun mkLoop args = Loop (List.map(fn c => (c, GeneralType)) args) and mkBeginLoop(exp, args) = BeginLoop{loop=exp, arguments=List.map(fn(i, v) => ({value=v, addr=i, use=[]}, GeneralType)) args} fun mkWhile(b, e) = (* Generated as if b then (e; ) else (). *) mkBeginLoop(mkIf(b, mkEnv([NullBinding e], mkLoop[]), CodeZero), []) (* We previously had conditional-or and conditional-and as separate instructions. I've taken them out since they can be implemented just as efficiently as a normal conditional. In addition they were interfering with the optimisation where the second expression contained the last reference to something. We needed to add a "kill entry" to the other branch but there wasn't another branch to add it to. DCJM 7/12/00. *) fun mkCor(xp1, xp2) = mkIf(xp1, CodeTrue, xp2); fun mkCand(xp1, xp2) = mkIf(xp1, xp2, CodeZero); val mkSetContainer = fn (container, tuple, size) => mkSetContainer(container, tuple, BoolVector.tabulate(size, fn _ => true)) (* We don't generate the +, -, < etc operations directly here. Instead we create functions that the basis library can use to create the final versions by applying these functions to the arguments and an RTS function. The inline expansion system takes care of all the optimisation. An arbitrary precision operation takes a tuple consisting of a pair of arguments and a function. The code that is constructed checks both arguments to see if they are short. If they are not or the short precision operation overflows the code to call the function is executed. *) local val argX = mkInd(0, mkLoadArgument 0) and argY = mkInd(1, mkLoadArgument 0) val testShort = mkCand(mkIsShort argX, mkIsShort argY) val longCall = mkEval(mkInd(2, mkLoadArgument 0), [mkTuple[argX, argY]]) in fun mkArbitraryFn (oper as ArbArith arith) = mkInlproc( Arbitrary{oper=oper, shortCond=testShort, arg1=argX, arg2=argY, longCall=longCall }, 1, "Arbitrary" ^ BuiltIns.arithRepr arith ^ "()", [], 0) | mkArbitraryFn (oper as ArbCompare test) = (* The long function here is PolyCompareArbitrary which returns -1,0,+1 so the result has to be compared with zero. 
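   In source-level terms (a sketch only, longFn standing for the tupled RTS
   call) the long branch therefore has the shape

       longFn (x, y) < 0        for a "less than" test, and similarly
       longFn (x, y) = 0        for equality,

   i.e. call the arbitrary-precision comparison and then test its -1/0/+1
   result against zero with an ordinary signed word comparison, while the
   short branch compares the two tagged values directly.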
*) let val comparedResult = Binary{oper=BuiltIns.WordComparison{test=test, isSigned=true}, arg1=longCall, arg2=CodeZero} in mkInlproc( Arbitrary{oper=oper, shortCond=testShort, arg1=argX, arg2=argY, longCall=comparedResult }, 1, "Arbitrary" ^ BuiltIns.testRepr test ^ "()", [], 0) end end structure Foreign = BACKEND.Foreign structure Sharing = struct type machineWord = machineWord type codetree = codetree type pretty = pretty type argumentType=argumentType type codeBinding = codeBinding type level = level end end (* CODETREE functor body *); diff --git a/mlsource/MLCompiler/CodeTree/CODETREE_SIMPLIFIER.sml b/mlsource/MLCompiler/CodeTree/CODETREE_SIMPLIFIER.sml index fe3822b8..2ea3c308 100644 --- a/mlsource/MLCompiler/CodeTree/CODETREE_SIMPLIFIER.sml +++ b/mlsource/MLCompiler/CodeTree/CODETREE_SIMPLIFIER.sml @@ -1,1764 +1,1774 @@ (* Copyright (c) 2013, 2016-17, 2020 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* This is a cut-down version of the optimiser which simplifies the code but does not apply any heuristics. It follows chained bindings, in particular through tuples, folds constants expressions involving built-in functions, expands inline functions that have previously been marked as inlineable. It does not detect small functions that can be inlined nor does it code-generate functions without free variables. *) functor CODETREE_SIMPLIFIER( structure BASECODETREE: BaseCodeTreeSig structure CODETREE_FUNCTIONS: CodetreeFunctionsSig structure REMOVE_REDUNDANT: sig type codetree type loadForm type codeUse val cleanProc : (codetree * codeUse list * (int -> loadForm) * int) -> codetree structure Sharing: sig type codetree = codetree and loadForm = loadForm and codeUse = codeUse end end structure DEBUG: DEBUGSIG sharing BASECODETREE.Sharing = CODETREE_FUNCTIONS.Sharing = REMOVE_REDUNDANT.Sharing ) : sig type codetree and codeBinding and envSpecial val simplifier: { code: codetree, numLocals: int, maxInlineSize: int } -> (codetree * codeBinding list * envSpecial) * int * bool val specialToGeneral: codetree * codeBinding list * envSpecial -> codetree structure Sharing: sig type codetree = codetree and codeBinding = codeBinding and envSpecial = envSpecial end end = struct open BASECODETREE open Address open CODETREE_FUNCTIONS open BuiltIns exception InternalError = Misc.InternalError exception RaisedException (* The bindings are held internally as a reversed list. This is really only a check that the reversed and forward lists aren't confused. 
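   Sketch of the idiom (the helper names are illustrative, not part of this
   module): bindings are consed onto the head as they are produced and the
   list is reversed exactly once when it is finally wrapped into a Newenv,

       fun addBinding (b, RevList l) = RevList (b :: l)
       fun toBindings (RevList l) = List.rev l

   and because the reversed list has its own constructor, passing it where a
   forward codeBinding list is expected is a type error rather than a silent
   bug.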
*) datatype revlist = RevList of codeBinding list type simpContext = { lookupAddr: loadForm -> envGeneral * envSpecial, enterAddr: int * (envGeneral * envSpecial) -> unit, nextAddress: unit -> int, reprocess: bool ref, maxInlineSize: int } fun envGeneralToCodetree(EnvGenLoad ext) = Extract ext | envGeneralToCodetree(EnvGenConst w) = Constnt w fun mkDec (laddr, res) = Declar{value = res, addr = laddr, use=[]} fun mkEnv([], exp) = exp | mkEnv(decs, exp as Extract(LoadLocal loadAddr)) = ( (* A common case is where we have a binding as the last item and then a load of that binding. Reduce this so other optimisations are possible. This is still something of a special case that could/should be generalised. *) case List.last decs of Declar{addr=decAddr, value, ... } => if loadAddr = decAddr then mkEnv(List.take(decs, List.length decs - 1), value) else Newenv(decs, exp) | _ => Newenv(decs, exp) ) | mkEnv(decs, exp) = Newenv(decs, exp) fun isConstnt(Constnt _) = true | isConstnt _ = false (* Wrap up the general, bindings and special value as a codetree node. The special entry is discarded except for Constnt entries which are converted to ConstntWithInline. That allows any inlineable code to be carried forward to later passes. *) fun specialToGeneral(g, RevList(b as _ :: _), s) = mkEnv(List.rev b, specialToGeneral(g, RevList [], s)) | specialToGeneral(Constnt(w, p), RevList [], s) = Constnt(w, setInline s p) | specialToGeneral(g, RevList [], _) = g (* Convert a constant to a fixed value. Used in some constant folding. *) val toFix: machineWord -> FixedInt.int = FixedInt.fromInt o Word.toIntX o toShort local - val ffiSizeFloat: unit -> word = RunCall.rtsCallFast1 "PolySizeFloat" - and ffiSizeDouble: unit -> word = RunCall.rtsCallFast1 "PolySizeDouble" + val ffiSizeFloat: unit -> int = RunCall.rtsCallFast1 "PolySizeFloat" + and ffiSizeDouble: unit -> int = RunCall.rtsCallFast1 "PolySizeDouble" in (* If we have a constant index value we convert that into a byte offset. We need to know the size of the item on this platform. We have to make this check when we actually compile the code because the interpreted version will generally be run on a platform different from the one the pre-built compiler was compiled on. The ML word length will be the same because - we have separate pre-built compilers for 32 and 64-bit. *) - fun getMultiplier (LoadStoreMLWord _) = RunCall.bytesPerWord - | getMultiplier (LoadStoreMLByte _) = 0w1 - | getMultiplier LoadStoreC8 = 0w1 - | getMultiplier LoadStoreC16 = 0w2 - | getMultiplier LoadStoreC32 = 0w4 - | getMultiplier LoadStoreC64 = 0w8 - | getMultiplier LoadStoreCFloat = ffiSizeFloat() - | getMultiplier LoadStoreCDouble = ffiSizeDouble() - | getMultiplier LoadStoreUntaggedUnsigned = RunCall.bytesPerWord + we have separate pre-built compilers for 32 and 64-bit. + Loads from C memory use signed offsets. Loads from ML memory never + have a negative offset and are limited by the maximum size of a cell + so can always be unsigned. 
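   Each kind therefore yields a pair of the element size in bytes and a
   "signed" flag. When the index is a compile-time constant it is later folded
   into the byte offset, informally (a sketch; the folding itself is done in
   simpAddress)

       newOffset = offset + constantIndex * elementSize

   and a negative constantIndex or newOffset is only legitimate for the C
   kinds, which is why only those return true for the flag.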
*) + fun getMultiplier (LoadStoreMLWord _) = (Word.toInt RunCall.bytesPerWord, false (* unsigned *)) + | getMultiplier (LoadStoreMLByte _) = (1, false) + | getMultiplier LoadStoreC8 = (1, true (* signed *) ) + | getMultiplier LoadStoreC16 = (2, true (* signed *) ) + | getMultiplier LoadStoreC32 = (4, true (* signed *) ) + | getMultiplier LoadStoreC64 = (8, true (* signed *) ) + | getMultiplier LoadStoreCFloat = (ffiSizeFloat(), true (* signed *) ) + | getMultiplier LoadStoreCDouble = (ffiSizeDouble(), true (* signed *) ) + | getMultiplier LoadStoreUntaggedUnsigned = (Word.toInt RunCall.bytesPerWord, false (* unsigned *)) end fun simplify(c, s) = mapCodetree (simpGeneral s) c (* Process the codetree to return a codetree node. This is used when we don't want the special case. *) and simpGeneral { lookupAddr, ...} (Extract ext) = let val (gen, spec) = lookupAddr ext in SOME(specialToGeneral(envGeneralToCodetree gen, RevList [], spec)) end | simpGeneral context (Newenv envArgs) = SOME(specialToGeneral(simpNewenv(envArgs, context, RevList []))) | simpGeneral context (Lambda lambda) = SOME(Lambda(#1(simpLambda(lambda, context, NONE, NONE)))) | simpGeneral context (Eval {function, argList, resultType}) = SOME(specialToGeneral(simpFunctionCall(function, argList, resultType, context, RevList[]))) (* BuiltIn0 functions can't be processed specially. *) | simpGeneral context (Unary{oper, arg1}) = SOME(specialToGeneral(simpUnary(oper, arg1, context, RevList []))) | simpGeneral context (Binary{oper, arg1, arg2}) = SOME(specialToGeneral(simpBinary(oper, arg1, arg2, context, RevList []))) | simpGeneral context (Arbitrary{oper=ArbCompare test, shortCond, arg1, arg2, longCall}) = SOME(specialToGeneral(simpArbitraryCompare(test, shortCond, arg1, arg2, longCall, context, RevList []))) | simpGeneral context (Arbitrary{oper=ArbArith arith, shortCond, arg1, arg2, longCall}) = SOME(specialToGeneral(simpArbitraryArith(arith, shortCond, arg1, arg2, longCall, context, RevList []))) | simpGeneral context (AllocateWordMemory {numWords, flags, initial}) = SOME(specialToGeneral(simpAllocateWordMemory(numWords, flags, initial, context, RevList []))) | simpGeneral context (Cond(condTest, condThen, condElse)) = SOME(specialToGeneral(simpIfThenElse(condTest, condThen, condElse, context, RevList []))) | simpGeneral context (Tuple { fields, isVariant }) = SOME(specialToGeneral(simpTuple(fields, isVariant, context, RevList []))) | simpGeneral context (Indirect{ base, offset, indKind }) = SOME(specialToGeneral(simpFieldSelect(base, offset, indKind, context, RevList []))) | simpGeneral context (SetContainer{container, tuple, filter}) = let val optCont = simplify(container, context) val (cGen, cDecs, cSpec) = simpSpecial(tuple, context, RevList []) in case cSpec of (* If the tuple is a local binding it is simpler to pick it up from the "special" entry. *) EnvSpecTuple(size, recEnv) => let val fields = List.tabulate(size, envGeneralToCodetree o #1 o recEnv) in SOME(simpPostSetContainer(optCont, Tuple{isVariant=false, fields=fields}, cDecs, filter)) end | _ => SOME(simpPostSetContainer(optCont, cGen, cDecs, filter)) end | simpGeneral (context as { enterAddr, nextAddress, reprocess, ...}) (BeginLoop{loop, arguments, ...}) = let val didReprocess = ! reprocess (* To see if we really need the loop first try simply binding the arguments and process it. It's often the case that if one or more arguments is a constant that the looping case will be eliminated. 
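   Illustrative source-level example (not compiler code): a loop equivalent to

       let
           fun iter (i, limit) = if i >= limit then i else iter (i + 1, limit)
       in
           iter (0, 0)
       end

   simplifies, once the constant arguments are bound, to the constant 0: the
   comparison folds, no Loop node survives, and the BeginLoop wrapper can be
   dropped entirely.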
*) val withoutBeginLoop = simplify(mkEnv(List.map (Declar o #1) arguments, loop), context) fun foldLoop f n (Loop l) = f(l, n) | foldLoop f n (Newenv(_, exp)) = foldLoop f n exp | foldLoop f n (Cond(_, t, e)) = foldLoop f (foldLoop f n t) e | foldLoop f n (Handle {handler, ...}) = foldLoop f n handler | foldLoop f n (SetContainer{tuple, ...}) = foldLoop f n tuple | foldLoop _ n _ = n (* Check if the Loop instruction is there. This assumes that these are the only tail-recursive cases. *) val hasLoop = foldLoop (fn _ => true) false in if not (hasLoop withoutBeginLoop) then SOME withoutBeginLoop else let (* Reset "reprocess". It may have been set in the withoutBeginLoop that's not the code we're going to return. *) val () = reprocess := didReprocess (* We need the BeginLoop. Create new addresses for the arguments. *) fun declArg({addr, value, use, ...}, typ) = let val newAddr = nextAddress() in enterAddr(addr, (EnvGenLoad(LoadLocal newAddr), EnvSpecNone)); ({addr = newAddr, value = simplify(value, context), use = use }, typ) end (* Now look to see if the (remaining) loops have any arguments that do not change. Do this after processing because we could be eliminating other loops that may change the arguments. *) val declArgs = map declArg arguments val beginBody = simplify(loop, context) local fun argsMatch((Extract (LoadLocal argNo), _), ({addr, ...}, _)) = argNo = addr | argsMatch _ = false fun checkLoopArgs(loopArgs, checks) = let fun map3(loopA :: loopArgs, decA :: decArgs, checkA :: checkArgs) = (argsMatch(loopA, decA) andalso checkA) :: map3(loopArgs, decArgs, checkArgs) | map3 _ = [] in map3(loopArgs, declArgs, checks) end in val checkList = foldLoop checkLoopArgs (map (fn _ => true) arguments) beginBody end in if List.exists (fn l => l) checkList then let (* Turn the original arguments into bindings. *) local fun argLists(true, (arg, _), (tArgs, fArgs)) = (Declar arg :: tArgs, fArgs) | argLists(false, arg, (tArgs, fArgs)) = (tArgs, arg :: fArgs) in val (unchangedArgs, filteredDeclArgs) = ListPair.foldrEq argLists ([], []) (checkList, declArgs) end fun changeLoops (Loop loopArgs) = let val newArgs = ListPair.foldrEq(fn (false, arg, l) => arg :: l | (true, _, l) => l) [] (checkList, loopArgs) in Loop newArgs end | changeLoops(Newenv(decs, exp)) = Newenv(decs, changeLoops exp) | changeLoops(Cond(i, t, e)) = Cond(i, changeLoops t, changeLoops e) | changeLoops(Handle{handler, exp, exPacketAddr}) = Handle{handler=changeLoops handler, exp=exp, exPacketAddr=exPacketAddr} | changeLoops(SetContainer{tuple, container, filter}) = SetContainer{tuple=changeLoops tuple, container=container, filter=filter} | changeLoops code = code val beginBody = simplify(changeLoops loop, context) (* Reprocess because we've lost any special part from the arguments that haven't changed. *) val () = reprocess := true in SOME(mkEnv(unchangedArgs, BeginLoop {loop=beginBody, arguments=filteredDeclArgs})) end else SOME(BeginLoop {loop=beginBody, arguments=declArgs}) end end | simpGeneral context (TagTest{test, tag, maxTag}) = ( case simplify(test, context) of Constnt(testResult, _) => if isShort testResult andalso toShort testResult = tag then SOME CodeTrue else SOME CodeFalse | sTest => SOME(TagTest{test=sTest, tag=tag, maxTag=maxTag}) ) | simpGeneral context (LoadOperation{kind, address}) = let (* Try to move constants out of the index. 
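   For example (illustrative; the record shapes are simplified), an ML word
   load whose index is the constant 3,

       {base = b, index = SOME (constant 3), offset = 0}

   can become

       {base = b, index = NONE, offset = 3 * Word.toInt RunCall.bytesPerWord}

   and if the base is itself an immutable constant cell the load is then
   replaced by the loaded value in the code below.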
*) val (genAddress, RevList decAddress) = simpAddress(address, getMultiplier kind, context) (* If the base address and index are constant and this is an immutable load we can do this at compile time. *) val result = case (genAddress, kind) of ({base=Constnt(baseAddr, _), index=NONE, offset}, LoadStoreMLWord _) => if isShort baseAddr then LoadOperation{kind=kind, address=genAddress} else let (* Ignore the "isImmutable" flag and look at the immutable status of the memory. Check that this is a word object and that the offset is within range. The code for Vector.sub, for example, raises an exception if the index is out of range but still generates the (unreachable) indexing code. *) val addr = toAddress baseAddr - val wordOffset = offset div RunCall.bytesPerWord + val wordOffset = Word.fromInt offset div RunCall.bytesPerWord in if isMutable addr orelse not(isWords addr) orelse wordOffset >= length addr then LoadOperation{kind=kind, address=genAddress} else Constnt(toMachineWord(loadWord(addr, wordOffset)), []) end | ({base=Constnt(baseAddr, _), index=NONE, offset}, LoadStoreMLByte _) => if isShort baseAddr then LoadOperation{kind=kind, address=genAddress} else let val addr = toAddress baseAddr - val wordOffset = offset div RunCall.bytesPerWord + val wordOffset = Word.fromInt offset div RunCall.bytesPerWord in if isMutable addr orelse not(isBytes addr) orelse wordOffset >= length addr then LoadOperation{kind=kind, address=genAddress} - else Constnt(toMachineWord(loadByte(addr, offset)), []) + else Constnt(toMachineWord(loadByte(addr, Word.fromInt offset)), []) end | ({base=Constnt(baseAddr, _), index=NONE, offset}, LoadStoreUntaggedUnsigned) => if isShort baseAddr then LoadOperation{kind=kind, address=genAddress} else let val addr = toAddress baseAddr (* We don't currently have loadWordUntagged in Address but it's only ever used to load the string length word so we can use that. 
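   For example an untagged-unsigned load at offset 0 from the constant string
   "abc" folds at compile time to the constant 3, via String.size below.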
*) in - if isMutable addr orelse not(isBytes addr) orelse offset <> 0w0 + if isMutable addr orelse not(isBytes addr) orelse offset <> 0 then LoadOperation{kind=kind, address=genAddress} else Constnt(toMachineWord(String.size(RunCall.unsafeCast addr)), []) end | _ => LoadOperation{kind=kind, address=genAddress} in SOME(mkEnv(List.rev decAddress, result)) end | simpGeneral context (StoreOperation{kind, address, value}) = let val (genAddress, decAddress) = simpAddress(address, getMultiplier kind, context) val (genValue, RevList decValue, _) = simpSpecial(value, context, decAddress) in SOME(mkEnv(List.rev decValue, StoreOperation{kind=kind, address=genAddress, value=genValue})) end | simpGeneral (context as {reprocess, ...}) (BlockOperation{kind, sourceLeft, destRight, length}) = let val multiplier = case kind of - BlockOpMove{isByteMove=false} => RunCall.bytesPerWord - | BlockOpMove{isByteMove=true} => 0w1 - | BlockOpEqualByte => 0w1 - | BlockOpCompareByte => 0w1 - val (genSrcAddress, RevList decSrcAddress) = simpAddress(sourceLeft, multiplier, context) - val (genDstAddress, RevList decDstAddress) = simpAddress(destRight, multiplier, context) + BlockOpMove{isByteMove=false} => Word.toInt RunCall.bytesPerWord + | BlockOpMove{isByteMove=true} => 1 + | BlockOpEqualByte => 1 + | BlockOpCompareByte => 1 + val (genSrcAddress, RevList decSrcAddress) = simpAddress(sourceLeft, (multiplier, false), context) + val (genDstAddress, RevList decDstAddress) = simpAddress(destRight, (multiplier, false), context) val (genLength, RevList decLength, _) = simpSpecial(length, context, RevList []) (* If we have a short length move we're better doing it as a sequence of loads and stores. This is particularly useful with string concatenation. Small here means three or less. Four and eight byte moves are handled as single instructions in the code-generator provided the alignment is correct. *) val shortLength = case genLength of Constnt(lenConst, _) => if isShort lenConst then let val l = toShort lenConst in if l <= 0w3 then SOME l else NONE end else NONE | _ => NONE val combinedDecs = List.rev decSrcAddress @ List.rev decDstAddress @ List.rev decLength val operation = case (shortLength, kind) of (SOME length, BlockOpMove{isByteMove}) => let val _ = reprocess := true (* Frequently the source will be a constant. *) val {base=baseSrc, index=indexSrc, offset=offsetSrc} = genSrcAddress and {base=baseDst, index=indexDst, offset=offsetDst} = genDstAddress (* We don't know if the source is immutable but the destination definitely isn't *) val moveKind = if isByteMove then LoadStoreMLByte{isImmutable=false} else LoadStoreMLWord{isImmutable=false} fun makeMoves offset = - if offset = length + if offset = Word.toInt length then [] else NullBinding( StoreOperation{kind=moveKind, address={base=baseDst, index=indexDst, offset=offsetDst+offset*multiplier}, value=LoadOperation{kind=moveKind, address={base=baseSrc, index=indexSrc, offset=offsetSrc+offset*multiplier}}}) :: - makeMoves(offset+0w1) + makeMoves(offset+1) in - mkEnv(combinedDecs @ makeMoves 0w0, CodeZero (* unit result *)) + mkEnv(combinedDecs @ makeMoves 0, CodeZero (* unit result *)) end | (SOME length, BlockOpEqualByte) => (* Comparing with the null string and up to 3 characters. *) let val {base=baseSrc, index=indexSrc, offset=offsetSrc} = genSrcAddress and {base=baseDst, index=indexDst, offset=offsetDst} = genDstAddress val moveKind = LoadStoreMLByte{isImmutable=false} (* Build andalso tree to check each byte. For the null string this simply returns "true". 
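   E.g. for a constant length of 2 the generated tree has the source-level
   shape (the real nodes are Cond, Binary and LoadOperation)

       if left byte 0 = right byte 0
       then if left byte 1 = right byte 1 then true else false
       else false

   and for length 0 it is simply the constant true.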
*) fun makeComparison offset = - if offset = length + if offset = Word.toInt length then CodeTrue else Cond( Binary{oper=WordComparison{test=TestEqual, isSigned=false}, arg1=LoadOperation{kind=moveKind, address={base=baseSrc, index=indexSrc, offset=offsetSrc+offset*multiplier}}, arg2=LoadOperation{kind=moveKind, address={base=baseDst, index=indexDst, offset=offsetDst+offset*multiplier}}}, - makeComparison(offset+0w1), + makeComparison(offset+1), CodeFalse) in - mkEnv(combinedDecs, makeComparison 0w0) + mkEnv(combinedDecs, makeComparison 0) end | _ => mkEnv(combinedDecs, BlockOperation{kind=kind, sourceLeft=genSrcAddress, destRight=genDstAddress, length=genLength}) in SOME operation end | simpGeneral (context as {enterAddr, nextAddress, ...}) (Handle{exp, handler, exPacketAddr}) = let (* We need to make a new binding for the exception packet. *) val expBody = simplify(exp, context) val newAddr = nextAddress() val () = enterAddr(exPacketAddr, (EnvGenLoad(LoadLocal newAddr), EnvSpecNone)) val handleBody = simplify(handler, context) in SOME(Handle{exp=expBody, handler=handleBody, exPacketAddr=newAddr}) end | simpGeneral _ _ = NONE (* Where we have an Indirect or Eval we want the argument as either a tuple or an inline function respectively if that's possible. Getting that also involves various other cases as well. Because a binding may later be used in such a context we treat any binding in that way as well. *) and simpSpecial (Extract ext, { lookupAddr, ...}, tailDecs) = let val (gen, spec) = lookupAddr ext in (envGeneralToCodetree gen, tailDecs, spec) end | simpSpecial (Newenv envArgs, context, tailDecs) = simpNewenv(envArgs, context, tailDecs) | simpSpecial (Lambda lambda, context, tailDecs) = let val (gen, spec) = simpLambda(lambda, context, NONE, NONE) in (Lambda gen, tailDecs, spec) end | simpSpecial (Eval {function, argList, resultType}, context, tailDecs) = simpFunctionCall(function, argList, resultType, context, tailDecs) | simpSpecial (Unary{oper, arg1}, context, tailDecs) = simpUnary(oper, arg1, context, tailDecs) | simpSpecial (Binary{oper, arg1, arg2}, context, tailDecs) = simpBinary(oper, arg1, arg2, context, tailDecs) | simpSpecial (Arbitrary{oper=ArbCompare test, shortCond, arg1, arg2, longCall}, context, tailDecs) = simpArbitraryCompare(test, shortCond, arg1, arg2, longCall, context, tailDecs) | simpSpecial (Arbitrary{oper=ArbArith arith, shortCond, arg1, arg2, longCall}, context, tailDecs) = simpArbitraryArith(arith, shortCond, arg1, arg2, longCall, context, tailDecs) | simpSpecial (AllocateWordMemory{numWords, flags, initial}, context, tailDecs) = simpAllocateWordMemory(numWords, flags, initial, context, tailDecs) | simpSpecial (Cond(condTest, condThen, condElse), context, tailDecs) = simpIfThenElse(condTest, condThen, condElse, context, tailDecs) | simpSpecial (Tuple { fields, isVariant }, context, tailDecs) = simpTuple(fields, isVariant, context, tailDecs) | simpSpecial (Indirect{ base, offset, indKind }, context, tailDecs) = simpFieldSelect(base, offset, indKind, context, tailDecs) | simpSpecial (c: codetree, s: simpContext, tailDecs): codetree * revlist * envSpecial = let (* Anything else - copy it and then split it into the fields. *) fun split(Newenv(l, e), RevList tailDecs) = (* Pull off bindings. *) split (e, RevList(List.rev l @ tailDecs)) | split(Constnt(m, p), tailDecs) = (Constnt(m, p), tailDecs, findInline p) | split(c, tailDecs) = (c, tailDecs, EnvSpecNone) in split(simplify(c, s), tailDecs) end (* Process a Newenv. We need to add the bindings to the context. 
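   Illustrative effect (a source-level sketch, not compiler syntax):
   simplifying

       Newenv ([Declar{addr = 1, value = constant 5}],
               local 1 + constant 1)

   records local 1 as the constant 5 in the context, so the later Extract of
   local 1 becomes 5, the addition folds to 6, and no binding needs to be
   emitted at all.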
*) and simpNewenv((envDecs: codeBinding list, envExp), context as { enterAddr, nextAddress, reprocess, ...}, tailDecs): codetree * revlist * envSpecial = let fun copyDecs ([], decs) = simpSpecial(envExp, context, decs) (* End of the list - process the result expression. *) | copyDecs ((Declar{addr, value, ...} :: vs), decs) = ( case simpSpecial(value, context, decs) of (* If this raises an exception stop here. *) vBinding as (Raise _, _, _) => vBinding | vBinding => let (* Add the declaration to the table. *) val (optV, dec) = makeNewDecl(vBinding, context) val () = enterAddr(addr, optV) in copyDecs(vs, dec) end ) | copyDecs(NullBinding v :: vs, decs) = (* Not a binding - process this and the rest.*) ( case simpSpecial(v, context, decs) of (* If this raises an exception stop here. *) vBinding as (Raise _, _, _) => vBinding | (cGen, RevList cDecs, _) => copyDecs(vs, RevList(NullBinding cGen :: cDecs)) ) | copyDecs(RecDecs mutuals :: vs, RevList decs) = (* Mutually recursive declarations. Any of the declarations may refer to any of the others. They should all be lambdas. The front end generates functions with more than one argument (either curried or tupled) as pairs of mutually recursive functions. The main function body takes its arguments on the stack (or in registers) and the auxiliary inline function, possibly nested, takes the tupled or curried arguments and calls it. If the main function is recursive it will first call the inline function which is why the pair are mutually recursive. As far as possible we want to use the main function since that uses the least memory. Specifically, if the function recurses we want the recursive call to pass all the arguments if it can. *) let (* Reorder the function so the explicitly-inlined ones come first. Their code can then be inserted into the main functions. *) local val (inlines, nonInlines) = List.partition ( fn {lambda = { isInline=DontInline, ...}, ... } => false | _ => true) mutuals in val orderedDecs = inlines @ nonInlines end (* Go down the functions creating new addresses for them and entering them in the table. *) val addresses = map (fn {addr, ... } => let val decAddr = nextAddress() in enterAddr (addr, (EnvGenLoad(LoadLocal decAddr), EnvSpecNone)); decAddr end) orderedDecs fun processFunction({ lambda, addr, ... }, newAddr) = let val (gen, spec) = simpLambda(lambda, context, SOME addr, SOME newAddr) (* Update the entry in the table to include any inlineable function. *) val () = enterAddr (addr, (EnvGenLoad (LoadLocal newAddr), spec)) in {addr=newAddr, lambda=gen, use=[]} end val rlist = ListPair.map processFunction (orderedDecs, addresses) in (* and put these declarations onto the list. *) copyDecs(vs, RevList(List.rev(partitionMutualBindings(RecDecs rlist)) @ decs)) end | copyDecs (Container{addr, size, setter, ...} :: vs, RevList decs) = let (* Enter the new address immediately - it's needed in the setter. *) val decAddr = nextAddress() val () = enterAddr (addr, (EnvGenLoad(LoadLocal decAddr), EnvSpecNone)) val (setGen, RevList setDecs, _) = simpSpecial(setter, context, RevList []) in (* If we have inline expanded a function that sets the container we're better off eliminating the container completely. *) case setGen of SetContainer { tuple, filter, container } => let (* Check the container we're setting is the address we've made for it. 
*) val _ = (case container of Extract(LoadLocal a) => a = decAddr | _ => false) orelse raise InternalError "copyDecs: Container/SetContainer" val newDecAddr = nextAddress() val () = enterAddr (addr, (EnvGenLoad(LoadLocal newDecAddr), EnvSpecNone)) val tupleAddr = nextAddress() val tupleDec = Declar{addr=tupleAddr, use=[], value=tuple} val tupleLoad = mkLoadLocal tupleAddr val resultTuple = BoolVector.foldri(fn (i, true, l) => mkInd(i, tupleLoad) :: l | (_, false, l) => l) [] filter val _ = List.length resultTuple = size orelse raise InternalError "copyDecs: Container/SetContainer size" val containerDec = Declar{addr=newDecAddr, use=[], value=mkTuple resultTuple} (* TODO: We're replacing a container with what is notionally a tuple on the heap. It should be optimised away as a result of a further pass but we currently have indirections from a container for these. On the native platforms that doesn't matter but on 32-in-64 indirecting from the heap and from the stack are different. *) val _ = reprocess := true in copyDecs(vs, RevList(containerDec :: tupleDec :: setDecs @ decs)) end | _ => let (* The setDecs could refer the container itself if we've optimised this with simpPostSetContainer so we must include them within the setter and not lift them out. *) val dec = Container{addr=decAddr, use=[], size=size, setter=mkEnv(List.rev setDecs, setGen)} in copyDecs(vs, RevList(dec :: decs)) end end in copyDecs(envDecs, tailDecs) end (* Prepares a binding for entry into a look-up table. Returns the entry to put into the table together with any bindings that must be made. If the general part of the optVal is a constant we can just put the constant in the table. If it is a load (Extract) it is just renaming an existing entry so we can return it. Otherwise we have to make a new binding and return a load (Extract) entry for it. *) and makeNewDecl((Constnt w, RevList decs, spec), _) = ((EnvGenConst w, spec), RevList decs) (* No need to create a binding for a constant. *) | makeNewDecl((Extract ext, RevList decs, spec), _) = ((EnvGenLoad ext, spec), RevList decs) (* Binding is simply giving a new name to a variable - can ignore this declaration. *) | makeNewDecl((gen, RevList decs, spec), { nextAddress, ...}) = let (* Create a binding for this value. *) val newAddr = nextAddress() in ((EnvGenLoad(LoadLocal newAddr), spec), RevList(mkDec(newAddr, gen) :: decs)) end and simpLambda({body, isInline, name, argTypes, resultType, closure, localCount, ...}, { lookupAddr, reprocess, maxInlineSize, ... }, myOldAddrOpt, myNewAddrOpt) = let (* A new table for the new function. *) val oldAddrTab = Array.array (localCount, NONE) val optClosureList = makeClosure() val isNowRecursive = ref false local fun localOldAddr (LoadLocal addr) = valOf(Array.sub(oldAddrTab, addr)) | localOldAddr (ext as LoadArgument _) = (EnvGenLoad ext, EnvSpecNone) | localOldAddr (ext as LoadRecursive) = (EnvGenLoad ext, EnvSpecNone) | localOldAddr (LoadClosure addr) = let val oldEntry = List.nth(closure, addr) (* If the entry in the closure is our own address this is recursive. *) fun isRecursive(EnvGenLoad(LoadLocal a), SOME b) = if a = b then (isNowRecursive := true; true) else false | isRecursive _ = false in if isRecursive(EnvGenLoad oldEntry, myOldAddrOpt) then (EnvGenLoad LoadRecursive, EnvSpecNone) else let val newEntry = lookupAddr oldEntry val makeClosure = addToClosure optClosureList fun convertResult(genEntry, specEntry) = (* If after looking up the entry we get our new address it's recursive. 
*) if isRecursive(genEntry, myNewAddrOpt) then (EnvGenLoad LoadRecursive, EnvSpecNone) else let val newGeneral = case genEntry of EnvGenLoad ext => EnvGenLoad(makeClosure ext) | EnvGenConst w => EnvGenConst w (* Have to modify the environment here so that if we look up free variables we add them to the closure. *) fun convertEnv env args = convertResult(env args) val newSpecial = case specEntry of EnvSpecTuple(size, env) => EnvSpecTuple(size, convertEnv env) | EnvSpecInlineFunction(spec, env) => EnvSpecInlineFunction(spec, convertEnv env) | EnvSpecUnary _ => EnvSpecNone (* Don't pass this in *) | EnvSpecBinary _ => EnvSpecNone (* Don't pass this in *) | EnvSpecNone => EnvSpecNone in (newGeneral, newSpecial) end in convertResult newEntry end end and setTab (index, v) = Array.update (oldAddrTab, index, SOME v) in val newAddressAllocator = ref 0 fun mkAddr () = ! newAddressAllocator before newAddressAllocator := ! newAddressAllocator + 1 val newCode = simplify (body, { enterAddr = setTab, lookupAddr = localOldAddr, nextAddress=mkAddr, reprocess = reprocess, maxInlineSize = maxInlineSize }) end val closureAfterOpt = extractClosure optClosureList val localCount = ! newAddressAllocator (* If we have mutually recursive "small" functions we may turn them into recursive functions. We have to remove the "small" status from them to prevent them from being expanded inline anywhere else. The optimiser may turn them back into "small" functions if the recursion is actually tail-recursion. *) val isNowInline = case isInline of SmallInline => if ! isNowRecursive then DontInline else SmallInline | InlineAlways => (* Functions marked as inline could become recursive as a result of other inlining. *) if ! isNowRecursive then DontInline else InlineAlways | DontInline => DontInline (* Clean up the function body at this point if it could be inlined. There are examples where failing to do this can blow up. This can be the result of creating both a general and special function inside an inline function. *) val cleanBody = if isNowInline = DontInline then newCode else REMOVE_REDUNDANT.cleanProc(newCode, [UseExport], LoadClosure, localCount) (* The optimiser checks the size of a function and decides whether it can be inlined. However if we have expanded some other inlines inside the body it may now be too big. In some cases we can get exponential blow-up. We check here that the body is still small enough before allowing it to be used inline. The limit is set to 10 times the optimiser's limit because it seems that otherwise significant functions are not inlined. *) val stillInline = case isNowInline of SmallInline => if evaluateInlining(cleanBody, List.length argTypes, maxInlineSize*10) <> TooBig then SmallInline else DontInline | inl => inl val copiedLambda: lambdaForm = { body = cleanBody, isInline = isNowInline, name = name, closure = closureAfterOpt, argTypes = argTypes, resultType = resultType, localCount = localCount, recUse = [] } (* The optimiser checks the size of a function and decides whether it can be inlined. However if we have expanded some other inlines inside the body it may now be too big. In some cases we can get exponential blow-up. We check here that the body is still small enough before allowing it to be used inline. 
*) val inlineCode = if stillInline <> DontInline then EnvSpecInlineFunction(copiedLambda, fn addr => (EnvGenLoad(List.nth(closureAfterOpt, addr)), EnvSpecNone)) else EnvSpecNone in ( copiedLambda, inlineCode ) end and simpFunctionCall(function, argList, resultType, context as { reprocess, maxInlineSize, ...}, tailDecs) = let (* Function call - This may involve inlining the function. *) (* Get the function to be called and see if it is inline or a lambda expression. *) val (genFunct, decsFunct, specFunct) = simpSpecial(function, context, tailDecs) (* We have to make a special check here that we are not passing in the function we are trying to expand. This could result in an infinitely recursive expansion. It is only going to happen in very special circumstances such as a definition of the Y combinator. If we see that we don't attempt to expand inline. It could be embedded in a tuple or the closure of a function as well as passed directly. *) val isRecursiveArg = case function of Extract extOrig => let fun containsFunction(Extract thisArg, v) = (v orelse thisArg = extOrig, FOLD_DESCEND) | containsFunction(Lambda{closure, ...}, v) = (* Only the closure, not the body *) (foldl (fn (c, w) => foldtree containsFunction w (Extract c)) v closure, FOLD_DONT_DESCEND) | containsFunction(Eval _, v) = (v, FOLD_DONT_DESCEND) (* OK if it's called *) | containsFunction(_, v) = (v, FOLD_DESCEND) in List.exists(fn (c, _) => foldtree containsFunction false c) argList end | _ => false in case (specFunct, genFunct, isRecursiveArg) of (EnvSpecInlineFunction({body=lambdaBody, localCount, argTypes, ...}, functEnv), _, false) => let val _ = List.length argTypes = List.length argList orelse raise InternalError "simpFunctionCall: argument mismatch" val () = reprocess := true (* If we expand inline we have to reprocess *) and { nextAddress, reprocess, ...} = context (* Expand a function inline, either one marked explicitly to be inlined or one detected as "small". *) (* Calling inline proc or a lambda expression which is just called. The function is replaced with a block containing declarations of the parameters. We need a new table here because the addresses we use to index it are the addresses which are local to the function. New addresses are created in the range of the surrounding function. *) val localVec = Array.array(localCount, NONE) local fun processArgs([], bindings) = ([], bindings) | processArgs((arg, _)::args, bindings) = let val (thisArg, newBindings) = makeNewDecl(simpSpecial(arg, context, bindings), context) val (otherArgs, resBindings) = processArgs(args, newBindings) in (thisArg::otherArgs, resBindings) end val (params, bindings) = processArgs(argList, decsFunct) val paramVec = Vector.fromList params in fun getParameter n = Vector.sub(paramVec, n) (* Bindings necessary for the arguments *) val copiedArgs = bindings end local fun localOldAddr(LoadLocal addr) = valOf(Array.sub(localVec, addr)) | localOldAddr(LoadArgument addr) = getParameter addr | localOldAddr(LoadClosure closureEntry) = functEnv closureEntry | localOldAddr LoadRecursive = raise InternalError "localOldAddr: LoadRecursive" fun setTabForInline (index, v) = Array.update (localVec, index, SOME v) val lambdaContext = { lookupAddr=localOldAddr, enterAddr=setTabForInline, nextAddress=nextAddress, reprocess = reprocess, maxInlineSize = maxInlineSize } in val (cGen, cDecs, cSpec) = simpSpecial(lambdaBody,lambdaContext, copiedArgs) end in (cGen, cDecs, cSpec) end | (_, gen as Constnt _, _) => (* Not inlinable - constant function. 
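That is, the function value has already been compiled to a code object, for instance one imported from the global environment, so there is no EnvSpecInlineFunction carrying a codetree body to expand; all we can do is generate a direct call to it.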
*) let val copiedArgs = map (fn (arg, argType) => (simplify(arg, context), argType)) argList val evCopiedCode = Eval {function = gen, argList = copiedArgs, resultType=resultType} in (evCopiedCode, decsFunct, EnvSpecNone) end | (_, gen, _) => (* Anything else. *) let val copiedArgs = map (fn (arg, argType) => (simplify(arg, context), argType)) argList val evCopiedCode = Eval {function = gen, argList = copiedArgs, resultType=resultType} in (evCopiedCode, decsFunct, EnvSpecNone) end end (* Special processing for the current builtIn1 operations. *) (* Constant folding for built-ins. These ought to be type-correct i.e. we should have tagged values in some cases and addresses in others. However there may be run-time tests that would ensure type-correctness and we can't be sure that they will always be folded at compile-time. e.g. we may have if isShort c then shortOp c else longOp c If c is a constant then we may try to fold both the shortOp and the longOp and one of these will be type-incorrect although never executed at run-time. *) and simpUnary(oper, arg1, context as { reprocess, ...}, tailDecs) = let val (genArg1, decArg1, specArg1) = simpSpecial(arg1, context, tailDecs) in case (oper, genArg1) of (NotBoolean, Constnt(v, _)) => ( reprocess := true; (if isShort v andalso toShort v = 0w0 then CodeTrue else CodeFalse, decArg1, EnvSpecNone) ) | (NotBoolean, genArg1) => ( (* NotBoolean: This can be the result of using Bool.not but more usually occurs as a result of other code. We don't have TestNotEqual or IsAddress so both of these use NotBoolean with TestEqual and IsTagged. Also we can insert a NotBoolean as a result of a Cond. We try to eliminate not(not a) and to push other NotBooleans down to a point where a boolean is tested. *) case specArg1 of EnvSpecUnary(NotBoolean, originalArg) => ( (* not(not a) - Eliminate. *) reprocess := true; (originalArg, decArg1, EnvSpecNone) ) | _ => (* Otherwise pass this on. It is also extracted in a Cond. *) (Unary{oper=NotBoolean, arg1=genArg1}, decArg1, EnvSpecUnary(NotBoolean, genArg1)) ) | (IsTaggedValue, Constnt(v, _)) => ( reprocess := true; (if isShort v then CodeTrue else CodeFalse, decArg1, EnvSpecNone) ) | (IsTaggedValue, genArg1) => ( (* We use this to test for nil values and if we have constructed a record (or possibly a function) it can't be null. *) case specArg1 of EnvSpecTuple _ => (CodeFalse, decArg1, EnvSpecNone) before reprocess := true | EnvSpecInlineFunction _ => (CodeFalse, decArg1, EnvSpecNone) before reprocess := true | _ => (Unary{oper=oper, arg1=genArg1}, decArg1, EnvSpecNone) ) | (MemoryCellLength, Constnt(v, _)) => ( reprocess := true; (if isShort v then CodeZero else Constnt(toMachineWord(Address.length(toAddress v)), []), decArg1, EnvSpecNone) ) | (MemoryCellFlags, Constnt(v, _)) => ( reprocess := true; (if isShort v then CodeZero else Constnt(toMachineWord(Address.flags(toAddress v)), []), decArg1, EnvSpecNone) ) | (LongWordToTagged, Constnt(v, _)) => ( reprocess := true; (Constnt(toMachineWord(Word.fromLargeWord(RunCall.unsafeCast v)), []), decArg1, EnvSpecNone) ) | (LongWordToTagged, genArg1) => ( (* If we apply LongWordToTagged to an argument we have created with UnsignedToLongWord we can return the original argument. 
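A typical source-level case, illustrative only (the name byteToWord is not from this change):

    val byteToWord = Word.fromLargeWord o Word8.toLargeWord

Here the widening conversion is undone immediately by the narrowing one, so eliminating the pair avoids ever allocating the boxed LargeWord.word intermediate.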
*) case specArg1 of EnvSpecUnary(UnsignedToLongWord, originalArg) => ( reprocess := true; (originalArg, decArg1, EnvSpecNone) ) | _ => (Unary{oper=LongWordToTagged, arg1=genArg1}, decArg1, EnvSpecNone) ) | (SignedToLongWord, Constnt(v, _)) => ( reprocess := true; (Constnt(toMachineWord(Word.toLargeWordX(RunCall.unsafeCast v)), []), decArg1, EnvSpecNone) ) | (UnsignedToLongWord, Constnt(v, _)) => ( reprocess := true; (Constnt(toMachineWord(Word.toLargeWord(RunCall.unsafeCast v)), []), decArg1, EnvSpecNone) ) | (UnsignedToLongWord, genArg1) => (* Add the operation as the special entry. It can then be recognised by LongWordToTagged. *) (Unary{oper=oper, arg1=genArg1}, decArg1, EnvSpecUnary(UnsignedToLongWord, genArg1)) | _ => (Unary{oper=oper, arg1=genArg1}, decArg1, EnvSpecNone) end and simpBinary(oper, arg1, arg2, context as {reprocess, ...}, tailDecs) = let val (genArg1, decArg1, _ (*specArg1*)) = simpSpecial(arg1, context, tailDecs) val (genArg2, decArgs, _ (*specArg2*)) = simpSpecial(arg2, context, decArg1) in case (oper, genArg1, genArg2) of (WordComparison{test, isSigned}, Constnt(v1, _), Constnt(v2, _)) => if not(isShort v1) orelse not(isShort v2) (* E.g. arbitrary precision on unreachable path. *) then (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) else let val () = reprocess := true val testResult = case (test, isSigned) of (* TestEqual can be applied to addresses. *) (TestEqual, _) => toShort v1 = toShort v2 | (TestLess, false) => toShort v1 < toShort v2 | (TestLessEqual, false) => toShort v1 <= toShort v2 | (TestGreater, false) => toShort v1 > toShort v2 | (TestGreaterEqual, false) => toShort v1 >= toShort v2 | (TestLess, true) => toFix v1 < toFix v2 | (TestLessEqual, true) => toFix v1 <= toFix v2 | (TestGreater, true) => toFix v1 > toFix v2 | (TestGreaterEqual, true) => toFix v1 >= toFix v2 | (TestUnordered, _) => raise InternalError "WordComparison: TestUnordered" in (if testResult then CodeTrue else CodeFalse, decArgs, EnvSpecNone) end | (PointerEq, Constnt(v1, _), Constnt(v2, _)) => ( reprocess := true; (if RunCall.pointerEq(v1, v2) then CodeTrue else CodeFalse, decArgs, EnvSpecNone) ) | (FixedPrecisionArith arithOp, Constnt(v1, _), Constnt(v2, _)) => if not(isShort v1) orelse not(isShort v2) then (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) else let val () = reprocess := true val v1S = toFix v1 and v2S = toFix v2 fun asConstnt v = Constnt(toMachineWord v, []) val raiseOverflow = Raise(Constnt(toMachineWord Overflow, [])) val raiseDiv = Raise(Constnt(toMachineWord Div, [])) (* ?? There's usually an explicit test. *) val resultCode = case arithOp of ArithAdd => (asConstnt(v1S+v2S) handle Overflow => raiseOverflow) | ArithSub => (asConstnt(v1S-v2S) handle Overflow => raiseOverflow) | ArithMult => (asConstnt(v1S*v2S) handle Overflow => raiseOverflow) | ArithQuot => (asConstnt(FixedInt.quot(v1S,v2S)) handle Overflow => raiseOverflow | Div => raiseDiv) | ArithRem => (asConstnt(FixedInt.rem(v1S,v2S)) handle Overflow => raiseOverflow | Div => raiseDiv) | ArithDiv => (asConstnt(FixedInt.div(v1S,v2S)) handle Overflow => raiseOverflow | Div => raiseDiv) | ArithMod => (asConstnt(FixedInt.mod(v1S,v2S)) handle Overflow => raiseOverflow | Div => raiseDiv) in (resultCode, decArgs, EnvSpecNone) end (* Addition and subtraction of zero. These can arise as a result of inline expansion of more general functions. 
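An illustrative case, not taken from this change: after inlining

    fun addOffset (k: word) w = w + k

at a call site addOffset 0w0 x, the simplifier is left with x + 0w0, which the rules below reduce to just x.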
*) | (FixedPrecisionArith ArithAdd, arg1, Constnt(v2, _)) => if isShort v2 andalso toShort v2 = 0w0 then (arg1, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (FixedPrecisionArith ArithAdd, Constnt(v1, _), arg2) => if isShort v1 andalso toShort v1 = 0w0 then (arg2, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (FixedPrecisionArith ArithSub, arg1, Constnt(v2, _)) => if isShort v2 andalso toShort v2 = 0w0 then (arg1, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (WordArith arithOp, Constnt(v1, _), Constnt(v2, _)) => if not(isShort v1) orelse not(isShort v2) then (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) else let val () = reprocess := true val v1S = toShort v1 and v2S = toShort v2 fun asConstnt v = Constnt(toMachineWord v, []) val resultCode = case arithOp of ArithAdd => asConstnt(v1S+v2S) | ArithSub => asConstnt(v1S-v2S) | ArithMult => asConstnt(v1S*v2S) | ArithQuot => raise InternalError "WordArith: ArithQuot" | ArithRem => raise InternalError "WordArith: ArithRem" | ArithDiv => asConstnt(v1S div v2S) | ArithMod => asConstnt(v1S mod v2S) in (resultCode, decArgs, EnvSpecNone) end | (WordArith ArithAdd, arg1, Constnt(v2, _)) => if isShort v2 andalso toShort v2 = 0w0 then (arg1, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (WordArith ArithAdd, Constnt(v1, _), arg2) => if isShort v1 andalso toShort v1 = 0w0 then (arg2, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (WordArith ArithSub, arg1, Constnt(v2, _)) => if isShort v2 andalso toShort v2 = 0w0 then (arg1, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (WordLogical logOp, Constnt(v1, _), Constnt(v2, _)) => if not(isShort v1) orelse not(isShort v2) then (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) else let val () = reprocess := true val v1S = toShort v1 and v2S = toShort v2 fun asConstnt v = Constnt(toMachineWord v, []) val resultCode = case logOp of LogicalAnd => asConstnt(Word.andb(v1S,v2S)) | LogicalOr => asConstnt(Word.orb(v1S,v2S)) | LogicalXor => asConstnt(Word.xorb(v1S,v2S)) in (resultCode, decArgs, EnvSpecNone) end | (WordLogical logop, arg1, Constnt(v2, _)) => (* Return the zero if we are anding with zero otherwise the original arg *) if isShort v2 andalso toShort v2 = 0w0 then (case logop of LogicalAnd => CodeZero | _ => arg1, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (WordLogical logop, Constnt(v1, _), arg2) => if isShort v1 andalso toShort v1 = 0w0 then (case logop of LogicalAnd => CodeZero | _ => arg2, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) (* TODO: Constant folding of shifts. *) | _ => (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) end (* Arbitrary precision operations. This is a sort of mixture of a built-in and a conditional. *) and simpArbitraryCompare(TestEqual, _, _, _, _, _, _) = (* We no longer generate this for equality. General equality for arbitrary precision uses a combination of PointerEq and byte comparison. 
*) raise InternalError "simpArbitraryCompare: TestEqual" | simpArbitraryCompare(test, shortCond, arg1, arg2, longCall, context as {reprocess, ...}, tailDecs) = let val (genCond, decCond, _ (*specArg1*)) = simpSpecial(shortCond, context, tailDecs) val (genArg1, decArg1, _ (*specArg1*)) = simpSpecial(arg1, context, decCond) val (genArg2, decArgs, _ (*specArg2*)) = simpSpecial(arg2, context, decArg1) val posFlags = Address.F_bytes and negFlags = Word8.orb(Address.F_bytes, Address.F_negative) in (* Fold any constant/constant operations but more importantly, if we have variable/constant operations where the constant is short we can avoid using the full arbitrary precision call by just looking at the sign bit. *) case (genCond, genArg1, genArg2) of (_, Constnt(v1, _), Constnt(v2, _)) => let val a1: LargeInt.int = RunCall.unsafeCast v1 and a2: LargeInt.int = RunCall.unsafeCast v2 val testResult = case test of TestLess => a1 < a2 | TestGreater => a1 > a2 | TestLessEqual => a1 <= a2 | TestGreaterEqual => a1 >= a2 | _ => raise InternalError "simpArbitraryCompare: Unimplemented function" in (if testResult then CodeTrue else CodeFalse, decArgs, EnvSpecNone) end | (Constnt(c1, _), _, _) => (* The condition is "isShort X andalso isShort Y". This will have been reduced to a constant false or true if either (a) either argument is long or (b) both arguments are short.*) if isShort c1 andalso toShort c1 = 0w0 then (* One argument is definitely long - generate the long form. *) (simplify(longCall, context), decArgs, EnvSpecNone) else (* Both arguments are short. That should mean they're constants. *) (Binary{oper=WordComparison{test=test, isSigned=true}, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) before reprocess := true | (_, genArg1, cArg2 as Constnt _) => let (* The constant must be short otherwise the test would be false. *) val isNeg = case test of TestLess => true | TestLessEqual => true | _ => false (* Translate i < c into if isShort i then toShort i < c else isNegative i *) val newCode = Cond(Unary{oper=BuiltIns.IsTaggedValue, arg1=genArg1}, Binary { oper = BuiltIns.WordComparison{test=test, isSigned=true}, arg1 = genArg1, arg2 = cArg2 }, Binary { oper = BuiltIns.WordComparison{test=TestEqual, isSigned=false}, arg1=Unary { oper = MemoryCellFlags, arg1=genArg1 }, arg2=Constnt(toMachineWord(if isNeg then negFlags else posFlags), [])} ) in (newCode, decArgs, EnvSpecNone) end | (_, cArg1 as Constnt _, genArg2) => let (* We're testing c < i so the test is if isShort i then c < toShort i else isPositive i *) val isPos = case test of TestLess => true | TestLessEqual => true | _ => false val newCode = Cond(Unary{oper=BuiltIns.IsTaggedValue, arg1=genArg2}, Binary { oper = BuiltIns.WordComparison{test=test, isSigned=true}, arg1 = cArg1, arg2 = genArg2 }, Binary { oper = BuiltIns.WordComparison{test=TestEqual, isSigned=false}, arg1=Unary { oper = MemoryCellFlags, arg1=genArg2 }, arg2=Constnt(toMachineWord(if isPos then posFlags else negFlags), [])} ) in (newCode, decArgs, EnvSpecNone) end | _ => (Arbitrary{oper=ArbCompare test, shortCond=genCond, arg1=genArg1, arg2=genArg2, longCall=simplify(longCall, context)}, decArgs, EnvSpecNone) end and simpArbitraryArith(arith, shortCond, arg1, arg2, longCall, context, tailDecs) = let (* arg1 and arg2 are the arguments. shortCond is the condition that must be satisfied in order to use the short precision operation i.e. each argument must be short. 
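For addition at type int, for example, shortCond amounts to isShort arg1 andalso isShort arg2 and longCall is the full arbitrary precision addition; the cases below fold the operation completely when both arguments are constants and go straight to longCall when the condition is already known to be false because one argument is long.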
*) val (genCond, decCond, _ (*specArg1*)) = simpSpecial(shortCond, context, tailDecs) val (genArg1, decArg1, _ (*specArg1*)) = simpSpecial(arg1, context, decCond) val (genArg2, decArgs, _ (*specArg2*)) = simpSpecial(arg2, context, decArg1) in case (genArg1, genArg2, genCond) of (Constnt(v1, _), Constnt(v2, _), _) => let val a1: LargeInt.int = RunCall.unsafeCast v1 and a2: LargeInt.int = RunCall.unsafeCast v2 (*val _ = print ("Fold arbitrary precision: " ^ PolyML.makestring(arith, a1, a2) ^ "\n")*) in case arith of ArithAdd => (Constnt(toMachineWord(a1+a2), []), decArgs, EnvSpecNone) | ArithSub => (Constnt(toMachineWord(a1-a2), []), decArgs, EnvSpecNone) | ArithMult => (Constnt(toMachineWord(a1*a2), []), decArgs, EnvSpecNone) | _ => raise InternalError "simpArbitraryArith: Unimplemented function" end | (_, _, Constnt(c1, _)) => if isShort c1 andalso toShort c1 = 0w0 then (* One argument is definitely long - generate the long form. *) (simplify(longCall, context), decArgs, EnvSpecNone) else (Arbitrary{oper=ArbArith arith, shortCond=genCond, arg1=genArg1, arg2=genArg2, longCall=simplify(longCall, context)}, decArgs, EnvSpecNone) | _ => (Arbitrary{oper=ArbArith arith, shortCond=genCond, arg1=genArg1, arg2=genArg2, longCall=simplify(longCall, context)}, decArgs, EnvSpecNone) end and simpAllocateWordMemory(numWords, flags, initial, context, tailDecs) = let val (genArg1, decArg1, _ (*specArg1*)) = simpSpecial(numWords, context, tailDecs) val (genArg2, decArg2, _ (*specArg2*)) = simpSpecial(flags, context, decArg1) val (genArg3, decArg3, _ (*specArg3*)) = simpSpecial(initial, context, decArg2) in (AllocateWordMemory{numWords=genArg1, flags=genArg2, initial=genArg3}, decArg3, EnvSpecNone) end (* Loads, stores and block operations use address values. The index value is initially an arbitrary code tree but we can recognise common cases of constant index values or where a constant has been added to the index. TODO: If these are C memory moves we can also look at the base address. The base address for C memory operations is a LargeWord.word value i.e. the address is contained in a box. The base addresses for ML memory moves is an ML address i.e. unboxed. *) and simpAddress({base, index=NONE, offset}, _, context) = let val (genBase, decBase, _ (*specBase*)) = simpSpecial(base, context, RevList[]) in ({base=genBase, index=NONE, offset=offset}, decBase) end - | simpAddress({base, index=SOME index, offset}, multiplier, context) = + | simpAddress({base, index=SOME index, offset: int}, (multiplier: int, isSigned), context) = let val (genBase, RevList decBase, _) = simpSpecial(base, context, RevList[]) val (genIndex, RevList decIndex, _ (* specIndex *)) = simpSpecial(index, context, RevList[]) val (newIndex, newOffset) = case genIndex of Constnt(indexOffset, _) => (* Convert small, positive offsets but leave large values as indexes. We could have silly index values here which will never be executed because of a range check but should still compile. 
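With the signed treatment added here a small negative index constant is folded too: for a 32-bit access, where the multiplier is 4 (illustrative figures), an index of ~ 0w2 sign-extends through Word.toIntX to ~2, so 8 is subtracted from the byte offset; previously such a constant failed the < 0w1000 test and was simply left as a run-time index. *)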
*) - if isShort indexOffset andalso toShort indexOffset < 0w1000 - then (NONE, offset + toShort indexOffset * multiplier) + if isShort indexOffset + then + let + val indexOffsetW = toShort indexOffset + in + if indexOffsetW < 0w1000 orelse isSigned andalso indexOffsetW > ~ 0w1000 + then (NONE, offset + (if isSigned then Word.toIntX else Word.toInt)indexOffsetW * multiplier) + else (SOME genIndex, offset) + end else (SOME genIndex, offset) | _ => (SOME genIndex, offset) in ({base=genBase, index=newIndex, offset=newOffset}, RevList(decIndex @ decBase)) end (* (* A built-in function. We can call certain built-ins immediately if the arguments are constants. *) and simpBuiltIn(rtsCallNo, argList, context as { reprocess, ...}) = let val copiedArgs = map (fn arg => simpSpecial(arg, context)) argList open RuntimeCalls (* When checking for a constant we need to check that there are no bindings. They could have side-effects. *) fun isAConstant(Constnt _, [], _) = true | isAConstant _ = false in (* If the function is an RTS call that is safe to evaluate immediately and all the arguments are constants evaluate it now. *) if earlyRtsCall rtsCallNo andalso List.all isAConstant copiedArgs then let val () = reprocess := true exception Interrupt = Thread.Thread.Interrupt (* Turn the arguments into a vector. *) val argVector = case makeConstVal(mkTuple(List.map specialToGeneral copiedArgs)) of Constnt(w, _) => w | _ => raise InternalError "makeConstVal: Not constant" (* Call the function. If it raises an exception (e.g. divide by zero) generate code to raise the exception at run-time. We don't do that for Interrupt which we assume only arises by user interaction and not as a result of executing the code so we reraise that exception immediately. *) val ioOp : int -> machineWord = RunCall.run_call1 RuntimeCalls.POLY_SYS_io_operation (* We need callcode_tupled here because we pass the arguments as a tuple but the RTS functions we're calling expect arguments in registers or on the stack. *) val call: (address * machineWord) -> machineWord = RunCall.run_call1 RuntimeCalls.POLY_SYS_callcode_tupled val code = Constnt (call(toAddress(ioOp rtsCallNo), argVector), []) handle exn as Interrupt => raise exn (* Must not handle this *) | exn => Raise (Constnt(toMachineWord exn, [])) in (code, [], EnvSpecNone) end (* We can optimise certain built-ins in combination with others. If we have POLY_SYS_unsigned_to_longword combined with POLY_SYS_longword_to_tagged we can eliminate both. This can occur in cases such as Word.fromLargeWord o Word8.toLargeWord. If we have POLY_SYS_cmem_load_X functions where the address is formed by adding a constant to an address we can move the addend into the load instruction. *) (* TODO: Could we also have POLY_SYS_signed_to_longword here? *) else if rtsCallNo = POLY_SYS_longword_to_tagged andalso (case copiedArgs of [(_, _, EnvSpecBuiltIn(r, _))] => r = POLY_SYS_unsigned_to_longword | _ => false) then let val arg = (* Get the argument of the argument. *) case copiedArgs of [(_, _, EnvSpecBuiltIn(_, [arg]))] => arg | _ => raise Bind in (arg, [], EnvSpecNone) end else if (rtsCallNo = POLY_SYS_cmem_load_8 orelse rtsCallNo = POLY_SYS_cmem_load_16 orelse rtsCallNo = POLY_SYS_cmem_load_32 orelse rtsCallNo = POLY_SYS_cmem_load_64 orelse rtsCallNo = POLY_SYS_cmem_store_8 orelse rtsCallNo = POLY_SYS_cmem_store_16 orelse rtsCallNo = POLY_SYS_cmem_store_32 orelse rtsCallNo = POLY_SYS_cmem_store_64) andalso (* Check if the first argument is an addition. The second should be a constant. 
If the addend is a constant it will be a large integer i.e. the address of a byte segment. *) let (* Check that we have a valid value to add to a large word. The cmem_load/store values sign extend their arguments so we use toLargeWordX here. *) fun isAcceptableOffset c = if isShort c (* Shouldn't occur. *) then false else let val l: LargeWord.word = RunCall.unsafeCast c in Word.toLargeWordX(Word.fromLargeWord l) = l end in case copiedArgs of (_, _, EnvSpecBuiltIn(r, args)) :: (Constnt _, _, _) :: _ => r = POLY_SYS_plus_longword andalso (case args of (* If they were both constants we'd have folded them. *) [Constnt(c, _), _] => isAcceptableOffset c | [_, Constnt(c, _)] => isAcceptableOffset c | _ => false) | _ => false end then let (* We have a load or store with an added constant. *) val (base, offset) = case copiedArgs of (_, _, EnvSpecBuiltIn(_, [Constnt(offset, _), base])) :: (Constnt(existing, _), _, _) :: _ => (base, Word.fromLargeWord(RunCall.unsafeCast offset) + toShort existing) | (_, _, EnvSpecBuiltIn(_, [base, Constnt(offset, _)])) :: (Constnt(existing, _), _, _) :: _ => (base, Word.fromLargeWord(RunCall.unsafeCast offset) + toShort existing) | _ => raise Bind val newDecs = List.map(fn h => makeNewDecl(h, context)) copiedArgs val genArgs = List.map(fn ((g, _), _) => envGeneralToCodetree g) newDecs val preDecs = List.foldr (op @) [] (List.map #2 newDecs) val gen = BuiltIn(rtsCallNo, base :: Constnt(toMachineWord offset, []) :: List.drop(genArgs, 2)) in (gen, preDecs, EnvSpecNone) end else let (* Create bindings for the arguments. This ensures that any side-effects in the evaluation of the arguments are performed in the correct order even if the application of the built-in itself is applicative. The new arguments are either loads or constants which are applicative. *) val newDecs = List.map(fn h => makeNewDecl(h, context)) copiedArgs val genArgs = List.map(fn ((g, _), _) => envGeneralToCodetree g) newDecs val preDecs = List.foldr (op @) [] (List.map #2 newDecs) val gen = BuiltIn(rtsCallNo, genArgs) val spec = if reorderable gen then EnvSpecBuiltIn(rtsCallNo, genArgs) else EnvSpecNone in (gen, preDecs, spec) end end *) and simpIfThenElse(condTest, condThen, condElse, context, tailDecs) = (* If-then-else. The main simplification is if we have constants in the test or in both the arms. *) let val word0 = toMachineWord 0 val word1 = toMachineWord 1 val False = word0 val True = word1 in case simpSpecial(condTest, context, tailDecs) of (* If the test is a constant we can return the appropriate arm and ignore the other. *) (Constnt(testResult, _), bindings, _) => let val arm = if wordEq (testResult, False) (* false - return else-part *) then condElse (* if false then x else y == y *) (* if true then x else y == x *) else condThen in simpSpecial(arm, context, bindings) end | (testGen, testbindings as RevList testBList, testSpec) => let fun mkNot (Unary{oper=BuiltIns.NotBoolean, arg1}) = arg1 | mkNot arg = Unary{oper=BuiltIns.NotBoolean, arg1=arg} (* If the test involves a variable that was created with a NOT it's better to move it in here. *) val testCond = case testSpec of EnvSpecUnary(BuiltIns.NotBoolean, arg1) => mkNot arg1 | _ => testGen in case (simpSpecial(condThen, context, RevList[]), simpSpecial(condElse, context, RevList[])) of ((thenConst as Constnt(thenVal, _), RevList [], _), (elseConst as Constnt(elseVal, _), RevList [], _)) => (* Both arms return constants. 
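For instance, if x then true else false reduces below to just x, and if x then false else false becomes the constant false, with the test kept only when it might have a side-effect.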
This situation can arise in situations where we have andalso/orelse where the second "argument" has been reduced to a constant. *) if wordEq (thenVal, elseVal) then (* If the test has a side-effect we have to do it otherwise we can remove it. If we're in a nested andalso/orelse that may mean we can simplify the next level out. *) (thenConst (* or elseConst *), if sideEffectFree testCond then testbindings else RevList(NullBinding testCond :: testBList), EnvSpecNone) (* if x then true else false == x *) else if wordEq (thenVal, True) andalso wordEq (elseVal, False) then (testCond, testbindings, EnvSpecNone) (* if x then false else true == not x *) else if wordEq (thenVal, False) andalso wordEq (elseVal, True) then (mkNot testCond, testbindings, EnvSpecNone) else (* can't optimise *) (Cond (testCond, thenConst, elseConst), testbindings, EnvSpecNone) (* Rewrite "if x then raise y else z" into "(if x then raise y else (); z)" The advantage is that any tuples in z are lifted outside the "if". *) | (thenPart as (Raise _, _:revlist, _), (elsePart, RevList elseBindings, elseSpec)) => (* then-part raises an exception *) (elsePart, RevList(elseBindings @ NullBinding(Cond (testCond, specialToGeneral thenPart, CodeZero)) :: testBList), elseSpec) | ((thenPart, RevList thenBindings, thenSpec), elsePart as (Raise _, _, _)) => (* else part raises an exception *) (thenPart, RevList(thenBindings @ NullBinding(Cond (testCond, CodeZero, specialToGeneral elsePart)) :: testBList), thenSpec) | (thenPart, elsePart) => (Cond (testCond, specialToGeneral thenPart, specialToGeneral elsePart), testbindings, EnvSpecNone) end end (* Tuple construction. Tuples are also used for datatypes and structures (i.e. modules) *) and simpTuple(entries, isVariant, context, tailDecs) = (* The main reason for optimising record constructions is that they appear as tuples in ML. We try to ensure that loads from locally created tuples do not involve indirecting from the tuple but can get the value which was put into the tuple directly. If that is successful we may find that the tuple is never used directly so the use-count mechanism will ensure it is never created. *) let val tupleSize = List.length entries (* The record construction is treated as a block of local declarations so that any expressions which might have side-effects are done exactly once. *) (* We thread the bindings through here to avoid having to append the result. *) fun processFields([], bindings) = ([], bindings) | processFields(field::fields, bindings) = let val (thisField, newBindings) = makeNewDecl(simpSpecial(field, context, bindings), context) val (otherFields, resBindings) = processFields(fields, newBindings) in (thisField::otherFields, resBindings) end val (fieldEntries, allBindings) = processFields(entries, tailDecs) (* Make sure we include any inline code in the result. If this tuple is being "exported" we will lose the "special" part. *) fun envResToCodetree(EnvGenLoad(ext), _) = Extract ext | envResToCodetree(EnvGenConst(w, p), s) = Constnt(w, setInline s p) val generalFields = List.map envResToCodetree fieldEntries val genRec = if List.all isConstnt generalFields then makeConstVal(Tuple{ fields = generalFields, isVariant = isVariant }) else Tuple{ fields = generalFields, isVariant = isVariant } (* Get the field from the tuple if possible. If it's a variant, though, we may try to get an invalid field. See Tests/Succeed/Test167. 
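A sketch of how that can happen: a field selection specialised for one constructor of a datatype may be simplified inside a branch where the value recorded in the environment was actually built by a different constructor with fewer fields; that branch can never be taken at run time, so returning a dummy constant for the out-of-range field is safe.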
*) fun getField addr = if addr < tupleSize then List.nth(fieldEntries, addr) else if isVariant then (EnvGenConst(toMachineWord 0, []), EnvSpecNone) else raise InternalError "getField - invalid index" val specRec = EnvSpecTuple(tupleSize, getField) in (genRec, allBindings, specRec) end and simpFieldSelect(base, offset, indKind, context, tailDecs) = let val (genSource, decSource, specSource) = simpSpecial(base, context, tailDecs) in (* Try to do the selection now if possible. *) case specSource of EnvSpecTuple(_, recEnv) => let (* The "special" entry we've found is a tuple. That means that we are taking a field from a tuple we made earlier and so we should be able to get the original code we used when we made the tuple. That might mean the tuple is never used and we can optimise away the construction of it completely. *) val (newGen, newSpec) = recEnv offset in (envGeneralToCodetree newGen, decSource, newSpec) end | _ => (* No special case possible. If the tuple is a constant mkInd/mkVarField will do the selection immediately. *) let val genSelect = case indKind of IndTuple => mkInd(offset, genSource) | IndVariant => mkVarField(offset, genSource) | IndContainer => mkIndContainer(offset, genSource) in (genSelect, decSource, EnvSpecNone) end end (* Process a SetContainer. Unlike the other simpXXX functions this is called after the arguments have been processed. We try to push the SetContainer to the leaves of the expression. This is particularly important with tail-recursive functions that return tuples. Without this the function will lose tail-recursion since each recursion will be followed by code to copy the result back to the previous container. *) and simpPostSetContainer(container, Tuple{fields, ...}, RevList tupleDecs, filter) = let (* Apply the filter now. *) fun select(n, hd::tl) = if n >= BoolVector.length filter then [] else if BoolVector.sub(filter, n) then hd :: select(n+1, tl) else select(n+1, tl) | select(_, []) = [] val selected = select(0, fields) (* Frequently we will have produced an indirection from the same base. These will all be bindings so we have to reverse the process. *) fun findOriginal a = List.find(fn Declar{addr, ...} => addr = a | _ => false) tupleDecs fun checkFields(last, Extract(LoadLocal a) :: tl) = ( case findOriginal a of SOME(Declar{value=Indirect{base=Extract ext, indKind=IndContainer, offset, ...}, ...}) => ( case last of NONE => checkFields(SOME(ext, [offset]), tl) | SOME(lastExt, offsets) => (* It has to be the same base and with increasing offsets (no reordering). *) if lastExt = ext andalso offset > hd offsets then checkFields(SOME(ext, offset :: offsets), tl) else NONE ) | _ => NONE ) | checkFields(_, _ :: _) = NONE | checkFields(last, []) = last fun fieldsToFilter fields = let val maxDest = List.foldl Int.max ~1 fields val filterArray = BoolArray.array(maxDest+1, false) val _ = List.app(fn n => BoolArray.update(filterArray, n, true)) fields in BoolArray.vector filterArray end in case checkFields(NONE, selected) of SOME (ext, fields) => (* It may be a container. *) let val filter = fieldsToFilter fields in case ext of LoadLocal localAddr => let (* Is this a container? If it is and we're copying all of it we can replace the inner container with a binding to the outer. We have to be careful because it is possible that we may create and set the inner container, then have some bindings that do some side-effects with the inner container before then copying it to the outer container. 
For simplicity and to maintain the condition that the container is set in the tails we only merge the containers if it's at the end (after any "filtering"). *) val allSet = BoolVector.foldl (fn (a, t) => a andalso t) true filter fun findContainer [] = NONE | findContainer (Declar{value, ...} :: tl) = if sideEffectFree value then findContainer tl else NONE | findContainer (Container{addr, size, setter, ...} :: tl) = if localAddr = addr andalso size = BoolVector.length filter andalso allSet then SOME (setter, tl) else NONE | findContainer _ = NONE in case findContainer tupleDecs of SOME (setter, decs) => (* Put in a binding for the inner container address so the setter will set the outer container. For this to work all loads from the stack must use native word length. *) mkEnv(List.rev(Declar{addr=localAddr, value=container, use=[]} :: decs), setter) | NONE => mkEnv(List.rev tupleDecs, SetContainer{container=container, tuple = mkTuple selected, filter=BoolVector.tabulate(List.length selected, fn _ => true)}) end | _ => mkEnv(List.rev tupleDecs, SetContainer{container=container, tuple = mkTuple selected, filter=BoolVector.tabulate(List.length selected, fn _ => true)}) end | NONE => mkEnv(List.rev tupleDecs, SetContainer{container=container, tuple = mkTuple selected, filter=BoolVector.tabulate(List.length selected, fn _ => true)}) end | simpPostSetContainer(container, Cond(ifpt, thenpt, elsept), RevList tupleDecs, filter) = mkEnv(List.rev tupleDecs, Cond(ifpt, simpPostSetContainer(container, thenpt, RevList [], filter), simpPostSetContainer(container, elsept, RevList [], filter))) | simpPostSetContainer(container, Newenv(envDecs, envExp), RevList tupleDecs, filter) = simpPostSetContainer(container, envExp, RevList(List.rev envDecs @ tupleDecs), filter) | simpPostSetContainer(container, BeginLoop{loop, arguments}, RevList tupleDecs, filter) = mkEnv(List.rev tupleDecs, BeginLoop{loop = simpPostSetContainer(container, loop, RevList [], filter), arguments=arguments}) | simpPostSetContainer(_, loop as Loop _, RevList tupleDecs, _) = (* If we are inside a BeginLoop we only set the container on leaves that exit the loop. Loop entries will go back to the BeginLoop so we don't add SetContainer nodes. *) mkEnv(List.rev tupleDecs, loop) | simpPostSetContainer(container, Handle{exp, handler, exPacketAddr}, RevList tupleDecs, filter) = mkEnv(List.rev tupleDecs, Handle{ exp = simpPostSetContainer(container, exp, RevList [], filter), handler = simpPostSetContainer(container, handler, RevList [], filter), exPacketAddr = exPacketAddr}) | simpPostSetContainer(container, tupleGen, RevList tupleDecs, filter) = mkEnv(List.rev tupleDecs, mkSetContainer(container, tupleGen, filter)) fun simplifier{code, numLocals, maxInlineSize} = let val localAddressAllocator = ref 0 val addrTab = Array.array(numLocals, NONE) fun lookupAddr (LoadLocal addr) = valOf(Array.sub(addrTab, addr)) | lookupAddr (env as LoadArgument _) = (EnvGenLoad env, EnvSpecNone) | lookupAddr (env as LoadRecursive) = (EnvGenLoad env, EnvSpecNone) | lookupAddr (LoadClosure _) = raise InternalError "top level reached in simplifier" and enterAddr (addr, tab) = Array.update (addrTab, addr, SOME tab) fun mkAddr () = ! localAddressAllocator before localAddressAllocator := ! localAddressAllocator + 1 val reprocess = ref false val (gen, RevList bindings, spec) = simpSpecial(code, {lookupAddr = lookupAddr, enterAddr = enterAddr, nextAddress = mkAddr, reprocess = reprocess, maxInlineSize = maxInlineSize}, RevList[]) in ((gen, List.rev bindings, spec), ! 
localAddressAllocator, !reprocess) end fun specialToGeneral(g, b as _ :: _, s) = mkEnv(b, specialToGeneral(g, [], s)) | specialToGeneral(Constnt(w, p), [], s) = Constnt(w, setInline s p) | specialToGeneral(g, [], _) = g structure Sharing = struct type codetree = codetree and codeBinding = codeBinding and envSpecial = envSpecial end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML index 2c33ad34..c89110d4 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML @@ -1,3975 +1,3997 @@ (* Copyright David C. J. Matthews 2016-20 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86CodetreeToICode( structure BACKENDTREE: BackendIntermediateCodeSig structure ICODE: ICodeSig structure DEBUG: DEBUGSIG structure X86FOREIGN: FOREIGNCALLSIG structure ICODETRANSFORM: X86ICODETRANSFORMSIG structure CODE_ARRAY: CODEARRAYSIG sharing ICODE.Sharing = ICODETRANSFORM.Sharing = CODE_ARRAY.Sharing ): GENCODESIG = struct open BACKENDTREE open Address open ICODE open CODE_ARRAY exception InternalError = Misc.InternalError local val regs = case targetArch of Native32Bit => [eax, ebx] | Native64Bit => [eax, ebx, r8, r9, r10] | ObjectId32Bit => [eax, esi, r8, r9, r10] val fpResult = case targetArch of Native32Bit => FPReg fp0 | _ => XMMReg xmm0 val fpArgRegs = case targetArch of Native32Bit => [] | _ => [xmm0, xmm1, xmm2] in val generalArgRegs = List.map GenReg regs val floatingPtArgRegs = List.map XMMReg fpArgRegs fun resultReg GeneralType = GenReg eax | resultReg DoubleFloatType = fpResult | resultReg SingleFloatType = fpResult end (* tag a short constant *) fun tag c = 2 * c + 1 (* shift a short constant, but don't set tag bit *) fun semitag c = 2 * c (* Reverse a list and append the second. This is used a lot when converting between the reverse and forward list versions. e.g. codeToICode and codeToICodeRev *) fun revApp([], l) = l | revApp(hd :: tl, l) = revApp(tl, hd :: l) datatype blockStruct = BlockSimple of x86ICode | BlockExit of x86ICode | BlockLabel of int | BlockFlow of controlFlow | BlockBegin of { regArgs: (preg * reg) list, stackArgs: stackLocn list } | BlockRaiseAndHandle of x86ICode * int | BlockOptionalHandle of {call: x86ICode, handler: int, label: int } local open RunCall val F_mutable_bytes = Word.fromLargeWord(Word8.toLargeWord(Word8.orb (F_mutable, F_bytes))) fun makeRealConst l = let val r = allocateByteMemory(0wx8 div bytesPerWord, F_mutable_bytes) fun setBytes([], _) = () | setBytes(hd::tl, n) = (storeByte(r, n, hd); setBytes(tl, n+0wx1)) val () = setBytes(l, 0w0) val () = clearMutableBit r in r end in (* These are floating point constants used to change and mask the sign bit. 
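For reference, the byte lists below are little-endian, so realSignBit is the 64-bit pattern 0wx8000000000000000 and realAbsMask is 0wx7FFFFFFFFFFFFFFF: XORing a double with the first flips its sign and ANDing it with the second clears the sign, giving negation and absolute value without arithmetic; floatSignBit and floatAbsMask hold the corresponding 32-bit patterns 0wx80000000 and 0wx7FFFFFFF in the low half of each constant.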
*) val realSignBit: machineWord = makeRealConst [0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx80] and realAbsMask: machineWord = makeRealConst [0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wx7f] and floatSignBit: machineWord = makeRealConst [0wx00, 0wx00, 0wx00, 0wx80, 0wx00, 0wx00, 0wx00, 0wx00] and floatAbsMask: machineWord = makeRealConst [0wxff, 0wxff, 0wxff, 0wx7f, 0wx00, 0wx00, 0wx00, 0wx00] end datatype commutative = Commutative | NonCommutative (* Check that a large-word constant looks right and get the value as a large int*) fun largeWordConstant value = if isShort value then raise InternalError "largeWordConstant: invalid" else let val addr = toAddress value in if length addr <> nativeWordSize div wordSize orelse flags addr <> F_bytes then raise InternalError "largeWordConstant: invalid" else (); LargeWord.toLargeInt(RunCall.unsafeCast addr) end fun codeFunctionToX86({body, localCount, name, argTypes, resultType=fnResultType, closure, ...}:bicLambdaForm, debugSwitches, resultClosure) = let (* Pseudo-registers are allocated sequentially and the properties added to the list. *) val pregCounter = ref 0 val pregPropList = ref [] fun newPReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropGeneral :: !pregPropList in PReg regNo end and newUReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropUntagged :: !pregPropList in PReg regNo end and newStackLoc size = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropStack size :: !pregPropList in StackLoc{size=size, rno=regNo} end and newMergeReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropMultiple :: !pregPropList in PReg regNo end datatype locationValue = NoLocation | PregLocation of preg | ContainerLocation of { container: stackLocn, stackOffset: int } val locToPregArray = Array.array(localCount, NoLocation) val labelCounter = ref 1 (* Start at 1. Zero is used for the root. *) fun newLabel() = !labelCounter before labelCounter := !labelCounter + 1 val ccRefCounter = ref 0 fun newCCRef() = CcRef(!ccRefCounter) before ccRefCounter := !ccRefCounter + 1 fun constantAsArgument value = if isShort value then IntegerConstant(tag(Word.toLargeIntX(toShort value))) else AddressConstant value (* Create the branch condition from the test, isSigned and jumpOn values. (In)equality tests are the same for signed and unsigned values. 
*) local open BuiltIns in fun testAsBranch(TestEqual, _, true) = JE | testAsBranch(TestEqual, _, false) = JNE (* Signed tests *) | testAsBranch(TestLess, true, true) = JL | testAsBranch(TestLess, true, false) = JGE | testAsBranch(TestLessEqual, true, true) = JLE | testAsBranch(TestLessEqual, true, false) = JG | testAsBranch(TestGreater, true, true) = JG | testAsBranch(TestGreater, true, false) = JLE | testAsBranch(TestGreaterEqual, true, true) = JGE | testAsBranch(TestGreaterEqual, true, false) = JL (* Unsigned tests *) | testAsBranch(TestLess, false, true) = JB | testAsBranch(TestLess, false, false) = JNB | testAsBranch(TestLessEqual, false, true) = JNA | testAsBranch(TestLessEqual, false, false) = JA | testAsBranch(TestGreater, false, true) = JA | testAsBranch(TestGreater, false, false) = JNA | testAsBranch(TestGreaterEqual, false, true) = JNB | testAsBranch(TestGreaterEqual, false, false) = JB | testAsBranch(TestUnordered, _, _) = raise InternalError "TestUnordered" (* Switch the direction of a test if we turn c op x into x op c. *) fun leftRightTest TestEqual = TestEqual | leftRightTest TestLess = TestGreater | leftRightTest TestLessEqual = TestGreaterEqual | leftRightTest TestGreater = TestLess | leftRightTest TestGreaterEqual = TestLessEqual | leftRightTest TestUnordered = TestUnordered end (* Overflow check. This raises Overflow if the overflow bit is set in the cc. This generates a single block for the function unless there is a handler. As well as reducing the size of the code this also means that overflow checks are generally JO instructions to the end of the code. Since the default branch prediction is not to take forward jumps this should improve prefetching on the normal, non-overflow, path. *) fun checkOverflow ({currHandler=NONE, overflowBlock=ref(SOME overFlowLab), ...}) ccRef = (* It's already been set and there's no surrounding handler - use this. *) let val noOverflowLab = newLabel() in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel noOverflowLab ] end | checkOverflow ({currHandler=NONE, overflowBlock, ...}) ccRef = let (* *) val overFlowLab = newLabel() and noOverflowLab = newLabel() val packetReg = newPReg() val () = overflowBlock := SOME overFlowLab in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel overFlowLab, BlockSimple(LoadArgument{source=AddressConstant(toMachineWord(Overflow)), dest=packetReg, kind=movePolyWord}), BlockExit(RaiseExceptionPacket{packetReg=packetReg}), BlockLabel noOverflowLab ] end | checkOverflow ({currHandler=SOME h, ...}) ccRef = let val overFlowLab = newLabel() and noOverflowLab = newLabel() val packetReg = newPReg() in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel overFlowLab, BlockSimple(LoadArgument{source=AddressConstant(toMachineWord(Overflow)), dest=packetReg, kind=movePolyWord}), BlockRaiseAndHandle(RaiseExceptionPacket{packetReg=packetReg}, h), BlockLabel noOverflowLab ] end fun setAndRestoreRounding (rndMode, doWithRounding) = let open IEEEReal val savedRnd = newUReg() and setRnd = newUReg() in case fpMode of FPModeX87 => [BlockSimple(GetX87ControlReg{dest=savedRnd})] @ (* Set the appropriate bits in the control word. *) (case rndMode of TO_NEAREST => (* The bits need to be zero - just mask them. 
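The rounding control field is bits 10 and 11 of the x87 control word, hence the 0xf3ff mask here, with 0x400 selecting round down, 0x800 round up and 0xc00 round towards zero; in the SSE2 case further down the same field occupies bits 13 and 14 of MXCSR, hence the values 0xffff9fff, 0x2000, 0x4000 and 0x6000.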
*) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32})] | TO_NEGINF => let val wrk = newUReg() in (* Mask the bits and set to 01 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x400, ccRef=newCCRef(), opSize=OpSize32})] end | TO_POSINF => let val wrk = newUReg() in (* Mask the bits and set to 10 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x800, ccRef=newCCRef(), opSize=OpSize32})] end | TO_ZERO => (* The bits need to be one - just set them. *) [BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xc00, ccRef=newCCRef(), opSize=OpSize32})]) @ [BlockSimple(SetX87ControlReg{source=setRnd})] @ doWithRounding() @ (* Restore the original rounding. *) [BlockSimple(SetX87ControlReg{source=savedRnd})] | FPModeSSE2 => [BlockSimple(GetSSE2ControlReg{dest=savedRnd})] @ (* Set the appropriate bits in the control word. *) (case rndMode of TO_NEAREST => (* The bits need to be zero - just mask them. *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32})] | TO_NEGINF => let val wrk = newUReg() in (* Mask the bits and set to 01 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x2000, ccRef=newCCRef(), opSize=OpSize32})] end | TO_POSINF => let val wrk = newUReg() in (* Mask the bits and set to 10 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x4000, ccRef=newCCRef(), opSize=OpSize32})] end | TO_ZERO => (* The bits need to be one - just set them. *) [BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x6000, ccRef=newCCRef(), opSize=OpSize32})]) @ [BlockSimple(SetSSE2ControlReg{source=setRnd})] @ doWithRounding() @ [BlockSimple(SetSSE2ControlReg{source=savedRnd})] end (* Put a floating point value into a box or tag it so the value can be held in a general register. *) fun boxOrTagReal(srcReg, destReg, precision) = if precision = BuiltIns.PrecDouble orelse wordSize <> 0w8 then let open BuiltIns val boxFloat = case (fpMode, precision) of (FPModeX87, PrecDouble) => BoxX87Double | (FPModeX87, PrecSingle) => BoxX87Float | (FPModeSSE2, PrecDouble) => BoxSSE2Double | (FPModeSSE2, PrecSingle) => BoxSSE2Float in [BlockSimple(BoxValue{boxKind=boxFloat, source=srcReg, dest=destReg, saveRegs=[]})] end else [BlockSimple(TagFloat{source=srcReg, dest=destReg})] (* Indicate that the base address is actually an object index where appropriate. *) val memIndexOrObject = case targetArch of ObjectId32Bit => ObjectIndex | _ => NoMemIndex (* Generally we have an offset in words and no index register. 
*) fun wordOffsetAddress(offset, baseReg: preg): argument = MemoryLocation{offset=offset*Word.toInt wordSize, base=baseReg, index=memIndexOrObject, cache=NONE} (* The large-word operations all work on the value within the box pointed at by the register. We generate all large-word operations using this even where the X86 instruction requires a register. This allows the next level to optimise cases of cascaded instructions and avoid creating boxes for intermediate values. *) fun wordAt reg = wordOffsetAddress(0, reg) val returnAddressEntry = newStackLoc 1 datatype argLoc = ArgInReg of { realReg: reg, argReg: preg } | ArgOnStack of { stackOffset: int, stackReg: stackLocn } (* Pseudo-regs for the result, the closure and the args that were passed in real regs. *) val resultTarget = newPReg() val closureRegAddr = newPReg() (* Create a map for the arguments indicating their register or stack location. *) local (* Select the appropriate argument register depending on the argument type. *) fun argTypesToArgEntries([], _, _, _) = ([], [], [], []) | argTypesToArgEntries(DoubleFloatType :: tl, gRegs, fpReg :: fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val pRegArg = newPReg() and uRegArg = newUReg() in (ArgInReg{realReg=fpReg, argReg=pRegArg} :: argTypes, boxOrTagReal(uRegArg, pRegArg, BuiltIns.PrecDouble) @ argCode, (uRegArg, fpReg) :: argRegs, stackArgs) end | argTypesToArgEntries(SingleFloatType :: tl, gRegs, fpReg :: fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val pRegArg = newPReg() and uRegArg = newUReg() in (ArgInReg{realReg=fpReg, argReg=pRegArg} :: argTypes, boxOrTagReal(uRegArg, pRegArg, BuiltIns.PrecSingle) @ argCode, (uRegArg, fpReg) :: argRegs, stackArgs) end | argTypesToArgEntries(_ :: tl, gReg :: gRegs, fpRegs, n) = (* This deals with general arguments but also with extra floating point arguments. They are boxed as usual. *) let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val argReg=newPReg() in (ArgInReg{realReg=gReg, argReg=argReg} :: argTypes, argCode, (argReg, gReg) :: argRegs, stackArgs) end | argTypesToArgEntries(_ :: tl, [], fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, [], fpRegs, n-1) val stackLoc = newStackLoc 1 in (ArgOnStack {stackOffset=n, stackReg = stackLoc } :: argTypes, argCode, argRegs, stackLoc :: stackArgs) end val (argEntries, argCode, argRegs, stackArguments) = argTypesToArgEntries(argTypes, generalArgRegs, floatingPtArgRegs, List.length argTypes) val clReg = case closure of [] => [] | _ => [(closureRegAddr, GenReg edx)] in val argumentVector = Vector.fromList argEntries (* Start code for the function. *) val beginInstructions = argCode @ [BlockBegin{regArgs=clReg @ argRegs, stackArgs=stackArguments @ [returnAddressEntry]}] (* The number of arguments on the stack. Needed in return instrs and tail calls. *) val currentStackArgs = List.length stackArguments end (* The return instruction. This can be added on to various tails but there is always one at the end anyway. 
*) fun returnInstruction({stackPtr, ...}, target, tailCode) = let val (returnCode, resReg) = case fnResultType of GeneralType => ([], target) | DoubleFloatType => let val resReg = newUReg() in ([BlockSimple(LoadArgument{source=wordAt target, dest=resReg, kind=MoveDouble})], resReg) end | SingleFloatType => let val resReg = newUReg() val unpack = if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument target, dest=resReg, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt target, dest=resReg, kind=MoveFloat}) in ([unpack], resReg) end in BlockExit(ReturnResultFromFunction{resultReg=resReg, realReg=resultReg fnResultType, numStackArgs=currentStackArgs}) :: returnCode @ (if stackPtr <> 0 then BlockSimple(ResetStackPtr{numWords=stackPtr, preserveCC=false}) :: tailCode else tailCode) end (* This controls what codeAsArgument returns. Different instructions have different requirements. If an option is set to false the value is instead loaded into a new preg. "const32s" means that it will fit into 32-bits. Any constant satisfies that on X86/32 but on the X86/64 we don't allow addresses because we can't be sure whether they will fit or not. *) type allowedArgument = { anyConstant: bool, const32s: bool, memAddr: bool, existingPreg: bool } val allowInMemMove = (* We can move a 32-bit constant into memory but not a long constant. *) { anyConstant=false, const32s=true, memAddr=false, existingPreg=true } and allowInPReg = { anyConstant=false, const32s=false, memAddr=false, existingPreg=true } (* AllowDefer can be used to ensure that any side-effects are done before something else but otherwise we only evaluate afterwards. *) and allowDefer = { anyConstant=true, const32s=true, memAddr=true, existingPreg=true } datatype destination = SpecificPReg of preg | NoResult | Allowed of allowedArgument (* Context type. *) type context = { loopArgs: (preg list * int * int) option, stackPtr: int, currHandler: int option, overflowBlock: int option ref } (* If a preg has been provided, use that, otherwise generate a new one. *) fun asTarget(SpecificPReg preg) = preg | asTarget NoResult = newPReg() | asTarget(Allowed _) = newPReg() fun moveIfNotAllowed(NoResult, code, arg) = (code, arg, false) | moveIfNotAllowed(Allowed{anyConstant=true, ...}, code, arg as AddressConstant _) = (code, arg, false) | moveIfNotAllowed(Allowed{anyConstant=true, ...}, code, arg as IntegerConstant _) = (code, arg, false) | moveIfNotAllowed(dest as Allowed{const32s=true, ...}, code, arg as IntegerConstant value) = (* This is allowed if the value is within 32-bits *) if is32bit value then (code, arg, false) else moveToTarget(dest, code, arg) | moveIfNotAllowed(dest as Allowed{const32s=true, ...}, code, arg as AddressConstant _) = if targetArch = Native32Bit then (code, arg, false) (* We can store the address directly *) else moveToTarget(dest, code, arg) | moveIfNotAllowed(Allowed{existingPreg=true, ...}, code, arg as RegisterArgument(PReg _)) = (code, arg, false) | moveIfNotAllowed(Allowed{memAddr=true, ...}, code, arg as MemoryLocation _) = (code, arg, false) | moveIfNotAllowed(dest, code, arg) = moveToTarget(dest, code, arg) and moveToTarget(dest, code, arg) = let val target = asTarget dest val moveSize = case arg of AddressConstant _ => movePolyWord | MemoryLocation _ => movePolyWord | _ => moveNativeWord in (code @ [BlockSimple(LoadArgument{source=arg, dest=target, kind=moveSize})], RegisterArgument target, false) end (* Create a bool result from a test by returning true or false. 
*) fun makeBoolResultRev(condition, ccRef, target, testCode) = let val trueLab = newLabel() and falseLab = newLabel() and mergeLab = newLabel() val mergeReg = newMergeReg() in BlockSimple(LoadArgument{dest=target, source=RegisterArgument mergeReg, kind=Move32Bit}) :: BlockLabel mergeLab :: BlockFlow(Unconditional mergeLab) :: BlockSimple(LoadArgument{dest=mergeReg, source=IntegerConstant(tag 0), kind=Move32Bit}) :: BlockLabel falseLab :: BlockFlow(Unconditional mergeLab) :: BlockSimple(LoadArgument{dest=mergeReg, source=IntegerConstant(tag 1), kind=Move32Bit}) :: BlockLabel trueLab :: BlockFlow(Conditional{ ccRef=ccRef, condition=condition, trueJump=trueLab, falseJump=falseLab }) :: testCode end fun moveIfNotAllowedRev(NoResult, code, arg) = (code, arg, false) | moveIfNotAllowedRev(Allowed{anyConstant=true, ...}, code, arg as AddressConstant _) = (code, arg, false) | moveIfNotAllowedRev(Allowed{anyConstant=true, ...}, code, arg as IntegerConstant _) = (code, arg, false) | moveIfNotAllowedRev(dest as Allowed{const32s=true, ...}, code, arg as IntegerConstant value) = (* This is allowed if the value is within 32-bits *) if is32bit value then (code, arg, false) else moveToTargetRev(dest, code, arg) | moveIfNotAllowedRev(dest as Allowed{const32s=true, ...}, code, arg as AddressConstant _) = if targetArch = Native32Bit then (code, arg, false) else moveToTargetRev(dest, code, arg) | moveIfNotAllowedRev(Allowed{existingPreg=true, ...}, code, arg as RegisterArgument(PReg _)) = (code, arg, false) | moveIfNotAllowedRev(Allowed{memAddr=true, ...}, code, arg as MemoryLocation _) = (code, arg, false) | moveIfNotAllowedRev(dest, code, arg) = moveToTargetRev(dest, code, arg) and moveToTargetRev(dest, code, arg) = let val target = asTarget dest val moveSize = case arg of AddressConstant _ => movePolyWord | MemoryLocation _ => movePolyWord | _ => moveNativeWord in (BlockSimple(LoadArgument{source=arg, dest=target, kind=moveSize}) :: code, RegisterArgument target, false) end (* Use a move if there's no offset or index. We could use an add if there's no index. *) and loadAddress{base, offset=0, index=NoMemIndex, dest} = LoadArgument{source=RegisterArgument base, dest=dest, kind=movePolyWord} | loadAddress{base, offset, index, dest} = LoadEffectiveAddress{base=SOME base, offset=offset, dest=dest, index=index, opSize=nativeWordOpSize} and codeToICodeTarget(instr, context: context, isTail, target) = (* This is really for backwards compatibility. *) let val (code, _, _) = codeToICode(instr, context, isTail, SpecificPReg target) in code end and codeToPReg(instr, context) = let (* Many instructions require an argument in a register. If it's already in a register use that rather than creating a new one. *) val (code, result, _) = codeToICode(instr, context, false, Allowed allowInPReg) val preg = case result of RegisterArgument pr => pr | _ => raise InternalError "codeToPReg" in (code, preg) end and codeToPRegRev(instr, context, tailCode) = let (* Many instructions require an argument in a register. If it's already in a register use that rather than creating a new one. *) val (code, result, _) = codeToICodeRev(instr, context, false, Allowed allowInPReg, tailCode) val preg = case result of RegisterArgument pr => pr | _ => raise InternalError "codeToPRegRev" in (code, preg) end and codeToICode(instr, context, isTail, destination) = let val (code, dest, haveExited) = codeToICodeRev(instr, context, isTail, destination, []) in (List.rev code, dest, haveExited) end (* Main function to turn the codetree into ICode. 
Optimisation is generally left to later passes. This does detect tail recursion. This builds the result up in reverse order. There was an allocation hotspot in loadFields in the BICTuple case which was eliminated by building the list in reverse and then reversing the result. It seems better to build the list in reverse generally but for the moment there are too many special cases to do everything. *) and codeToICodeRev(BICNewenv (bindings, exp), context: context as {stackPtr=initialSp, ...} , isTail, destination, tailCode) = let (* Process a list of bindings. We need to accumulate the space used by any containers and reset the stack pointer at the end if necessary. *) fun doBindings([], context, tailCode) = (tailCode, context) | doBindings(BICDeclar{value=BICExtract(BICLoadLocal l), addr, ...} :: decs, context, tailCode) = let (* Giving a new name to an existing entry. This should have been removed at a higher level but it doesn't always seem to be. In particular we must treat this specially if it's a container. *) val original = Array.sub(locToPregArray, l) val () = Array.update(locToPregArray, addr, original) in doBindings(decs, context, tailCode) end | doBindings(BICDeclar{value, addr, ...} :: decs, context, tailCode) = let val (code, dest) = codeToPRegRev(value, context, tailCode) val () = Array.update(locToPregArray, addr, PregLocation dest) in doBindings(decs, context, code) end | doBindings(BICRecDecs [{lambda, addr, ...}] :: decs, context, tailCode) = (* We shouldn't have single entries in RecDecs but it seems to occur at the moment. *) let val dest = newPReg() val (code, _, _) = codeToICodeRev(BICLambda lambda, context, false, SpecificPReg dest, tailCode) val () = Array.update(locToPregArray, addr, PregLocation dest) in doBindings(decs, context, code) end | doBindings(BICRecDecs recDecs :: decs, context, tailCode) = let val destRegs = map (fn _ => newPReg()) recDecs (* First build the closures as mutable cells containing zeros. Set the entry in the address table to the register containing the address. *) fun makeClosure({lambda={closure, ...}, addr, ...}, dest, c) = let val () = Array.update(locToPregArray, addr, PregLocation dest) val sizeClosure = List.length closure + (if targetArch = ObjectId32Bit then 2 else 1) open Address fun clear n = if n = sizeClosure then [BlockSimple(AllocateMemoryOperation{size=sizeClosure, flags=if targetArch = ObjectId32Bit then Word8.orb(F_mutable, F_closure) else F_mutable, dest=dest, saveRegs=[]})] else (clear (n+1) @ [BlockSimple( StoreArgument{source=IntegerConstant(tag 0), base=dest, offset=n*Word.toInt wordSize, index=memIndexOrObject, kind=movePolyWord, isMutable=false})]) in c @ clear 0 @ [BlockSimple InitialisationComplete] end val allocClosures = ListPair.foldlEq makeClosure [] (recDecs, destRegs) fun setClosure({lambda as {closure, ...}, ...}, dest, l) = let val clResult = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, clResult) (* Basically the same as tuple except we load the address of the closure we've made. *) fun loadFields([], _) = [] | loadFields(f :: rest, n) = let val (code, source, _) = codeToICode(BICExtract f, context, false, Allowed allowInMemMove) val storeValue = [BlockSimple(StoreArgument{ source=source, base=dest, offset=n*Word.toInt wordSize, index=memIndexOrObject, kind=movePolyWord, isMutable=false })] in code @ storeValue @ loadFields(rest, n+1) end val setCodeAddress = if targetArch = ObjectId32Bit then let (* We can't get the code address until run time. 
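       In 32-in-64 mode a closure cell starts with one native (64-bit) word holding the machine-code
       address, which occupies the first two 32-bit poly words; the free variables follow, which is why
       the closure fields are stored from index 2 onwards and closure entry c is accessed at word offset
       c+2 elsewhere in this pass. A rough sketch of the layout arithmetic (assuming a 4-byte poly word,
       purely for illustration):

           val polyWordSize = 4                                (* bytes, 32-in-64 assumption *)
           fun closureFieldOffset c = (c + 2) * polyWordSize   (* byte offset of closure entry c *)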
*) val codeReg = newUReg() val closureReg = newPReg() in map BlockSimple [ LoadArgument{ source=AddressConstant(toMachineWord clResult), dest=closureReg, kind=movePolyWord}, LoadArgument{ source=MemoryLocation{offset=0, base=closureReg, index=ObjectIndex, cache=NONE}, dest=codeReg, kind=Move64Bit}, StoreArgument{ source=RegisterArgument codeReg, offset=0, base=dest, index=ObjectIndex, kind=moveNativeWord, isMutable=false} ] end else let val codeAddr = codeAddressFromClosure clResult val (code, source, _) = moveIfNotAllowed(Allowed allowInMemMove, [], AddressConstant codeAddr) in code @ [BlockSimple( StoreArgument{ source=source, base=dest, offset=0, index=NoMemIndex, kind=movePolyWord, isMutable=false })] end val setFields = setCodeAddress @ loadFields(closure, if targetArch = ObjectId32Bit then 2 else 1) in l @ setFields @ [BlockSimple(LockMutable{addr=dest})] end val setClosures = ListPair.foldlEq setClosure [] (recDecs, destRegs) val code = List.rev(allocClosures @ setClosures) in doBindings(decs, context, code @ tailCode) end | doBindings(BICNullBinding exp :: decs, context, tailCode) = let val (code, _, _) = codeToICodeRev(exp, context, false, NoResult, tailCode) (* And discard result. *) in doBindings(decs, context, code) end | doBindings(BICDecContainer{ addr, size } :: decs, {loopArgs, stackPtr, currHandler, overflowBlock}, tailCode) = let val containerReg = newStackLoc size val () = Array.update(locToPregArray, addr, ContainerLocation{container=containerReg, stackOffset=stackPtr+size}) in doBindings(decs, {loopArgs=loopArgs, stackPtr=stackPtr+size, currHandler=currHandler, overflowBlock=overflowBlock}, BlockSimple(ReserveContainer{size=size, container=containerReg}) :: tailCode) end val (codeBindings, resContext as {stackPtr=finalSp, ...}) = doBindings(bindings, context, tailCode) (* If we have had a container we'll need to reset the stack *) in if initialSp <> finalSp then let val _ = finalSp >= initialSp orelse raise InternalError "codeToICode - stack ptr" val bodyReg = newPReg() and resultReg = asTarget destination val (codeExp, result, haveExited) = codeToICodeRev(exp, resContext, isTail, SpecificPReg bodyReg, codeBindings) val afterAdjustSp = if haveExited then codeExp else BlockSimple(LoadArgument{source=result, dest=resultReg, kind=movePolyWord}) :: BlockSimple(ResetStackPtr{numWords=finalSp-initialSp, preserveCC=false}) :: codeExp in (afterAdjustSp, RegisterArgument resultReg, haveExited) end else codeToICodeRev(exp, resContext, isTail, destination, codeBindings) end | codeToICodeRev(BICConstnt(value, _), _, _, destination, tailCode) = moveIfNotAllowedRev(destination, tailCode, constantAsArgument value) | codeToICodeRev(BICExtract(BICLoadLocal l), {stackPtr, ...}, _, destination, tailCode) = ( case Array.sub(locToPregArray, l) of NoLocation => raise InternalError "codeToICodeRev - local unset" | PregLocation preg => moveIfNotAllowedRev(destination, tailCode, RegisterArgument preg) | ContainerLocation{container, stackOffset} => (* This always returns a ContainerAddr whatever the "allowed". *) (tailCode, ContainerAddr{container=container, stackOffset=stackPtr-stackOffset}, false) ) | codeToICodeRev(BICExtract(BICLoadArgument a), {stackPtr, ...}, _, destination, tailCode) = ( case Vector.sub(argumentVector, a) of ArgInReg{argReg, ...} => (* It was originally in a register. It's now in a preg. *) moveIfNotAllowedRev(destination, tailCode, RegisterArgument argReg) | ArgOnStack{stackOffset, stackReg} => (* Pushed before call. 
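       The recorded stackOffset is relative to the stack pointer as it was on entry to the function, so
       the number of words pushed since then (the current stackPtr) has to be added to locate the
       argument now. Illustrative arithmetic only, with hypothetical values:

           val entrySlot = 2  and pushedSinceEntry = 3
           val currentSlot = entrySlot + pushedSinceEntry   (* = 5, the wordOffset computed below *)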
*) let val target = asTarget destination in (BlockSimple(LoadArgument{ source=StackLocation{wordOffset=stackOffset+stackPtr, container=stackReg, field=0, cache=NONE}, dest=target, kind=moveNativeWord}) :: tailCode, RegisterArgument target, false) end ) | codeToICodeRev(BICExtract(BICLoadClosure c), _, _, destination, tailCode) = let (* Add the number of words for the code address. This is 1 in native but 2 in 32-in-64. *) val offset = case targetArch of ObjectId32Bit => c+2 | _ => c+1 in if c >= List.length closure then raise InternalError "BICExtract: closure" else (); (* N.B. We need to add one to the closure entry because zero is the code address. *) moveIfNotAllowedRev(destination, tailCode, wordOffsetAddress(offset, closureRegAddr)) end | codeToICodeRev(BICExtract BICLoadRecursive, _, _, destination, tailCode) = (* If the closure is empty we must use the constant. We can't guarantee that the caller will actually load the closure register if it knows the closure is empty. *) moveIfNotAllowedRev(destination, tailCode, case closure of [] => AddressConstant(closureAsAddress resultClosure) | _ => RegisterArgument closureRegAddr) | codeToICodeRev(BICField{base, offset}, context, _, destination, tailCode) = let val (codeBase, baseEntry, _) = codeToICodeRev(base, context, false, Allowed allowInPReg, tailCode) in (* This should not be used with a container. *) case baseEntry of RegisterArgument baseR => moveIfNotAllowedRev(destination, codeBase, wordOffsetAddress(offset, baseR)) | _ => raise InternalError "codeToICodeRev-BICField" end | codeToICodeRev(BICLoadContainer{base, offset}, context, _, destination, tailCode) = let val (codeBase, baseEntry, _) = codeToICodeRev(base, context, false, Allowed allowInPReg, tailCode) val multiplier = Word.toInt(nativeWordSize div wordSize) in (* If this is a local container we extract the field. *) case baseEntry of RegisterArgument baseR => moveIfNotAllowedRev(destination, codeBase, wordOffsetAddress(offset*multiplier, baseR)) | ContainerAddr{container, stackOffset} => let val target = asTarget destination val finalOffset = stackOffset+offset val _ = finalOffset >= 0 orelse raise InternalError "offset" in (BlockSimple(LoadArgument{ source=StackLocation{wordOffset=finalOffset, container=container, field=offset, cache=NONE}, dest=target, kind=moveNativeWord}) :: tailCode, RegisterArgument target, false) end | _ => raise InternalError "codeToICodeRev-BICField" end | codeToICodeRev(BICEval{function, argList, resultType, ...}, context as { currHandler, ...}, isTail, destination, tailCode) = let val target = asTarget destination (* Create pregs for the closure and each argument. *) val clPReg = newPReg() (* If we have a constant closure we can go directly to the entry point. If the closure is a single word we don't need to load the closure register. *) val (functionCode, closureEntry, callKind) = case function of BICConstnt(addr, _) => let val addrAsAddr = toAddress addr (* If this is a closure we're still compiling we can't get the code address. However if this is directly recursive we can use the recursive convention. *) in if wordEq(closureAsAddress resultClosure, addr) then (tailCode, [], Recursive) else if flags addrAsAddr <> Address.F_words andalso flags addrAsAddr <> Address.F_closure then (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], FullCall) else if targetArch = ObjectId32Bit then (* We can't actually load the code address here. 
*) let val addrLength = length addrAsAddr val _ = addrLength >= 0w1 orelse raise InternalError "BICEval address" val _ = flags addrAsAddr = Address.F_closure orelse raise InternalError "BICEval address not a closure" in if addrLength = 0w2 then (tailCode, [], ConstantCode addr) else (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], ConstantCode addr) end else (* Native 32 or 64-bits. *) let val addrLength = length addrAsAddr val _ = addrLength >= 0w1 orelse raise InternalError "BICEval address" val codeAddr = loadWord(addrAsAddr, 0w0) val _ = isCode (toAddress codeAddr) orelse raise InternalError "BICEval address not code" in if addrLength = 0w1 then (tailCode, [], ConstantCode codeAddr) else (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], ConstantCode codeAddr) end end | BICExtract BICLoadRecursive => ( (* If the closure is empty we don't need to load rdx *) case closure of [] => (tailCode, [], Recursive) | _ => (BlockSimple(LoadArgument {source=RegisterArgument closureRegAddr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], Recursive) ) | function => (* General case. *) (#1 (codeToICodeRev(function, context, false, SpecificPReg clPReg, tailCode)), [(RegisterArgument clPReg, GenReg edx)], FullCall) (* Optimise arguments. We have to be careful with tail-recursive functions because they need to save any stack arguments that could be overwritten. This is complicated because we overwrite the stack before loading the register arguments. In some circumstances it could be safe but for the moment leave it. This should be safe in the new code-transform but not the old codeICode. Currently we don't allow memory arguments at all. There's the potential for problems later. Memory arguments could possibly lead to aliasing of the stack if the memory actually refers to a container on the stack. That would mess up the code that ensures that stack arguments are stored in the right order. *) (* We don't allow long constants in stack arguments to a tail-recursive call because we may use a memory move to set them. We also don't allow them in 32-in-64 because we can't push an address constant. *) val allowInStackArg = Allowed {anyConstant=not isTail andalso targetArch <> ObjectId32Bit, const32s=true, memAddr=false, existingPreg=not isTail } and allowInRegArg = Allowed {anyConstant=true, const32s=true, memAddr=false, existingPreg=not isTail } (* Load the first arguments into registers and the rest to the stack. *) fun loadArgs ([], _, _, tailCode) = (tailCode, [], []) | loadArgs ((arg, DoubleFloatType) :: args, gRegs, fpReg :: fpRegs, tailCode) = let (* Floating point register argument. *) val (c, r) = codeToPRegRev(arg, context, tailCode) val r1 = newUReg() val c1 = BlockSimple(LoadArgument{source=wordAt r, dest=r1, kind=MoveDouble}) :: c val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c1) in (code, (RegisterArgument r1, fpReg) :: regArgs, stackArgs) end | loadArgs ((arg, SingleFloatType) :: args, gRegs, fpReg :: fpRegs, tailCode) = let (* Floating point register argument. 
*) val (c, r) = codeToPRegRev(arg, context, tailCode) val r1 = newUReg() val c1 = if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument r, dest=r1, cache=NONE}) :: c else BlockSimple(LoadArgument{source=wordAt r, dest=r1, kind=MoveFloat}) :: c val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c1) in (code, (RegisterArgument r1, fpReg) :: regArgs, stackArgs) end | loadArgs ((arg, _) :: args, gReg::gRegs, fpRegs, tailCode) = let (* General register argument. *) val (c, r, _) = codeToICodeRev(arg, context, false, allowInRegArg, tailCode) val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c) in (code, (r, gReg) :: regArgs, stackArgs) end | loadArgs ((arg, _) :: args, [], fpRegs, tailCode) = let (* Stack argument. *) val (c, r, _) = codeToICodeRev(arg, context, false, allowInStackArg, tailCode) val (code, regArgs, stackArgs) = loadArgs(args, [], fpRegs, c) in (code, regArgs, r :: stackArgs) end val (codeArgs, regArgs, stackArgs) = loadArgs(argList, generalArgRegs, floatingPtArgRegs, functionCode) (* If this is at the end of the function and the result types are the same we can use a tail-recursive call. *) val tailCall = isTail andalso resultType = fnResultType val callCode = if tailCall then let val {stackPtr, ...} = context (* The number of arguments currently on the stack. *) val currentStackArgCount = currentStackArgs val newStackArgCount = List.length stackArgs (* The offset of the first argument or the return address if there are no stack arguments. N.B. We actually have currentStackArgCount+1 items on the stack including the return address. Offsets can be negative. *) val stackOffset = stackPtr val firstArgumentAddr = currentStackArgCount fun makeStackArgs([], _) = [] | makeStackArgs(arg::args, offset) = {src=arg, stack=offset} :: makeStackArgs(args, offset-1) val stackArgs = makeStackArgs(stackArgs, firstArgumentAddr) (* The stack adjustment needed to compensate for any items that have been pushed and the differences in the number of arguments. May be positive or negative. This is also the destination address of the return address so when we enter the new function the return address will be the first item on the stack. *) val stackAdjust = firstArgumentAddr - newStackArgCount (* Add an entry for the return address to the stack arguments. *) val returnEntry = {src=StackLocation{wordOffset=stackPtr, container=returnAddressEntry, field=0, cache=NONE}, stack=stackAdjust} (* Because we're storing into the stack we may be overwriting values we want. If the source of any value is a stack location below the current stack pointer we load it except in the special case where the destination is the same as the source (which is often the case with the return address). 
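       As an illustration of the offset arithmetic above (hypothetical numbers, not the real generated
       code): if the current function received 3 stack arguments and the tail call passes only 1, then

           val firstArgumentAddr = 3                                (* offset of the first incoming stack arg *)
           val newStackArgCount = 1
           val stackAdjust = firstArgumentAddr - newStackArgCount   (* = 2: the stack shrinks by two words *)

       and the return address is stored at offset stackAdjust, so it is again the first item on the stack
       when the callee is entered.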
*) local fun loadArgs [] = ([], []) | loadArgs (arg :: rest) = let val (loadCode, loadedArgs) = loadArgs rest in case arg of {src as StackLocation{wordOffset, ...}, stack} => if wordOffset = stack+stackOffset (* Same location *) orelse stack+stackOffset < 0 (* Storing above current top of stack *) orelse stackOffset+wordOffset > ~ stackAdjust (* Above the last argument *) then (loadCode, arg :: loadedArgs) else let val preg = newPReg() in (BlockSimple(LoadArgument{source=src, dest=preg, kind=moveNativeWord}) :: loadCode, {src=RegisterArgument preg, stack=stack} :: loadedArgs) end | _ => (loadCode, arg :: loadedArgs) end in val (loadStackArgs, loadedStackArgs) = loadArgs(returnEntry :: stackArgs) end in BlockExit(TailRecursiveCall{regArgs=closureEntry @ regArgs, stackArgs=loadedStackArgs, stackAdjust = stackAdjust, currStackSize=stackOffset, callKind=callKind, workReg=newPReg()}) :: loadStackArgs @ codeArgs end else let val (moveResult, resReg) = case resultType of GeneralType => ([], target) | DoubleFloatType => let val fpRegDest = newUReg() in (boxOrTagReal(fpRegDest, target, BuiltIns.PrecDouble), fpRegDest) end | SingleFloatType => let val fpRegDest = newUReg() in (boxOrTagReal(fpRegDest, target, BuiltIns.PrecSingle), fpRegDest) end val call = FunctionCall{regArgs=closureEntry @ regArgs, stackArgs=stackArgs, dest=resReg, realDest=resultReg resultType, callKind=callKind, saveRegs=[]} val callBlock = case currHandler of NONE => BlockSimple call :: codeArgs | SOME h => BlockOptionalHandle{call=call, handler=h, label=newLabel()} :: codeArgs in moveResult @ callBlock end in (callCode, RegisterArgument target, tailCall (* We've exited if this was a tail jump *)) end | codeToICodeRev(BICGetThreadId, _, _, destination, tailCode) = (* Get the ID of the current thread. *) let val target = asTarget destination in (BlockSimple(LoadMemReg{offset=memRegThreadSelf, dest=target}) :: tailCode, RegisterArgument target, false) end | codeToICodeRev(BICUnary instr, context, isTail, destination, tailCode) = codeToICodeUnaryRev(instr, context, isTail, destination, tailCode) | codeToICodeRev(BICBinary instr, context, isTail, destination, tailCode) = codeToICodeBinaryRev(instr, context, isTail, destination, tailCode) | codeToICodeRev(BICArbitrary{oper, shortCond, arg1, arg2, longCall}, context, _, destination, tailCode) = let val startLong = newLabel() and resultLabel = newLabel() val target = asTarget destination val condResult = newMergeReg() (* Overflow check - if there's an overflow jump to the long precision case. *) fun jumpOnOverflow ccRef = let val noOverFlow = newLabel() in [BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=startLong, falseJump=noOverFlow }), BlockLabel noOverFlow] end val (longCode, _, _) = codeToICode(longCall, context, false, SpecificPReg condResult) (* We could use a tail jump here if this is a tail. *) val (code, dest, haveExited) = ( (* Test the tag bits and skip to the long case if either is clear. *) List.rev(codeConditionRev(shortCond, context, false, startLong, [])) @ (* Try evaluating as fixed precision and jump if we get an overflow. *) codeFixedPrecisionArith(oper, arg1, arg2, context, condResult, jumpOnOverflow) @ (* If we haven't had an overflow jump to the result. *) [BlockFlow(Unconditional resultLabel), (* If we need to use the full long-precision call we come here. 
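       The overall shape is the usual fast-path/slow-path arrangement: try the operation on the tagged
       fixed-precision representation and fall back to the arbitrary-precision call if either the tag test
       or the overflow check fails. An SML-level analogue of the control flow (illustrative only; the
       generated code tests the hardware overflow flag rather than catching an exception, and fastOp/slowOp
       are hypothetical stand-ins):

           fun withFallback fastOp slowOp args =
               fastOp args handle Overflow => slowOp args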
*) BlockLabel startLong] @ longCode @ [BlockLabel resultLabel, BlockSimple(LoadArgument{source=RegisterArgument condResult, dest=target, kind=movePolyWord})], RegisterArgument target, false) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICAllocateWordMemory instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeAllocate(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICLambda(lambda as { closure = [], ...}), _, _, destination, tailCode) = (* Empty closure - create a constant closure for any recursive calls. *) let val closure = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, closure) (* Return the closure itself as the value. *) in moveIfNotAllowedRev(destination, tailCode, AddressConstant(closureAsAddress closure)) end | codeToICodeRev(BICLambda(lambda as { closure, ...}), context, isTail, destination, tailCode) = (* Non-empty closure. Ignore stack closure option at the moment. *) let val closureRef = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, closureRef) in if targetArch = ObjectId32Bit then let val target = asTarget destination val memAddr = newPReg() fun loadFields([], n, tlCode) = let val codeReg = newUReg() val closureReg = newPReg() in (* The code address occupies the first native word but we need to extract it at run-time. We don't currently have a way to have 64-bit constants. *) BlockSimple( StoreArgument{ source=RegisterArgument codeReg, offset=0, base=memAddr, index=ObjectIndex, kind=moveNativeWord, isMutable=false}) :: BlockSimple(LoadArgument{ source=MemoryLocation{offset=0, base=closureReg, index=ObjectIndex, cache=NONE}, dest=codeReg, kind=Move64Bit}) :: BlockSimple(LoadArgument{ source=AddressConstant(toMachineWord closureRef), dest=closureReg, kind=movePolyWord}) :: BlockSimple(AllocateMemoryOperation{size=n, flags=F_closure, dest=memAddr, saveRegs=[]}) :: tlCode end | loadFields(f :: rest, n, tlCode) = let (* Defer the evaluation if possible. We may have a constant that we can't move directly but it's better to load it after the allocation otherwise we will have to push the register if we need to GC. *) val (code1, source1, _) = codeToICodeRev(BICExtract f, context, false, Allowed allowDefer, tlCode) val restAndAlloc = loadFields(rest, n+1, code1) val (code2, source, _) = moveIfNotAllowedRev(Allowed allowInMemMove, restAndAlloc, source1) val storeValue = BlockSimple(StoreArgument{ source=source, offset=n*Word.toInt wordSize, base=memAddr, index=ObjectIndex, kind=movePolyWord, isMutable=false}) in storeValue :: code2 end val code = BlockSimple InitialisationComplete :: BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord}) :: loadFields(closure, 2, tailCode) in (code, RegisterArgument target, false) end (* Treat it as a tuple with the code as the first field. *) else codeToICodeRev(BICTuple(BICConstnt(codeAddressFromClosure closureRef, []) :: map BICExtract closure), context, isTail, destination, tailCode) end | codeToICodeRev(BICCond(test, thenPt, elsePt), context, isTail, NoResult, tailCode) = let (* If we don't want the result but are only evaluating for side-effects we may be able to optimise special cases. This was easier in the forward case but for now we don't bother and leave it to the lower levels. 
*) val startElse = newLabel() and skipElse = newLabel() val codeTest = codeConditionRev(test, context, false, startElse, tailCode) val (codeThen, _, _) = codeToICodeRev(thenPt, context, isTail, NoResult, codeTest) val (codeElse, _, _) = codeToICodeRev(elsePt, context, isTail, NoResult, BlockLabel startElse :: BlockFlow(Unconditional skipElse) :: codeThen) in (BlockLabel skipElse :: codeElse, (* Unit result *) IntegerConstant(tag 0), false) end | codeToICodeRev(BICCond(test, thenPt, elsePt), context, isTail, destination, tailCode) = let (* Because we may push the result onto the stack we have to create a new preg to hold the result and then copy that to the final result. *) (* If this is a tail each arm will exit separately and neither will return a result. *) val target = asTarget destination val condResult = newMergeReg() val thenTarget = if isTail then newPReg() else condResult val startElse = newLabel() val testCode = codeConditionRev(test, context, false, startElse, tailCode) (* Put the result in the target register. *) val (thenCode, _, thenExited) = codeToICodeRev(thenPt, context, isTail, SpecificPReg thenTarget, testCode) (* Add a jump round the else-part except that if this is a tail we return. The then-part could have exited e.g. with a raise or a loop. *) val (exitThen, thenLabel, elseTarget) = if thenExited then (thenCode, [], target (* Can use original target. *)) else if isTail then (returnInstruction(context, thenTarget, thenCode), [], newPReg()) else let val skipElse = newLabel() in (BlockFlow(Unconditional skipElse) :: thenCode, [BlockSimple(LoadArgument{source=RegisterArgument condResult, dest=target, kind=movePolyWord}), BlockLabel skipElse], condResult) end val (elseCode, _, elseExited) = codeToICodeRev(elsePt, context, isTail, SpecificPReg elseTarget, BlockLabel startElse :: exitThen) (* Add a return to the else-part if necessary so we will always exit on a tail. *) val exitElse = if isTail andalso not elseExited then returnInstruction(context, elseTarget, elseCode) else elseCode in (thenLabel @ exitElse, RegisterArgument target, isTail orelse thenExited andalso elseExited) end | codeToICodeRev(BICCase { cases, test, default, isExhaustive, firstIndex}, context, isTail, destination, tailCode) = let (* We have to create a new preg for the result in case we need to push it to the stack. *) val targetReg = newMergeReg() local val initialTestReg = newPReg() val (testCode, _, _) = codeToICodeRev(test, context, false, SpecificPReg initialTestReg, tailCode) (* Subtract the minimum value so the value we're testing is always in the range of (tagged) 0 to the maximum. It is possible to adjust the value when computing the index but that can lead to overflows during compilation if the minimum is very large or small. We can ignore overflow and allow values to wrap round. *) in val (testCode, testReg) = if firstIndex = 0w0 then (testCode, initialTestReg) else let val newTestReg = newPReg() val subtract = BlockSimple(ArithmeticFunction{oper=SUB, resultReg=newTestReg, operand1=initialTestReg, operand2=IntegerConstant(semitag(Word.toLargeInt firstIndex)), ccRef=newCCRef(), opSize=polyWordOpSize}) in (subtract :: testCode, newTestReg) end end val workReg = newPReg() (* Unless this is exhaustive we need to add a range check. 
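       Because the minimum has already been subtracted, a single unsigned comparison is enough: a value
       below the original minimum wraps round to a very large unsigned number and therefore also fails the
       test against the number of cases. Illustrative sketch with hypothetical values (the real test is on
       the tagged value):

           val numberOfCases : word = 0w5
           fun inRange w = w < numberOfCases               (* one unsigned test covers both directions *)
           val belowMinAccepted = inRange (0w2 - 0w4)      (* false: the subtraction wrapped to a huge word *)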
*) val (rangeCheck, extraDefaults) = if isExhaustive then (testCode, []) else let val defLab1 = newLabel() val tReg1 = newPReg() val ccRef1 = newCCRef() (* Since we've subtracted any minimum we only have to check whether the value is greater (unsigned) than the maximum. *) val numberOfCases = LargeInt.fromInt(List.length cases) val continueLab = newLabel() val testCode2 = BlockLabel continueLab :: BlockFlow(Conditional{ccRef=ccRef1, condition=JNB, trueJump=defLab1, falseJump=continueLab}) :: BlockSimple(WordComparison{arg1=tReg1, arg2=IntegerConstant(tag numberOfCases), ccRef=ccRef1, opSize=polyWordOpSize}) :: BlockSimple(LoadArgument {source=RegisterArgument testReg, dest=tReg1, kind=movePolyWord}) :: testCode in (testCode2, [defLab1]) end (* Make a label for each item in the list. *) val codeLabels = map (fn _ => newLabel()) cases (* Create an exit label in case it's needed. *) val labelForExit = if isTail then ~1 (* Illegal label. *) else newLabel() (* Generate the code for each of the cases and the default. We need to put an unconditional branch after each to skip the other cases. *) fun codeCases (SOME c :: otherCases, startLabel :: otherLabels, tailCode) = let val caseTarget = if isTail then newPReg() else targetReg (* Put in the case with a jump to the end of the sequence. *) val (codeThisCase, _, caseExited) = codeToICodeRev(c, context, isTail, SpecificPReg caseTarget, BlockLabel startLabel :: tailCode) val exitThisCase = if caseExited then codeThisCase else if isTail then returnInstruction(context, caseTarget, codeThisCase) else BlockFlow(Unconditional labelForExit) :: codeThisCase in codeCases(otherCases, otherLabels, exitThisCase) end | codeCases(NONE :: otherCases, _ :: otherLabels, tailCode) = codeCases(otherCases, otherLabels, tailCode) | codeCases ([], [], tailCode) = let (* We need to add labels for all the gaps we filled and also for a "default" label for the indexed-case instruction itself as well as any range checks. *) fun addDefault (startLabel, NONE, l) = BlockLabel startLabel :: l | addDefault (_, SOME _, l) = l fun asForward l = BlockLabel l val dLabs = map asForward extraDefaults @ tailCode val defLabels = ListPair.foldlEq addDefault dLabs (codeLabels, cases) val defaultTarget = if isTail then newPReg() else targetReg val (defaultCode, _, defaultExited) = codeToICodeRev(default, context, isTail, SpecificPReg defaultTarget, defLabels) in (* Put in the default. Because this is the last we don't need to jump round it. However if this is a tail and we haven't exited we put in a return. That way the case will always have exited if this is a tail. *) if isTail andalso not defaultExited then returnInstruction(context, defaultTarget, defaultCode) else defaultCode end | codeCases _ = raise InternalError "codeCases: mismatch" val codedCases = codeCases(cases, codeLabels, BlockFlow(IndexedBr codeLabels) :: BlockSimple(IndexedCaseOperation{testReg=testReg, workReg=workReg}) :: rangeCheck) (* We can now copy to the target. If we need to push the result this load will be converted into a push. *) val target = asTarget destination val copyToTarget = if isTail then codedCases else BlockSimple(LoadArgument{source=RegisterArgument targetReg, dest=target, kind=movePolyWord}) :: BlockLabel labelForExit :: codedCases in (copyToTarget, RegisterArgument target, isTail (* We have always exited on a tail. 
*)) end | codeToICodeRev(BICBeginLoop {loop, arguments}, context as { stackPtr, currHandler, overflowBlock, ...}, isTail, destination, tailCode) = let val target = asTarget destination fun codeArgs ([], tailCode) = ([], tailCode) | codeArgs (({value, addr}, _) :: rest, tailCode) = let val pr = newPReg() val () = Array.update(locToPregArray, addr, PregLocation pr) val (code, _, _) = codeToICodeRev(value, context, false, SpecificPReg pr, tailCode) val (pregs, othercode) = codeArgs(rest, code) in (pr::pregs, othercode) end val (loopRegs, argCode) = codeArgs(arguments, tailCode) val loopLabel = newLabel() val (loopBody, _, loopExited) = codeToICodeRev(loop, {loopArgs=SOME (loopRegs, loopLabel, stackPtr), stackPtr=stackPtr, currHandler=currHandler, overflowBlock=overflowBlock }, isTail, SpecificPReg target, BlockLabel loopLabel :: BlockSimple BeginLoop :: argCode) in (loopBody, RegisterArgument target, loopExited) end | codeToICodeRev(BICLoop args, context as {loopArgs=SOME (loopRegs, loopLabel, loopSp), stackPtr, currHandler, ...}, _, destination, tailCode) = let val target = asTarget destination (* Registers to receive the evaluated arguments. We can't put the values into the loop variables yet because the values could depend on the current values of the loop variables. *) val argPRegs = map(fn _ => newPReg()) args val codeArgs = ListPair.foldlEq(fn ((arg, _), pr, l) => #1 (codeToICodeRev(arg, context, false, SpecificPReg pr, l))) tailCode (args, argPRegs) val jumpArgs = ListPair.mapEq(fn (s, l) => (RegisterArgument s, l)) (argPRegs, loopRegs) (* If we've allocated a container in the loop we have to remove it before jumping back. *) val stackReset = if loopSp = stackPtr then codeArgs else BlockSimple(ResetStackPtr{numWords=stackPtr-loopSp, preserveCC=false}) :: codeArgs val jumpLoop = JumpLoop{regArgs=jumpArgs, stackArgs=[], checkInterrupt=SOME[], workReg=NONE} (* "checkInterrupt" could result in a Interrupt exception so we treat this like a function call. *) val code = case currHandler of NONE => BlockFlow(Unconditional loopLabel) :: BlockSimple jumpLoop :: stackReset | SOME h => BlockOptionalHandle{call=jumpLoop, handler=h, label=loopLabel} :: stackReset in (code, RegisterArgument target, true) end | codeToICodeRev(BICLoop _, {loopArgs=NONE, ...}, _, _, _) = raise InternalError "BICLoop without BICBeginLoop" | codeToICodeRev(BICRaise exc, context as { currHandler, ...}, _, destination, tailCode) = let val packetReg = newPReg() val (code, _, _) = codeToICodeRev(exc, context, false, SpecificPReg packetReg, tailCode) val raiseCode = RaiseExceptionPacket{packetReg=packetReg} val block = case currHandler of NONE => BlockExit raiseCode | SOME h => BlockRaiseAndHandle(raiseCode, h) in (block :: code, RegisterArgument(asTarget destination), true (* Always exits *)) end | codeToICodeRev(BICHandle{exp, handler, exPacketAddr}, context as { stackPtr, loopArgs, overflowBlock, ... }, isTail, destination, tailCode) = let (* As with BICCond and BICCase we need to create a new register for the result in case we need to push it to the stack. *) val handleResult = newMergeReg() val handlerLab = newLabel() and startHandling = newLabel() val (bodyTarget, handlerTarget) = if isTail then (newPReg(), newPReg()) else (handleResult, handleResult) (* TODO: Even if we don't actually want a result we force one in here by using "asTarget". *) (* The expression cannot be treated as a tail because the handler has to be removed after. It may "exit" if it has raised an unconditional exception. 
If it has we mustn't generate a PopExceptionHandler because there won't be any result for resultReg. We need to add two words to the stack to account for the items pushed by PushExceptionHandler. We create an instruction to push the handler followed by a block fork to the start of the code and, potentially the handler, then a label to start the code that the handler is in effect for. *) val initialCode = BlockLabel startHandling :: BlockFlow(SetHandler{handler=handlerLab, continue=startHandling}) :: BlockSimple(PushExceptionHandler{workReg=newPReg()}) :: tailCode val (expCode, _, expExit) = codeToICodeRev(exp, {stackPtr=stackPtr+2, loopArgs=loopArgs, currHandler=SOME handlerLab, overflowBlock=overflowBlock}, false (* Not tail *), SpecificPReg bodyTarget, initialCode) (* If this is the tail we can replace the jump at the end of the handled code with returns. If the handler has exited we don't need a return there. Otherwise we need to add an unconditional jump to skip the handler. *) val (atExpEnd, skipExpLabel) = case (isTail, expExit) of (true, true) => (* Tail and exited. *) (expCode, NONE) | (true, false) => (* Tail and not exited. *) (returnInstruction(context, bodyTarget, BlockSimple(PopExceptionHandler{workReg=newPReg()}) :: expCode), NONE) | (false, true) => (* Not tail but exited. *) (expCode, NONE) | (false, false) => let val skipHandler = newLabel() in (BlockFlow(Unconditional skipHandler) :: BlockSimple(PopExceptionHandler{workReg=newPReg()}) :: expCode, SOME skipHandler) end (* Make a register to hold the exception packet and put eax into it. *) val packetAddr = newPReg() val () = Array.update(locToPregArray, exPacketAddr, PregLocation packetAddr) val (handleCode, _, handleExit) = codeToICodeRev(handler, context, isTail, SpecificPReg handlerTarget, BlockSimple(BeginHandler{workReg=newPReg(), packetReg=packetAddr}) :: BlockLabel handlerLab :: atExpEnd) val target = asTarget destination val afterHandler = case (isTail, handleExit) of (true, true) => (* Tail and exited. *) handleCode | (true, false) => (* Tail and not exited. *) returnInstruction(context, handlerTarget, handleCode) | (false, _) => (* Not tail. *) handleCode val addLabel = case skipExpLabel of SOME lab => BlockLabel lab:: afterHandler | NONE => afterHandler in (BlockSimple(LoadArgument{source=RegisterArgument handleResult, dest=target, kind=movePolyWord}) :: addLabel, RegisterArgument target, isTail) end | codeToICodeRev(BICTuple fields, context, _, destination, tailCode) = let (* TODO: This is a relic of the old fall-back code-generator. It required the result of a tuple to be at the top of the stack. It should be changed. *) val target = asTarget destination (* Actually we want this. *) val memAddr = newPReg() fun loadFields([], n, tlCode) = BlockSimple(AllocateMemoryOperation{size=n, flags=0w0, dest=memAddr, saveRegs=[]}) :: tlCode | loadFields(f :: rest, n, tlCode) = let (* Defer the evaluation if possible. We may have a constant that we can't move directly but it's better to load it after the allocation otherwise we will have to push the register if we need to GC. 
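       In other words, for a constant field the intended order is: allocate the cell first, then load the
       constant, then store it into the new cell, so the constant is never live across the allocation, which
       is the point at which a garbage collection may occur. A toy sketch of the ordering only (the names
       are hypothetical, not real ICode constructors):

           datatype step = Allocate | LoadConstant of int | StoreField of int
           val preferredOrder = [Allocate, LoadConstant 42, StoreField 0]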
*) val (code1, source1, _) = codeToICodeRev(f, context, false, Allowed allowDefer, tlCode) val restAndAlloc = loadFields(rest, n+1, code1) val (code2, source, _) = moveIfNotAllowedRev(Allowed allowInMemMove, restAndAlloc, source1) val storeValue = BlockSimple(StoreArgument{ source=source, offset=n*Word.toInt wordSize, base=memAddr, index=memIndexOrObject, kind=movePolyWord, isMutable=false}) in storeValue :: code2 end val code = BlockSimple InitialisationComplete :: BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord}) :: loadFields(fields, 0, tailCode) in (code, RegisterArgument target, false) end (* Copy the source tuple into the container. There are important special cases for both the source tuple and the container. If the source tuple is a BICTuple we have the fields and can store them without creating a tuple on the heap. If the destination is a local container we can store directly into the stack. *) | codeToICodeRev(BICSetContainer{container, tuple, filter}, context as {stackPtr, ...}, _, destination, tailCode) = let local fun createStore containerReg (source, destWord) = StoreArgument{source=source, offset=destWord*Word.toInt nativeWordSize, base=containerReg, index=NoMemIndex, kind=moveNativeWord, isMutable=false} in val findContainer = case container of BICExtract(BICLoadLocal l) => ( case Array.sub(locToPregArray, l) of ContainerLocation{container, stackOffset} => let fun storeToStack(source, destWord) = StoreToStack{source=source, container=container, field=destWord, stackOffset=stackPtr-stackOffset+destWord} in SOME storeToStack end | _ => NONE ) | _ => NONE val (codeContainer, storeInstr) = case findContainer of SOME storeToStack => (tailCode, storeToStack) | NONE => let val containerTarget = newPReg() val (codeContainer, _, _) = codeToICodeRev(container, context, false, SpecificPReg containerTarget, tailCode) in (codeContainer, createStore containerTarget) end end val filterLength = BoolVector.length filter val code = case tuple of BICTuple cl => let (* In theory it's possible that the tuple could contain fields that are not used but nevertheless need to be evaluated for their side-effects. Create all the fields and push to the stack. *) fun codeField(arg, (regs, tailCode)) = let val (c, r, _) = codeToICodeRev(arg, context, false, Allowed allowInMemMove, tailCode) in (r :: regs, c) end val (pregsRev, codeFields) = List.foldl codeField ([], codeContainer) cl val pregs = List.rev pregsRev fun copyField(srcReg, (sourceWord, destWord, tailCode)) = if sourceWord < filterLength andalso BoolVector.sub(filter, sourceWord) then (sourceWord+1, destWord+1, BlockSimple(storeInstr(srcReg, destWord)) :: tailCode) else (sourceWord+1, destWord, tailCode) val (_, _, resultCode) = List.foldl copyField (0, 0, codeFields) pregs in resultCode end | tuple => let (* Copy a heap tuple. It is possible that this is another container in which case we must load the fields directly. We mustn't load its address and then copy because loading the address would be the last reference and might cause the container to be reused prematurely. 
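       Copying field by field through StackLocation references keeps the container live for every access.
       At the SML level the filtered copy that follows behaves roughly like this self-contained sketch
       (get, put and keep are hypothetical stand-ins for the load, store and filter used below):

           fun copyFiltered (get, put, keep, len) =
               let fun loop (src, dst) =
                       if src = len then ()
                       else if keep src then (put (dst, get src); loop (src + 1, dst + 1))
                       else loop (src + 1, dst)
               in loop (0, 0) end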
*) val findContainer = case tuple of BICExtract(BICLoadLocal l) => ( case Array.sub(locToPregArray, l) of ContainerLocation{container, stackOffset} => let fun getAddr sourceWord = StackLocation{wordOffset=stackPtr-stackOffset+sourceWord, container=container, field=sourceWord, cache=NONE} in SOME getAddr end | _ => NONE ) | _ => NONE val (codeTuple, loadField) = case findContainer of SOME getAddr => (codeContainer, getAddr) | NONE => let val tupleTarget = newPReg() val (codeTuple, _, _) = codeToICodeRev(tuple, context, false, SpecificPReg tupleTarget, codeContainer) fun loadField sourceWord = wordOffsetAddress(sourceWord, tupleTarget) in (codeTuple, loadField) end fun copyContainer(sourceWord, destWord, tailCode) = if sourceWord = filterLength then tailCode else if BoolVector.sub(filter, sourceWord) then let val loadReg = newPReg() val code = BlockSimple(storeInstr(RegisterArgument loadReg, destWord)) :: BlockSimple(LoadArgument{source=loadField sourceWord, dest=loadReg, kind=movePolyWord}) :: tailCode in copyContainer(sourceWord+1, destWord+1, code) end else copyContainer(sourceWord+1, destWord, tailCode) in copyContainer(0, 0, codeTuple) end in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeRev(BICTagTest{test, tag=tagValue, ...}, context, _, destination, tailCode) = (* Check the "tag" word of a union (datatype). N.B. Not the same as testing the tag bit of a word. *) let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (testCode, tagArg, _) = codeToICodeRev(test, context, false, Allowed memOrReg, tailCode) val target = asTarget destination in (makeBoolResultRev(JE, ccRef, target, (* Use CompareLiteral because the tag must fit in 32-bits. 
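       CompareLiteral encodes the constant as an x86 immediate, and immediates are limited to 32 bits
       (sign-extended on X86/64); constructor tags are small words, so their tagged form always satisfies
       this. A hypothetical check mirroring the is32bit test used elsewhere in this structure:

           fun fitsIn32 (i: LargeInt.int) = ~0x80000000 <= i andalso i <= 0x7FFFFFFF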
*) BlockSimple(CompareLiteral{arg1=tagArg, arg2=tag(Word.toLargeInt tagValue), opSize=polyWordOpSize, ccRef=ccRef}) :: testCode), RegisterArgument target, false) end | codeToICodeRev(BICLoadOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeLoad(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICStoreOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeStore(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICBlockOperation ({kind=BlockOpEqualByte, sourceLeft, destRight, length}), context, _, destination, tailCode) = let val vec1Reg = newUReg() and vec2Reg = newUReg() val ccRef = newCCRef() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddressRev(sourceLeft, true, context, tailCode) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddressRev(destRight, true, context, leftCode) val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToRegRev(length, false (* unsigned *), context, rightCode) val target = asTarget destination val code = makeBoolResultRev(JE, ccRef, target, BlockSimple(CompareByteVectors{ vec1Addr=vec1Reg, vec2Addr=vec2Reg, length=lengthArg, ccRef=ccRef }) :: lengthUntag @ BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg}) :: rightUntag @ BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg}) :: leftUntag @ lengthCode) in (code, RegisterArgument target, false) end | codeToICodeRev(BICBlockOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeBlock(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end and codeToICodeUnaryRev({oper=BuiltIns.NotBoolean, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val ccRef = newCCRef() val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} val (argCode, testDest, _) = codeToICodeRev(arg1, context, false, allow, tailCode) in (* Test the argument and return a boolean result. If either the argument is a condition or the result is used in a test this will be better than using XOR. *) (makeBoolResultRev(JNE, ccRef, target, BlockSimple(CompareLiteral{arg1=testDest, arg2=tag 1, opSize=polyWordOpSize, ccRef=ccRef}) :: argCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.IsTaggedValue, arg1}, context, _, destination, tailCode) = let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (testCode, testResult, _) = codeToICodeRev(arg1, context, false, Allowed memOrReg, tailCode) (* Test the tag bit. This sets the zero bit if the value is untagged. *) val target = asTarget destination in (makeBoolResultRev(JNE, ccRef, target, BlockSimple(TestTagBit{arg=testResult, ccRef=ccRef}) :: testCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.MemoryCellLength, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val argReg1 = newUReg() and argReg2 = newUReg() and argReg3 = newUReg() (* These are untagged until the tag is put in. *) and ccRef1 = newCCRef() and ccRef2 = newCCRef() and ccRef3 = newCCRef() (* Get the length of a memory cell (heap object). 
We need to mask out the top byte containing the flags and to tag the result. The mask is 56 bits on 64-bit which won't fit in an inline constant. Since we have to shift it anyway we might as well do this by shifts. *) val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=argReg3, operand2=IntegerConstant 1, ccRef=ccRef3, opSize=polyWordOpSize}) :: BlockSimple(ShiftOperation{shift=SHR, resultReg=argReg3, operand=argReg2, shiftAmount=IntegerConstant 7 (* 8-tagshift*), ccRef=ccRef2, opSize=polyWordOpSize }) :: BlockSimple(ShiftOperation{shift=SHL, resultReg=argReg2, operand=argReg1, shiftAmount=IntegerConstant 8, ccRef=ccRef1, opSize=polyWordOpSize }) :: BlockSimple(LoadArgument{source=wordOffsetAddress(~1, addrReg), dest=argReg1, kind=movePolyWord}) :: argCode, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.MemoryCellFlags, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val argReg1 = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(TagValue{ source=argReg1, dest=target, isSigned=false, opSize=OpSize32 }) :: BlockSimple(LoadArgument{source=MemoryLocation{offset= ~1, base=addrReg, index=memIndexOrObject, cache=NONE}, dest=argReg1, kind=MoveByte}) :: argCode, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.ClearMutableFlag, arg1}, context, _, destination, tailCode) = let val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in moveIfNotAllowedRev(destination, BlockSimple(LockMutable{addr=addrReg}) :: argCode, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeUnaryRev({oper=BuiltIns.AtomicIncrement, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val incrReg = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) val code = (* We want the result to be the new value but we've returned the old value. *) BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=incrReg, operand2=IntegerConstant(semitag 1), ccRef=newCCRef(), opSize=polyWordOpSize}) :: BlockSimple(AtomicExchangeAndAdd{ base=addrReg, source=incrReg }) :: BlockSimple(LoadArgument{source=IntegerConstant(semitag 1), dest=incrReg, kind=movePolyWord}) :: argCode in (code, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.AtomicDecrement, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val incrReg = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) val code = BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=incrReg, operand2=IntegerConstant(semitag 1), ccRef=newCCRef(), opSize=polyWordOpSize}) :: BlockSimple(AtomicExchangeAndAdd{ base=addrReg, source=incrReg }) :: BlockSimple(LoadArgument{source=IntegerConstant(semitag ~1), dest=incrReg, kind=movePolyWord}) :: argCode in (code, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.AtomicReset, arg1}, context, _, destination, tailCode) = let (* This is needed only for the interpreted version where we have a single real mutex to interlock atomic increment and decrement. We have to use the same mutex to interlock clearing a mutex. On the X86 we use hardware locking and the hardware guarantees that an assignment of a word will be atomic. *) val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) (* Store tagged 1 in the mutex. This is the unlocked value. 
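       All the values held in the mutex cell are tagged, which is why the atomic operations above add or
       subtract semitag 1 (the machine difference between two consecutive tagged values) rather than 1.
       Illustrative arithmetic, assuming the usual representation of a short value n as 2n+1:

           fun tagged n = 2 * n + 1      (* hypothetical stand-in for the real tag function *)
           val unlocked = tagged 1       (* = 3, the value stored here to release the mutex *)
           val afterLock = unlocked + 2  (* adding semitag 1 gives 5 = tagged 2 *)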
*) val code = BlockSimple(StoreArgument{source=IntegerConstant(tag 1), base=addrReg, index=memIndexOrObject, offset=0, kind=movePolyWord, isMutable=true}) :: argCode in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeUnaryRev({oper=BuiltIns.LongWordToTagged, arg1}, context, _, destination, tailCode) = let (* This is exactly the same as StringLengthWord at the moment. TODO: introduce a new ICode entry so that the next stage can optimise longword operations. *) val target = asTarget destination val argReg1 = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) val code = BlockSimple(TagValue{ source=argReg1, dest=target, isSigned=false, opSize=polyWordOpSize }) :: BlockSimple(LoadArgument{source=wordAt addrReg, dest=argReg1, kind=movePolyWord}) :: argCode in (code, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.SignedToLongWord, arg1}, context, _, destination, tailCode) = let val addrReg = newPReg() and untagArg = newUReg() val (argCode, argReg1) = codeToPRegRev(arg1, context, tailCode) val (signExtend, sxReg) = case targetArch of ObjectId32Bit => let val sReg = newUReg() in ([BlockSimple(SignExtend32To64{source=RegisterArgument argReg1, dest=sReg})], sReg) end | _ => ([], argReg1) val code = BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untagArg, dest=addrReg, saveRegs=[]}) :: BlockSimple(UntagValue{source=sxReg, dest=untagArg, isSigned=true, cache=NONE, opSize=nativeWordOpSize}) :: signExtend @ argCode in moveIfNotAllowedRev(destination, code, RegisterArgument addrReg) end | codeToICodeUnaryRev({oper=BuiltIns.UnsignedToLongWord, arg1}, context, _, destination, tailCode) = let val addrReg = newPReg() and untagArg = newUReg() val (argCode, argReg1) = codeToPRegRev(arg1, context, tailCode) val code = BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untagArg, dest=addrReg, saveRegs=[]}) :: (* We can just use a polyWord operation to untag the unsigned value. *) BlockSimple(UntagValue{source=argReg1, dest=untagArg, isSigned=false, cache=NONE, opSize=polyWordOpSize}) :: argCode in moveIfNotAllowedRev(destination, code, RegisterArgument addrReg) end | codeToICodeUnaryRev({oper=BuiltIns.RealNeg precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val fpRegSrc = newUReg() and fpRegDest = newUReg() and sse2ConstReg = newUReg() (* The SSE2 code uses an SSE2 logical operation to flip the sign bit. This requires the values to be loaded into registers first because the logical operations require 128-bit operands. *) val (argCode, aReg1) = codeToPReg(arg1, context) (* Double precision values are always boxed; single precision values are boxed only if they won't fit in a word. Otherwise we can use tagging. *) open BuiltIns val load = if precision = PrecDouble then BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveDouble}) else if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpRegSrc, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveFloat}) val code = case fpMode of FPModeX87 => [BlockSimple(X87FPUnaryOps{ fpOp=FCHS, dest=fpRegDest, source=fpRegSrc})] | FPModeSSE2 => let (* In single precision mode the sign bit is in the low 32-bits. There may be a better way to load it.
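       The negation itself is just an XOR with a constant in which only the IEEE sign bit is set: bit 63
       for a double, bit 31 for a float, hence "the low 32-bits" above. Presumably realSignBit and
       floatSignBit are the addresses of constants with these bit patterns; written out for illustration:

           val doubleSignPattern : IntInf.int = 0x8000000000000000   (* bit 63 *)
           val floatSignPattern  : IntInf.int = 0x80000000           (* bit 31 *)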
*) val signBit = if precision = PrecDouble then realSignBit else floatSignBit in [BlockSimple(LoadArgument{source=AddressConstant signBit, dest=sse2ConstReg, kind=MoveDouble}), BlockSimple(SSE2FPBinary{opc=SSE2BXor, resultReg=fpRegDest, arg1=fpRegSrc, arg2=RegisterArgument sse2ConstReg})] end val result = boxOrTagReal(fpRegDest, target, precision) in (revApp(argCode @ load :: code @ result, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealAbs precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val fpRegSrc = newUReg() and fpRegDest = newUReg() and sse2ConstReg = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) open BuiltIns val load = if precision = PrecDouble then BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveDouble}) else if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpRegSrc, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveFloat}) val code = case fpMode of FPModeX87 => [BlockSimple(X87FPUnaryOps{ fpOp=FABS, dest=fpRegDest, source=fpRegSrc})] | FPModeSSE2 => let val mask = if precision = PrecDouble then realAbsMask else floatAbsMask in [BlockSimple(LoadArgument{source=AddressConstant mask, dest=sse2ConstReg, kind=MoveDouble}), BlockSimple(SSE2FPBinary{opc=SSE2BAnd, resultReg=fpRegDest, arg1=fpRegSrc, arg2=RegisterArgument sse2ConstReg})] end val result = boxOrTagReal(fpRegDest, target, precision) in (revApp(argCode @ load :: code @ result, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealFixedInt precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val untagReg = newUReg() and fpReg = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) val floatOp = case fpMode of FPModeX87 => X87Float | FPModeSSE2 => SSE2Float val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double val _ = precision = BuiltIns.PrecDouble orelse raise InternalError "RealFixedInt - single" val code = argCode @ [BlockSimple(UntagValue{source=aReg1, dest=untagReg, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(floatOp{ dest=fpReg, source=RegisterArgument untagReg}), BlockSimple(BoxValue{boxKind=boxFloat, source=fpReg, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.FloatToDouble, arg1}, context, _, destination, tailCode) = let (* Convert a single precision floating point value to double precision. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* MoveFloat always converts from single to double-precision. 
*) val unboxOrUntag = case (fpMode, wordSize) of (FPModeX87, _) => [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg2, kind=MoveFloat})] | (FPModeSSE2, 0w4) => [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveFloat}), BlockSimple(SSE2FPUnary{opc=SSE2UFloatToDouble, resultReg=fpReg2, source=RegisterArgument fpReg})] | (FPModeSSE2, _) => [BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpReg, cache=NONE}), BlockSimple(SSE2FPUnary{opc=SSE2UFloatToDouble, resultReg=fpReg2, source=RegisterArgument fpReg})] val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double val code = argCode @ unboxOrUntag @ [BlockSimple(BoxValue{boxKind=boxFloat, source=fpReg2, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.DoubleToFloat NONE, arg1}, context, _, destination, tailCode) = let (* Convert a double precision value to a single precision using the current rounding mode. This is simpler than setting the rounding mode and then restoring it. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* In 32-bit mode we need to box the float. In 64-bit mode we can tag it. *) val boxOrTag = case fpMode of FPModeX87 => [BlockSimple(BoxValue{boxKind=BoxX87Float, source=fpReg, dest=target, saveRegs=[]})] | FPModeSSE2 => BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=fpReg2, source=RegisterArgument fpReg}) :: boxOrTagReal(fpReg2, target, BuiltIns.PrecSingle) val code = argCode @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveDouble})] @ boxOrTag in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.DoubleToFloat (SOME rndMode), arg1}, context, _, destination, tailCode) = let (* Convert a double precision value to a single precision. The rounding mode is passed in explicitly. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* In 32-bit mode we need to box the float. In 64-bit mode we can tag it. *) (* We need to save the rounding mode before we change it and restore it afterwards. *) open IEEEReal fun doConversion() = case fpMode of FPModeX87 => (* Convert the value using the appropriate rounding. *) [BlockSimple(BoxValue{boxKind=BoxX87Float, source=fpReg, dest=target, saveRegs=[]})] | FPModeSSE2 => BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=fpReg2, source=RegisterArgument fpReg}) :: boxOrTagReal(fpReg2, target, BuiltIns.PrecSingle) val code = argCode @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveDouble})] @ setAndRestoreRounding(rndMode, doConversion) in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealToInt(precision, rndMode), arg1}, context, _, destination, tailCode) = let val target = asTarget destination val chkOverflow = newCCRef() val convResult = newUReg() and wrkReg2 = newUReg() (* Convert a floating point value to an integer. We need to raise overflow if the result is out of range. We first convert the value to 32/64 bits then tag it. An overflow can happen either because the real number does not fit in 32/64 bits or if it is not a 31/63 bit value. Fortunately, if the first conversion fails the result is a value that causes an overflow when we try it shift it so the check for overflow only needs to happen there. 
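(* Illustrative sketch of the check-and-tag step built below: for a conversion result c the
   left shift produces 2*c and sets the overflow flag exactly when c needs more than the
   available 31/63 bits, and the following ADD 1 turns 2*c into the tagged form 2*c+1.
   E.g. c = 12: SHL 1 gives 24 with no overflow, ADD 1 gives 25 = 2*12+1 = tag 12. *)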
There is an SSE2 instruction that implements truncation (round to zero) directly but in other cases we need to set the rounding mode. *) val doConvert = case (fpMode, precision) of (FPModeX87, _) => let val fpReg = newUReg() val (argCode, aReg) = codeToPReg(arg1, context) fun doConvert() = [BlockSimple(X87RealToInt{source=fpReg, dest=convResult })] in argCode @ [BlockSimple(LoadArgument{source=wordAt aReg, dest=fpReg, kind=MoveDouble})] @ setAndRestoreRounding(rndMode, doConvert) end | (FPModeSSE2, BuiltIns.PrecDouble) => let val (argCode, argReg) = codeToPReg(arg1, context) fun doConvert() = [BlockSimple( SSE2RealToInt{source=wordAt argReg, dest=convResult, isDouble=true, isTruncate = rndMode = IEEEReal.TO_ZERO }) ] in argCode @ ( case rndMode of IEEEReal.TO_ZERO => doConvert() | _ => setAndRestoreRounding(rndMode, doConvert)) end | (FPModeSSE2, BuiltIns.PrecSingle) => let val (argCode, aReg) = codeToPReg(arg1, context) val fpReg = newUReg() fun doConvert() = [BlockSimple( SSE2RealToInt{source=RegisterArgument fpReg, dest=convResult, isDouble=false, isTruncate = rndMode = IEEEReal.TO_ZERO })] in argCode @ [BlockSimple(UntagFloat{source=RegisterArgument aReg, dest=fpReg, cache=NONE})] @ ( case rndMode of IEEEReal.TO_ZERO => doConvert() | _ => setAndRestoreRounding(rndMode, doConvert) ) end val checkAndTag = BlockSimple(ShiftOperation{ shift=SHL, resultReg=wrkReg2, operand=convResult, shiftAmount=IntegerConstant 1, ccRef=chkOverflow, opSize=polyWordOpSize}) :: checkOverflow context chkOverflow @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=wrkReg2, operand2=IntegerConstant 1, ccRef = newCCRef(), opSize=polyWordOpSize})] in (revApp(doConvert @ checkAndTag, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.TouchAddress, arg1}, context, _, destination, tailCode) = let (* Put the value in a register. This is not entirely necessary but ensures that if the value is a constant the constant will be included in the code. *) val (argCode, aReg) = codeToPRegRev(arg1, context, tailCode) in moveIfNotAllowedRev(destination, BlockSimple(TouchArgument{source=aReg}) :: argCode, (* Unit result *) IntegerConstant(tag 0)) end and codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test, isSigned}, arg1, arg2=BICConstnt(arg2Value, _)}, context, _, destination, tailCode) = let (* Comparisons. Because this is also used for pointer equality and even for exception matching it is perfectly possible that the argument could be an address. The higher levels used to generate this for pointer equality. *) val ccRef = newCCRef() val comparison = (* If the argument is a tagged value that will fit in 32-bits we can use the literal version. Use toLargeIntX here because the value will be sign-extended even if we're actually doing an unsigned comparison. *) if isShort arg2Value andalso is32bit(tag(Word.toLargeIntX(toShort arg2Value))) then let val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} in (* We're often comparing with a character or a string length field that has to be untagged. In that case we can avoid loading it into a register and untagging it by doing the comparison directly. 
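(* Illustrative sketch of why the literal comparison is sound: tagging (2*c+1) is injective
   and order-preserving, so comparing the tagged register against the tagged literal gives
   the same answer as comparing the underlying values; and when the left-hand side is an
   untagged byte or length field in memory (the cases picked out below) the raw constant c
   can be compared directly, saving the load and tag. *)
fun tag n = 2 * n + 1
val sameAnswer = (tag 42 > tag 13) = (42 > 13) andalso (tag 42 = tag 13) = (42 = 13)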
*) case arg1 of BICLoadOperation{kind=LoadStoreUntaggedUnsigned, address} => let val (codeBaseIndex, codeUntag, memLoc) = codeAddressRev(address, false, context, tailCode) val literal = Word.toLargeIntX(toShort arg2Value) in BlockSimple(CompareLiteral{arg1=MemoryLocation memLoc, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: codeUntag @ codeBaseIndex end | BICLoadOperation{kind=LoadStoreMLByte _, address} => let val (codeBaseIndex, codeUntag, {base, index, offset, ...}) = codeAddressRev(address, true, context, tailCode) val _ = toShort arg2Value >= 0w0 andalso toShort arg2Value < 0w256 orelse raise InternalError "Compare byte not a byte" val literal = Word8.fromLargeWord(Word.toLargeWord(toShort arg2Value)) in BlockSimple(CompareByteMem{arg1={base=base, index=index, offset=offset}, arg2=literal, ccRef=ccRef}) :: codeUntag @ codeBaseIndex end | BICUnary({oper=BuiltIns.MemoryCellFlags, arg1}) => (* This occurs particularly in arbitrary precision comparisons. *) let val (baseCode, baseReg) = codeToPRegRev(arg1, context, tailCode) val _ = toShort arg2Value >= 0w0 andalso toShort arg2Value < 0w256 orelse raise InternalError "Compare memory cell not a byte" val literal = Word8.fromLargeWord(Word.toLargeWord(toShort arg2Value)) in BlockSimple(CompareByteMem{arg1={base=baseReg, index=memIndexOrObject, offset= ~1}, arg2=literal, ccRef=ccRef}) :: baseCode end | _ => let (* TODO: We could include rarer cases of tagging by looking at the code and seeing if it's a TagValue. *) val (testCode, testDest, _) = codeToICodeRev(arg1, context, false, allow, tailCode) val literal = tag(Word.toLargeIntX(toShort arg2Value)) in BlockSimple(CompareLiteral{arg1=testDest, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: testCode end end else (* Addresses or larger values. We need to use a register comparison. *) let val (testCode, testReg) = codeToPRegRev(arg1, context, tailCode) val arg2Arg = constantAsArgument arg2Value in BlockSimple(WordComparison{arg1=testReg, arg2=arg2Arg, ccRef=ccRef, opSize=polyWordOpSize}) :: testCode end val target = asTarget destination in (makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, comparison), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test, isSigned}, arg1=BICConstnt(arg1Value, _), arg2}, context, _, destination, tailCode) = let (* If we have the constant first we need to reverse the test so the first argument is a register. *) val ccRef = newCCRef() val comparison = if isShort arg1Value andalso is32bit(tag(Word.toLargeIntX(toShort arg1Value))) then let val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} val (testCode, testDest, _) = codeToICodeRev(arg2, context, false, allow, tailCode) val literal = tag(Word.toLargeIntX(toShort arg1Value)) in BlockSimple(CompareLiteral{arg1=testDest, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: testCode end else (* Addresses or larger values. We need to use a register comparison. 
*) let val (testCode, testReg) = codeToPRegRev(arg2, context, tailCode) val arg1Arg = constantAsArgument arg1Value in BlockSimple(WordComparison{arg1=testReg, arg2=arg1Arg, ccRef=ccRef, opSize=polyWordOpSize}) :: testCode end val target = asTarget destination in (makeBoolResultRev(testAsBranch(leftRightTest test, isSigned, true), ccRef, target, comparison), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordComparison {test, isSigned}, arg1, arg2}, context, _, destination, tailCode) = let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (arg1Code, arg1Result, _) = codeToICodeRev(arg1, context, false, Allowed memOrReg, tailCode) val (arg2Code, arg2Result, _) = codeToICodeRev(arg2, context, false, Allowed memOrReg, arg1Code) val target = asTarget destination val code = case (arg1Result, arg2Result) of (RegisterArgument arg1Reg, arg2Result) => makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg1Reg, arg2=arg2Result, ccRef=ccRef, opSize=polyWordOpSize}) :: arg2Code) | (arg1Result, RegisterArgument arg2Reg) => (* The second argument is in a register - switch the sense of the test. *) makeBoolResultRev(testAsBranch(leftRightTest test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg2Reg, arg2=arg1Result, ccRef=ccRef, opSize=polyWordOpSize}) :: arg2Code) | (arg1Result, arg2Result) => let (* Have to load an argument - pick the first. *) val arg1Reg = newPReg() in makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg1Reg, arg2=arg2Result, ccRef=ccRef, opSize=polyWordOpSize}) :: BlockSimple(LoadArgument{source=arg1Result, dest=arg1Reg, kind=movePolyWord}) :: arg2Code) end in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.PointerEq, arg1, arg2}, context, isTail, destination, tailCode) = (* Equality of general values which can include pointers. This can be treated exactly as a word equality. It has to be analysed differently for indexed cases. *) codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test=BuiltIns.TestEqual, isSigned=false}, arg1=arg1, arg2=arg2}, context, isTail, destination, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.FixedPrecisionArith oper, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val code = codeFixedPrecisionArith(oper, arg1, arg2, context, target, checkOverflow context) in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. N.B. it is possible to have type-incorrect values in dead code. i.e. code that will never be executed because of a run-time check. 
*) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef = newCCRef(), opSize=polyWordOpSize}) :: arg1Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg2Code, aReg2) = codeToPRegRev(arg2, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg2, operand2=IntegerConstant constVal, ccRef = newCCRef(), opSize=polyWordOpSize}) :: arg2Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) (* Use LEA to do the addition since we're not concerned with overflow. This is shorter than subtracting the tag and adding the values and also moves the result into the appropriate register. *) val code = arg1Code @ arg2Code @ [BlockSimple(LoadEffectiveAddress{base=SOME aReg1, offset= ~1, index=MemIndex1 aReg2, dest=target, opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithSub, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=polyWordOpSize}) :: arg1Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithSub, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val aReg3 = newPReg() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Do the subtraction and add in the tag bit. This could be reordered if we have cascaded operations since we don't need to check for overflow. 
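(* Illustrative sketch of the tagged add/subtract identities used above and below: with a
   value n represented as 2*n+1,
       tag a + tag b - 1 = tag (a + b)     (one LEA with offset ~1)
       tag a - tag b + 1 = tag (a - b)     (SUB followed by ADD 1)
   for example: *)
fun tag n = 2 * n + 1
val addOk = tag 3 + tag 4 - 1 = tag (3 + 4)   (* 7 + 9 - 1 = 15 = tag 7 *)
val subOk = tag 9 - tag 4 + 1 = tag (9 - 4)   (* 19 - 9 + 1 = 11 = tag 5 *)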
*) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=RegisterArgument aReg2, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = codeMultiplyConstantWordRev(arg1, context, destination, if isShort value then toShort value else 0w0, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = codeMultiplyConstantWordRev(arg2, context, destination, if isShort value then toShort value else 0w0, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() and resUntagged = newUReg() val code = arg1Code @ arg2Code @ (* Shift one argument and subtract the tag from the other. It's possible this could be reordered if we have a value that is already untagged. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=arg2Untagged, operand1=aReg2, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=arg1Untagged, operand2=RegisterArgument arg2Untagged, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithDiv, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() val code = arg1Code @ arg2Code @ (* Shift both of the arguments to remove the tags. We don't test for zero here - that's done explicitly. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = false, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=quotient, dest=target, isSigned=false, opSize=polyWordOpSize })] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMod, arg1, arg2}, context, _, destination, tailCode) = let (* Identical to Quot except that the result is the remainder. 
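(* Illustrative sketch of the corresponding multiply and divide identities: for multiply,
   one operand is untagged (shifted right), the other only has its tag removed (giving
   2*b), and the product is re-tagged; for div and the Mod case that follows both operands
   are untagged and the quotient or remainder is tagged again. *)
fun tag n = 2 * n + 1
fun untag t = t div 2
val mulOk = untag (tag 3) * (tag 4 - 1) + 1 = tag (3 * 4)            (* 3 * 8 + 1 = 25 *)
val divOk = tag (untag (tag 14) div untag (tag 4)) = tag (14 div 4)  (* quotient re-tagged *)
val modOk = tag (untag (tag 14) mod untag (tag 4)) = tag (14 mod 4)  (* remainder re-tagged *)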
*) val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() val code = arg1Code @ arg2Code @ (* Shift both of the arguments to remove the tags. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = false, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=remainder, dest=target, isSigned=false, opSize=polyWordOpSize })] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith _, ...}, _, _, _, _) = raise InternalError "codeToICodeNonRev: WordArith - unimplemented operation" | codeToICodeBinaryRev({oper=BuiltIns.WordLogical logOp, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) (* Use a semitagged value for XOR. This preserves the tag bit. Use toLargeIntX here because the operations will sign-extend 32-bit values. *) val constVal = if isShort value then (case logOp of BuiltIns.LogicalXor => semitag | _ => tag) (Word.toLargeIntX(toShort value)) else 0 val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constVal <= 0xffffffff andalso constVal >= 0 then OpSize32 else polyWordOpSize val code = arg1Code @ [BlockSimple(ArithmeticFunction{oper=oper, resultReg=target, operand1=arg1Reg, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=opSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical logOp, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, arg2Reg) = codeToPReg(arg2, context) (* Use a semitagged value for XOR. This preserves the tag bit. *) val constVal = if isShort value then (case logOp of BuiltIns.LogicalXor => semitag | _ => tag) (Word.toLargeIntX(toShort value)) else 0 val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constVal <= 0xffffffff andalso constVal >= 0 then OpSize32 else polyWordOpSize val code = arg2Code @ [BlockSimple(ArithmeticFunction{oper=oper, resultReg=target, operand1=arg2Reg, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=opSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalOr, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Or-ing preserves the tag bit. 
*) [BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalAnd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Since they're both tagged the result will be tagged. *) [BlockSimple(ArithmeticFunction{oper=AND, resultReg=target, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalXor, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val aReg3 = newPReg() val code = arg1Code @ arg2Code @ (* We need to restore the tag bit after the operation. *) [BlockSimple(ArithmeticFunction{oper=XOR, resultReg=aReg3, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordShift BuiltIns.ShiftLeft, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = (* Use the general case multiplication code. This will use a shift except for small values. It does detect special cases such as multiplication by 4 and 8 which can be implemented with LEA. *) codeMultiplyConstantWordRev(arg1, context, destination, if isShort value then Word.<<(0w1, toShort value) else 0w1, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordShift shift, arg1, arg2}, context, _, destination, tailCode) = (* N.B. X86 shifts of greater than the word length mask the higher bits. That isn't what ML wants but that is dealt with at a higher level *) let open BuiltIns val target = asTarget destination (* Load the value into an untagged register. If this is a left shift we need to clear the tag bit. We don't need to do that for right shifts. *) val argRegUntagged = newUReg() val arg1Code = case arg1 of BICConstnt(value, _) => let (* Remove the tag bit. This isn't required for right shifts. *) val cnstntVal = if isShort value then semitag(Word.toLargeInt(toShort value)) else 1 in [BlockSimple(LoadArgument{source=IntegerConstant cnstntVal, dest=argRegUntagged, kind=movePolyWord})] end | _ => let val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val removeTag = case shift of ShiftLeft => ArithmeticFunction{oper=SUB, resultReg=argRegUntagged, operand1=arg1Reg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize} | _ => LoadArgument{source=RegisterArgument arg1Reg, dest=argRegUntagged, kind=movePolyWord} in arg1Code @ [BlockSimple removeTag] end (* The shift amount can usefully be a constant. 
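(* Illustrative sketch of why the logical cases above and the shift case below are so cheap
   on tagged words: OR and AND of two tagged values leave bit 0 set; XOR clears it and needs
   an OR 1 afterwards (or, with a constant, an XOR with the semitagged form 2*c); and a
   shifted value only has its tag cleared first (left shifts) or OR-ed back in at the end. *)
fun tagw w = Word.orb(Word.<<(w, 0w1), 0w1)
val orOk   = Word.orb (tagw 0w5, tagw 0w3) = tagw (Word.orb (0w5, 0w3))
val andOk  = Word.andb(tagw 0w5, tagw 0w3) = tagw (Word.andb(0w5, 0w3))
val xorOk  = Word.orb(Word.xorb(tagw 0w5, tagw 0w3), 0w1) = tagw (Word.xorb(0w5, 0w3))
val xorCst = Word.xorb(tagw 0w5, Word.<<(0w3, 0w1)) = tagw (Word.xorb(0w5, 0w3))
val shlOk  = Word.orb(Word.<<(tagw 0w5 - 0w1, 0w2), 0w1) = tagw (Word.<<(0w5, 0w2))
val shrOk  = Word.orb(Word.>>(tagw 0w5, 0w1), 0w1) = tagw (Word.>>(0w5, 0w1))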
*) val (arg2Code, untag2Code, arg2Arg) = codeAsUntaggedByte(arg2, false, context) val resRegUntagged = newUReg() val shiftOp = case shift of ShiftLeft => SHL | ShiftRightLogical => SHR | ShiftRightArithmetic => SAR val code = arg1Code @ arg2Code @ untag2Code @ [BlockSimple(ShiftOperation{ shift=shiftOp, resultReg=resRegUntagged, operand=argRegUntagged, shiftAmount=arg2Arg, ccRef=newCCRef(), opSize=polyWordOpSize }), (* Set the tag by ORing it in. This will work whether or not a right shift has shifted a 1 into this position. *) BlockSimple( ArithmeticFunction{oper=OR, resultReg=target, operand1=resRegUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.AllocateByteMemory, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val sizeReg = newPReg() and baseReg = newPReg() val sizeCode = codeToICodeTarget(arg1, context, false, sizeReg) val (flagsCode, flagUntag, flagArg) = codeAsUntaggedByte(arg2, false, context) val code =sizeCode @ flagsCode @ [BlockSimple(AllocateMemoryVariable{size=sizeReg, dest=baseReg, saveRegs=[]})] @ flagUntag @ [BlockSimple(StoreArgument{ source=flagArg, base=baseReg, offset= ~1, index=memIndexOrObject, kind=MoveByte, isMutable=false}), BlockSimple InitialisationComplete, BlockSimple(LoadArgument{ source=RegisterArgument baseReg, dest=target, kind=movePolyWord})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordComparison test, arg1, arg2}, context, _, destination, tailCode) = let val ccRef = newCCRef() val (arg1Code, arg1Reg) = codeToPRegRev(arg1, context, tailCode) (* In X64 we can extract the word from a constant and do the comparison directly. That can't be done in X86/32 because the value isn't tagged and might look like an address. The RTS scans for comparisons with inline constant addresses. *) val (arg2Code, arg2Operand) = if targetArch <> Native32Bit then (* Native 64-bit or 32-in-64. 
*) ( case arg2 of BICConstnt(value, _) => (arg1Code, IntegerConstant(largeWordConstant value)) | _ => let val (code, reg) = codeToPRegRev(arg2, context, arg1Code) in (code, wordAt reg) end ) else let val (code, reg) = codeToPRegRev(arg2, context, arg1Code) in (code, wordAt reg) end val argReg = newUReg() val target = asTarget destination val code = makeBoolResultRev(testAsBranch(test, false, true), ccRef, target, BlockSimple(WordComparison{arg1=argReg, arg2=arg2Operand, ccRef=ccRef, opSize=nativeWordOpSize}) :: BlockSimple(LoadArgument{source=wordAt arg1Reg, dest=argReg, kind=moveNativeWord}) :: arg2Code) in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code =arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code = arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg2, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithSub, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code = arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument 
target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithSub, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithMult, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val resValue = newUReg() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val argReg1 = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg1, kind=moveNativeWord}), BlockSimple(Multiplication{resultReg=resValue, operand1=argReg1, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=resValue, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithDiv, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val quotient = newUReg() and remainder = newUReg() val dividendReg = newUReg() and divisorReg = newUReg() val code = arg1Code @ arg2Code @ (* We don't test for zero here - that's done explicitly. *) [BlockSimple(LoadArgument{source=wordAt aReg1, dest=dividendReg, kind=moveNativeWord}), BlockSimple(LoadArgument{source=wordAt aReg2, dest=divisorReg, kind=moveNativeWord}), BlockSimple(Division { isSigned = false, dividend=dividendReg, divisor=RegisterArgument divisorReg, quotient=quotient, remainder=remainder, opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=quotient, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithMod, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val quotient = newUReg() and remainder = newUReg() val dividendReg = newUReg() and divisorReg = newUReg() val code = arg1Code @ arg2Code @ (* We don't test for zero here - that's done explicitly. 
*) [BlockSimple(LoadArgument{source=wordAt aReg1, dest=dividendReg, kind=moveNativeWord}), BlockSimple(LoadArgument{source=wordAt aReg2, dest=divisorReg, kind=moveNativeWord}), BlockSimple(Division { isSigned = false, dividend=dividendReg, divisor=RegisterArgument divisorReg, quotient=quotient, remainder=remainder, opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=remainder, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith _, ...}, _, _, _, _) = raise InternalError "codeToICodeNonRev: LargeWordArith - unimplemented operation" | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constantValue <= 0xffffffff andalso constantValue >= 0 then OpSize32 else nativeWordOpSize val code = arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=opSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. 
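(* Illustrative sketch of the 32-bit AND short cut used just below (and in the tagged
   logical cases earlier): when the mask fits in 32 bits the result can never have any of
   the upper bits set, so a 32-bit AND (which zero-extends on x86-64) gives the same answer
   as the full-width operation.  Assuming a 64-bit LargeWord here: *)
val narrowAndOk = LargeWord.andb(0wx123456789A, 0wxFF) = 0wx9A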
*) val opSize = if logOp = BuiltIns.LogicalAnd andalso constantValue <= 0xffffffff andalso constantValue >= 0 then OpSize32 else nativeWordOpSize val code = arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg2, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=opSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordShift shift, arg1, arg2}, context, _, destination, tailCode) = (* The shift is always a Word.word value i.e. tagged. There is a check at the higher level that the shift does not exceed 32/64 bits. *) let open BuiltIns val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, untag2Code, arg2Arg) = codeAsUntaggedByte(arg2, false, context) val aReg3 = newUReg() val shiftOp = case shift of ShiftLeft => SHL | ShiftRightLogical => SHR | ShiftRightArithmetic => SAR val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord})] @ untag2Code @ [BlockSimple(ShiftOperation{ shift=shiftOp, resultReg=aReg3, operand=argReg, shiftAmount=arg2Arg, ccRef=newCCRef(), opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealArith(fpOpPrec as (fpOp, fpPrec)), arg1, arg2}, context, _, destination, tailCode) = let open BuiltIns val commutative = case fpOp of ArithSub => NonCommutative | ArithDiv => NonCommutative | ArithAdd => Commutative | ArithMult => Commutative | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" val (argCodeRev, fpRegSrc, arg2Value) = codeFPBinaryArgsRev(arg1, arg2, fpPrec, commutative, context, []) val argCode = List.rev argCodeRev val target = asTarget destination val fpRegDest = newUReg() val arith = case fpMode of FPModeX87 => let val fpOp = case fpOp of ArithAdd => FADD | ArithSub => FSUB | ArithMult => FMUL | ArithDiv => FDIV | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" val isDouble = case fpPrec of PrecSingle => false | PrecDouble => true in [BlockSimple(X87FPArith{ opc=fpOp, resultReg=fpRegDest, arg1=fpRegSrc, arg2=arg2Value, isDouble=isDouble})] end | FPModeSSE2 => let val fpOp = case fpOpPrec of (ArithAdd, PrecSingle) => SSE2BAddSingle | (ArithSub, PrecSingle) => SSE2BSubSingle | (ArithMult, PrecSingle) => SSE2BMulSingle | (ArithDiv, PrecSingle) => 
SSE2BDivSingle | (ArithAdd, PrecDouble) => SSE2BAddDouble | (ArithSub, PrecDouble) => SSE2BSubDouble | (ArithMult, PrecDouble) => SSE2BMulDouble | (ArithDiv, PrecDouble) => SSE2BDivDouble | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" in [BlockSimple(SSE2FPBinary{ opc=fpOp, resultReg=fpRegDest, arg1=fpRegSrc, arg2=arg2Value})] end (* Box or tag the result. *) val result = boxOrTagReal(fpRegDest, target, fpPrec) in (revApp(argCode @ arith @ result, tailCode), RegisterArgument target, false) end (* Floating point comparison. This is complicated because we have different instruction sequences for SSE2 and X87. We also have to get the handling of unordered (NaN) values right. All the tests are treated as false if either argument is a NaN. To combine that test with the other tests we sometimes have to reverse the comparison. *) | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(BuiltIns.TestEqual, precision), arg1, arg2}, context, _, destination, tailCode) = let (* Get the arguments. It's commutative. *) val (arg2Code, fpReg, arg2Val) = codeFPBinaryArgsRev(arg1, arg2, precision, Commutative, context, tailCode) val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testReg1 = newUReg() and testReg2 = newUReg() and testReg3 = newUReg() (* If this is X87 we get the condition into RAX and test it there. If it is SSE2 we have to treat the unordered result (parity set) specially. *) val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=XOR, resultReg=testReg3, operand1=testReg2, operand2=IntegerConstant 0x4000, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant 0x4400, ccRef=newCCRef(), opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) | FPModeSSE2 => let val noParityLabel = newLabel() val resultLabel = newLabel() val falseLabel = newLabel() val trueLabel = newLabel() val mergeReg = newMergeReg() in BlockSimple(LoadArgument{ source=RegisterArgument mergeReg, dest=target, kind=Move32Bit }) :: BlockLabel resultLabel :: BlockFlow(Unconditional resultLabel) :: (* Result is false if parity is set i.e. unordered or if unequal. *) BlockSimple(LoadArgument{ source=IntegerConstant(tag 0), dest=mergeReg, kind=Move32Bit }) :: BlockLabel falseLabel :: BlockFlow(Unconditional resultLabel) :: (* Result is true if it's ordered and equal. *) BlockSimple(LoadArgument{ source=IntegerConstant(tag 1), dest=mergeReg, kind=Move32Bit }) :: BlockLabel trueLabel :: (* Not unordered - test the equality *) BlockFlow(Conditional{ccRef=ccRef1, condition=JE, trueJump=trueLabel, falseJump=falseLabel}) :: BlockLabel noParityLabel :: (* Go to falseLabel if unordered and therefore not equal. *) BlockFlow(Conditional{ccRef=ccRef1, condition=JP, trueJump=falseLabel, falseJump=noParityLabel}) :: BlockSimple(SSE2Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code end in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(BuiltIns.TestUnordered, precision), arg1, arg2}, context, _, destination, tailCode) = let (* The unordered test is really included because it is easy to implement and is the simplest way of implementing isNan. *) (* Get the arguments. It's commutative. 
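(* Illustrative sketch of the IEEE behaviour all of this branching has to reproduce:
   equality and every ordering test are false if either argument is a NaN, and "unordered"
   (the parity flag in the SSE2 case) is exactly the isNan test handled here. *)
val nan = 0.0 / 0.0
val eqNan   = Real.==(nan, nan)                  (* false *)
val ordNan  = nan < 1.0 orelse nan >= 1.0        (* false: every ordered test fails *)
val unordOk = Real.isNan nan                     (* true: the TestUnordered case *)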
*) val (arg2Code, fpReg, arg2Val) = codeFPBinaryArgsRev(arg1, arg2, precision, Commutative, context, tailCode) val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testReg1 = newUReg() and testReg2 = newUReg() and testReg3 = newUReg() (* If this is X87 we get the condition into RAX and test it there. If it is SSE2 we have to treat the unordered result (parity set) specially. *) val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => (* And with 0x4500. We have to use XOR rather than CMP to avoid having an untagged constant comparison. *) makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=XOR, resultReg=testReg3, operand1=testReg2, operand2=IntegerConstant 0x4500, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant 0x4500, ccRef=newCCRef(), opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) | FPModeSSE2 => makeBoolResultRev(JP, ccRef1, target, BlockSimple(SSE2Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(comparison, precision), arg1, arg2}, context, _, destination, tailCode) = let (* Ordered comparisons are complicated because they are all defined to be false if either argument is a NaN. We have two different tests for a > b and a >= b and implement a < b and a <= b by changing the order of the arguments. *) val (arg1Code, arg1Value) = codeFPArgument(arg1, precision, context, tailCode) val (arg2Code, arg2Value) = codeFPArgument(arg2, precision, context, arg1Code) val (regArg, opArg, isGeq) = case comparison of BuiltIns.TestGreater => (arg1Value, arg2Value, false) | BuiltIns.TestLess => (arg2Value, arg1Value, false) (* Reversed: aa. *) | BuiltIns.TestGreaterEqual => (arg1Value, arg2Value, true) | BuiltIns.TestLessEqual => (arg2Value, arg1Value, true) (* Reversed: a<=b is b>=a. *) | _ => raise InternalError "RealComparison: unimplemented operation" (* Load the first operand into a register. 
*) val (fpReg, loadCode) = case regArg of RegisterArgument fpReg => (fpReg, arg2Code) | regArg => let val fpReg = newUReg() val moveOp = case precision of BuiltIns.PrecDouble => MoveDouble | BuiltIns.PrecSingle => MoveFloat in (fpReg, BlockSimple(LoadArgument{source=regArg, dest=fpReg, kind=moveOp}) :: arg2Code) end val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => let val testReg1 = newUReg() and testReg2 = newUReg() val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testBits = if isGeq then 0x500 else 0x4500 in makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant testBits, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=opArg, ccRef=ccRef1, isDouble = isDouble}) :: loadCode) end | FPModeSSE2 => let val ccRef1 = newCCRef() val condition = if isGeq then JNB (* >=, <= *) else JA (* >, < *) in makeBoolResultRev(condition, ccRef1, target, BlockSimple(SSE2Compare{arg1=fpReg, arg2=opArg, ccRef=ccRef1, isDouble = isDouble}) :: loadCode) end in (code, RegisterArgument target, false) end (* Multiply tagged word by a constant. We're not concerned with overflow so it's possible to use various short cuts. *) and codeMultiplyConstantWordRev(arg, context, destination, multiplier, tailCode) = let val target = asTarget destination val (argCode, aReg) = codeToPReg(arg, context) val doMultiply = case multiplier of 0w0 => [BlockSimple(LoadArgument{source=IntegerConstant 1, dest=target, kind=movePolyWord})] | 0w1 => [BlockSimple(LoadArgument{source=RegisterArgument aReg, dest=target, kind=movePolyWord})] | 0w2 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~1, index=MemIndex1 aReg, dest=target, opSize=polyWordOpSize})] | 0w3 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~2, index=MemIndex2 aReg, dest=target, opSize=polyWordOpSize})] | 0w4 => [BlockSimple(LoadEffectiveAddress{base=NONE, offset= ~3, index=MemIndex4 aReg, dest=target, opSize=polyWordOpSize})] | 0w5 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~4, index=MemIndex4 aReg, dest=target, opSize=polyWordOpSize})] | 0w8 => [BlockSimple(LoadEffectiveAddress{base=NONE, offset= ~7, index=MemIndex8 aReg, dest=target, opSize=polyWordOpSize})] | 0w9 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~8, index=MemIndex8 aReg, dest=target, opSize=polyWordOpSize})] | _ => let val tReg = newUReg() val tagCorrection = Word.toLargeInt multiplier - 1 fun getPower2 n = let fun p2 (n, l) = if n = 0w1 then SOME l else if Word.andb(n, 0w1) = 0w1 then NONE else p2(Word.>>(n, 0w1), l+0w1) in if n = 0w0 then NONE else p2(n,0w0) end val multiply = case getPower2 multiplier of SOME power => (* Shift it including the tag. *) BlockSimple(ShiftOperation{ shift=SHL, resultReg=tReg, operand=aReg, shiftAmount=IntegerConstant(Word.toLargeInt power), ccRef=newCCRef(), opSize=polyWordOpSize }) | NONE => (* Multiply including the tag. 
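(* Illustrative sketch of the constant-multiply identities this function relies on: for a
   tagged value 2*n+1 and multiplier k,
       (2*n+1)*k - (k-1) = 2*(n*k)+1
   so the multiplication (or, for a power of two, a shift of the still-tagged value) only
   needs the final SUB of k-1 that follows, and the small cases above get the same effect
   from a single LEA. *)
fun tag n = 2 * n + 1
val byConst  = tag 6 * 10 - (10 - 1) = tag (6 * 10)   (* general IMUL case: 130 - 9 = 121 *)
val byPower2 = tag 6 * 8 - (8 - 1) = tag (6 * 8)      (* emitted as SHL 3 then SUB 7 *)
val byTwoLEA = tag 6 + tag 6 - 1 = tag (6 * 2)        (* LEA base + index, offset ~1 *)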
*) BlockSimple(Multiplication{resultReg=tReg, operand1=aReg, operand2=IntegerConstant(Word.toLargeInt multiplier), ccRef=newCCRef(), opSize=polyWordOpSize}) in [multiply, BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=tReg, operand2=IntegerConstant tagCorrection, ccRef=newCCRef(), opSize=polyWordOpSize})] end in (revApp(argCode @ doMultiply, tailCode), RegisterArgument target, false) end and codeToICodeAllocate({numWords as BICConstnt(length, _), flags as BICConstnt(flagValue, _), initial}, context, _, destination) = (* Constant length and flags is used for ref. We could handle other cases. *) if isShort length andalso isShort flagValue andalso toShort length = 0w1 then let val target = asTarget destination (* Force a different register. *) val vecLength = Word.toInt(toShort length) val flagByte = Word8.fromLargeWord(Word.toLargeWord(toShort flagValue)) val memAddr = newPReg() and valueReg = newPReg() fun initialise n = BlockSimple(StoreArgument{ source=RegisterArgument valueReg, offset=n*Word.toInt wordSize, base=memAddr, index=memIndexOrObject, kind=movePolyWord, isMutable=false}) val code = codeToICodeTarget(initial, context, false, valueReg) @ [BlockSimple(AllocateMemoryOperation{size=vecLength, flags=flagByte, dest=memAddr, saveRegs=[]})] @ List.tabulate(vecLength, initialise) @ [BlockSimple InitialisationComplete, BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord})] in (code, RegisterArgument target, false) end else (* If it's longer use the full run-time form. *) allocateMemoryVariable(numWords, flags, initial, context, destination) | codeToICodeAllocate({numWords, flags, initial}, context, _, destination) = allocateMemoryVariable(numWords, flags, initial, context, destination) and codeToICodeLoad({kind=LoadStoreMLWord _, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, false, context) in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument {source=MemoryLocation memLoc, dest=target, kind=movePolyWord})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreMLByte _, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, true, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveByte}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC8, address}, context, _, destination) = let (* Load a byte from C memory. This is almost exactly the same as LoadStoreMLByte except that the base address is a LargeWord.word value. *) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w1, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveByte}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC16, address}, context, _, destination) = let (* Load a 16-bit value from C memory. 
*) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w2, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move16Bit}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC32, address}, context, _, destination) = let (* Load a 32-bit value from C memory. If this is 64-bit mode we can tag it but if this is 32-bit mode we need to box it. *) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w4, context) val untaggedResReg = newUReg() val boxTagCode = if targetArch = Native64Bit then BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize64 (* It becomes 33 bits *)}) else BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untaggedResReg, dest=target, saveRegs=[]}) in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move32Bit}), boxTagCode], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC64, address}, context, _, destination) = let (* Load a 64-bit value from C memory. This is only allowed in 64-bit mode. The result is a boxed value. *) val _ = targetArch <> Native32Bit orelse raise InternalError "codeToICodeNonRev: BICLoadOperation LoadStoreC64 in 32-bit" val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w8, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move64Bit}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreCFloat, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w4, context) val untaggedResReg = newUReg() val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double (* We need to convert the float into a double. 
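(* Illustrative sketch for the LoadStoreC32 case above: a 32-bit value x tagged as 2*x+1
   needs at most 33 bits, so on a 64-bit target it always fits in a tagged word (hence
   opSize=OpSize64), whereas a 32-bit target has to box it as a large word instead. *)
val widestTagged32 = 2 * (0xFFFFFFFF : LargeInt.int) + 1   (* 0x1FFFFFFFF: 33 bits *)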
*) val loadArg = case fpMode of FPModeX87 => BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveFloat}) | FPModeSSE2 => BlockSimple(SSE2FPUnary { source=MemoryLocation memLoc, resultReg=untaggedResReg, opc=SSE2UFloatToDouble}) in (codeBaseIndex @ codeUntag @ [loadArg, BlockSimple(BoxValue{boxKind=boxFloat, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreCDouble, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w8, context) val untaggedResReg = newUReg() val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveDouble}), BlockSimple(BoxValue{boxKind=boxFloat, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreUntaggedUnsigned, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, false, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=movePolyWord}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=polyWordOpSize})], RegisterArgument target, false) end and codeToICodeStore({kind=LoadStoreMLWord _, address, value}, context, _, destination) = let val (sourceCode, source, _) = codeToICode(value, context, false, Allowed allowInMemMove) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, false, context) val code = codeBaseIndex @ sourceCode @ codeUntag @ [BlockSimple(StoreArgument {source=source, base=base, offset=offset, index=index, kind=movePolyWord, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreMLByte _, address, value}, context, _, destination) = let val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, true, context) (* We have to untag the value to store. *) val (valueCode, untagValue, valueArg) = codeAsUntaggedByte(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=valueArg, base=base, offset=offset, index=index, kind=MoveByte, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC8, address, value}, context, _, destination) = let (* Store a byte to C memory. Almost exactly the same as LoadStoreMLByte. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w1, context) val (valueCode, untagValue, valueArg) = codeAsUntaggedByte(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=valueArg, base=base, offset=offset, index=index, kind=MoveByte, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC16, address, value}, context, _, destination) = let (* Store a 16-bit value to C memory. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w2, context) (* We don't currently implement 16-bit constant moves so this must always be in a reg. 
*) val (valueCode, untagValue, valueArg) = codeAsUntaggedToReg(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueArg, base=base, offset=offset, index=index, kind=Move16Bit, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC32, address, value}, context, _, destination) = (* Store a 32-bit value. If this is 64-bit mode we untag it but if this is 32-bit mode we unbox it. *) let val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w4, context) val code = if targetArch = Native64Bit then let (* We don't currently implement 32-bit constant moves so this must always be in a reg. *) val (valueCode, untagValue, valueArg) = codeAsUntaggedToReg(value, false, context) in codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueArg, base=base, offset=offset, index=index, kind=Move32Bit, isMutable=true})] end else let val (valueCode, valueReg) = codeToPReg(value, context) val valueReg1 = newUReg() in codeBaseIndex @ valueCode @ BlockSimple(LoadArgument{source=wordAt valueReg, dest=valueReg1, kind=Move32Bit}) :: codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueReg1, base=base, offset=offset, index=index, kind=Move32Bit, isMutable=true})] end in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC64, address, value}, context, _, destination) = let (* Store a 64-bit value. *) val _ = targetArch <> Native32Bit orelse raise InternalError "codeToICodeNonRev: BICStoreOperation LoadStoreC64 in 32-bit" val (valueCode, valueReg) = codeToPReg(value, context) val valueReg1 = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w8, context) val code = codeBaseIndex @ valueCode @ codeUntag @ [BlockSimple(LoadArgument{source=wordAt valueReg, dest=valueReg1, kind=Move64Bit}), BlockSimple(StoreArgument {source=RegisterArgument valueReg1, base=base, offset=offset, index=index, kind=Move64Bit, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreCFloat, address, value}, context, _, destination) = let val floatReg = newUReg() and float2Reg = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w4, context) val (valueCode, valueReg) = codeToPReg(value, context) (* If we're using an SSE2 reg we have to convert it from double to single precision. 
*) val (storeReg, cvtCode) = case fpMode of FPModeSSE2 => (float2Reg, [BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=float2Reg, source=RegisterArgument floatReg})]) | FPModeX87 => (floatReg, []) val code = codeBaseIndex @ valueCode @ codeUntag @ BlockSimple(LoadArgument{source=wordAt valueReg, dest=floatReg, kind=MoveDouble}) :: cvtCode @ [BlockSimple(StoreArgument {source=RegisterArgument storeReg, base=base, offset=offset, index=index, kind=MoveFloat, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreCDouble, address, value}, context, _, destination) = let val floatReg = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w8, context) val (valueCode, valueReg) = codeToPReg(value, context) val code = codeBaseIndex @ valueCode @ codeUntag @ [BlockSimple(LoadArgument{source=wordAt valueReg, dest=floatReg, kind=MoveDouble}), BlockSimple(StoreArgument {source=RegisterArgument floatReg, base=base, offset=offset, index=index, kind=MoveDouble, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreUntaggedUnsigned, address, value}, context, _, destination) = let (* We have to untag the value to store. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, false, context) (* See if it's a constant. This is frequently used to set the last word of a string to zero. *) (* We have to be a bit more careful on the X86. We use moves to store constants that can include addresses. To avoid problems we only use a move if the value is zero or odd and so looks like a tagged value. *) val storeAble = case value of BICConstnt(value, _) => if not(isShort value) then NONE else let val ival = Word.toLargeIntX(toShort value) in if targetArch = Native64Bit then if is32bit ival then SOME ival else NONE else if ival = 0 orelse ival mod 2 = 1 then SOME ival else NONE end | _ => NONE val (storeVal, valCode) = case storeAble of SOME value => (IntegerConstant value (* Leave untagged *), []) | NONE => let val valueReg = newPReg() and valueReg1 = newUReg() in (RegisterArgument valueReg1, codeToICodeTarget(value, context, false, valueReg) @ [BlockSimple(UntagValue{dest=valueReg1, source=valueReg, isSigned=false, cache=NONE, opSize=polyWordOpSize})]) end val code = codeBaseIndex @ valCode @ codeUntag @ [BlockSimple(StoreArgument {source=storeVal, base=base, offset=offset, index=index, kind=movePolyWord, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end and codeToICodeBlock({kind=BlockOpCompareByte, sourceLeft, destRight, length}, context, _, destination) = let (* This is effectively a big-endian comparison since we compare the bytes until we find an inequality. 
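A minimal sketch, not part of this patch, of the ordering CompareByteVectors is expected to realise, written against plain Basis structures; compareBytes and its parameter names are illustrative only:

    fun compareBytes (v1, v2, len) =
    let
        fun cmp i =
            if i = len then EQUAL
            else
            case Word8.compare (Word8Vector.sub (v1, i), Word8Vector.sub (v2, i)) of
                EQUAL => cmp (i + 1)
            |   result => result
    in
        cmp 0
    end

Because the comparison is unsigned and starts from the lowest address, the first differing byte decides the result, which is what makes it behave like a big-endian comparison of the two vectors.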
*) val target = asTarget destination val mergeResult = newMergeReg() val vec1Reg = newUReg() and vec2Reg = newUReg() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddress(sourceLeft, true, context) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddress(destRight, true, context) val ccRef = newCCRef() val labLess = newLabel() and labGreater = newLabel() and exitLab = newLabel() val labNotLess = newLabel() and labNotGreater = newLabel() val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToReg(length, false (* unsigned *), context) val code = leftCode @ rightCode @ lengthCode @ leftUntag @ [BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg})] @ rightUntag @ [BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg})] @ lengthUntag @ [BlockSimple(CompareByteVectors{ vec1Addr=vec1Reg, vec2Addr=vec2Reg, length=lengthArg, ccRef=ccRef }), (* N.B. These are unsigned comparisons. *) BlockFlow(Conditional{ ccRef=ccRef, condition=JB, trueJump=labLess, falseJump=labNotLess }), BlockLabel labNotLess, BlockFlow(Conditional{ ccRef=ccRef, condition=JA, trueJump=labGreater, falseJump=labNotGreater }), BlockLabel labNotGreater, BlockSimple(LoadArgument{ source=IntegerConstant(tag 0), dest=mergeResult, kind=movePolyWord }), BlockFlow(Unconditional exitLab), BlockLabel labLess, BlockSimple(LoadArgument{ source=IntegerConstant(tag ~1), dest=mergeResult, kind=movePolyWord }), BlockFlow(Unconditional exitLab), BlockLabel labGreater, BlockSimple(LoadArgument{ source=IntegerConstant(tag 1), dest=mergeResult, kind=movePolyWord }), BlockLabel exitLab, BlockSimple(LoadArgument{ source=RegisterArgument mergeResult, dest=target, kind=movePolyWord })] in (code, RegisterArgument target, false) end | codeToICodeBlock({kind=BlockOpMove {isByteMove}, sourceLeft, destRight, length}, context, _, destination) = let (* Moves of 4 or 8 bytes can be done as word moves provided the alignment is correct. Although this will work for strings it is really to handle moves between SysWord and volatileRef in Foreign.Memory. Moves of 1, 2 or 3 bytes or words are converted into a sequence of byte or word moves. 
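As a rough sketch, illustration only and not part of this patch, the test applied below before choosing a single word move amounts to the following for a constant length with no index register, assuming byte offsets and an 8-byte native word; canUseWordMove is a hypothetical name:

    fun canUseWordMove (len, leftOffset, rightOffset) =
        (len = 4 orelse len = 8) andalso
        leftOffset mod len = 0 andalso rightOffset mod len = 0

Anything that fails this check falls through to the general BlockMove sequence further down.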
*) val isWordMove = case (isByteMove, length) of (true, BICConstnt(l, _)) => if not (isShort l) orelse (toShort l <> 0w4 andalso toShort l <> nativeWordSize) then NONE else let - val leng = toShort l + val leng = Word.toInt(toShort l) val moveKind = if toShort l = nativeWordSize then moveNativeWord else Move32Bit val isLeftAligned = case sourceLeft of - {index=NONE, offset, ...} => offset mod leng = 0w0 + {index=NONE, offset:int, ...} => offset mod leng = 0 | _ => false val isRightAligned = case destRight of - {index=NONE, offset, ...} => offset mod leng = 0w0 + {index=NONE, offset: int, ...} => offset mod leng = 0 | _ => false in if isLeftAligned andalso isRightAligned then SOME moveKind else NONE end | _ => NONE in case isWordMove of SOME moveKind => let val (leftCode, leftUntag, leftMem) = codeAddress(sourceLeft, isByteMove, context) val (rightCode, rightUntag, {base, offset, index, ...}) = codeAddress(destRight, isByteMove, context) val untaggedResReg = newUReg() val code = leftCode @ rightCode @ leftUntag @ rightUntag @ [BlockSimple(LoadArgument { source=MemoryLocation leftMem, dest=untaggedResReg, kind=moveKind}), BlockSimple(StoreArgument {source=RegisterArgument untaggedResReg, base=base, offset=offset, index=index, kind=moveKind, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | _ => let val vec1Reg = newUReg() and vec2Reg = newUReg() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddress(sourceLeft, isByteMove, context) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddress(destRight, isByteMove, context) val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToReg(length, false (* unsigned *), context) val code = leftCode @ rightCode @ lengthCode @ leftUntag @ [BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg})] @ rightUntag @ [BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg})] @ lengthUntag @ [BlockSimple(BlockMove{ srcAddr=vec1Reg, destAddr=vec2Reg, length=lengthArg, isByteMove=isByteMove })] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end end | codeToICodeBlock({kind=BlockOpEqualByte, ...}, _, _, _) = (* TODO: Move the code from codeToICodeRev. However, that is already reversed. *) raise InternalError "codeToICodeBlock - BlockOpEqualByte" (* Already done *) and codeConditionRev(condition, context, jumpOn, jumpLabel, tailCode) = (* General case. Load the value into a register and compare it with 1 (true) *) let val ccRef = newCCRef() val (testCode, testReg) = codeToPRegRev(condition, context, tailCode) val noJumpLabel = newLabel() in BlockLabel noJumpLabel :: BlockFlow(Conditional{ccRef=ccRef, condition=if jumpOn then JE else JNE, trueJump=jumpLabel, falseJump=noJumpLabel}) :: BlockSimple(CompareLiteral{arg1=RegisterArgument testReg, arg2=tag 1, opSize=OpSize32, ccRef=ccRef}) :: testCode end (* The fixed precision functions are also used for arbitrary precision but instead of raising Overflow we need to jump to the code that handles the long format. *) and codeFixedPrecisionArith(BuiltIns.ArithAdd, arg1, BICConstnt(value, _), context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. This should always be a tagged value if the type is correct. However it's possible for it not to be if we have an arbitrary precision value. 
There will be a run-time check that the value is short and so this code will never be executed. It will generally be edited out by the higher level but we can't rely on that. Because it's never executed we can just put in zero. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPReg(arg1, context) in arg1Code @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithAdd, BICConstnt(value, _), arg2, context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg2Code @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg2, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithAdd, arg1, arg2, context, target, onOverflow) = let val aReg3 = newPReg() and ccRef = newCCRef() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg1Code @ arg2Code @ (* Subtract the tag bit from one of the arguments, do the addition and check for overflow. *) (* TODO: We should really do the detagging in the transform phase. It can make a better choice of the argument if one of the arguments is already untagged or if we have a constant argument. *) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=RegisterArgument aReg2, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end (* Subtraction. We can handle the special case of the second argument being a constant but not the first. *) | codeFixedPrecisionArith(BuiltIns.ArithSub, arg1, BICConstnt(value, _), context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPReg(arg1, context) in arg1Code @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithSub, arg1, arg2, context, target, onOverflow) = let val aReg3 = newPReg() val ccRef = newCCRef() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg1Code @ arg2Code @ (* Do the subtraction, test for overflow and afterwards add in the tag bit.
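The tag manipulation here and in the addition case above relies on simple identities over the 2*n+1 tagged encoding of short values; a throwaway check, not part of this patch:

    fun tag n = 2 * n + 1
    val true = (tag 3 - 1) + tag 4 = tag 7      (* addition: clear one tag bit first *)
    val true = (tag 9 - tag 4) + 1 = tag 5      (* subtraction: restore the tag bit last *)

So subtracting the two tagged operands yields an untagged difference, and adding 1 afterwards produces a correctly tagged result, with the overflow test sitting between the two steps.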
*) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=RegisterArgument aReg2, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, arg1, BICConstnt(value, _), context, target, onOverflow) = let val aReg = newPReg() and argUntagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* Is it better to untag the constant or the register argument? *) val constVal = if isShort value then Word.toLargeIntX(toShort value) else 0 in codeToICodeTarget(arg1, context, false, aReg) @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=argUntagged, operand1=aReg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=argUntagged, operand2=IntegerConstant constVal, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, BICConstnt(value, _), arg2, context, target, onOverflow) = let val aReg = newPReg() and argUntagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* Is it better to untag the constant or the register argument? *) val constVal = if isShort value then Word.toLargeIntX(toShort value) else 0 in codeToICodeTarget(arg2, context, false, aReg) @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=argUntagged, operand1=aReg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=argUntagged, operand2=IntegerConstant constVal, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, arg1, arg2, context, target, onOverflow) = let val aReg1 = newPReg() and aReg2 = newPReg() and arg1Untagged = newUReg() and arg2Untagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* This is almost the same as the word operation except we use a signed shift and check for overflow. *) in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift one argument and subtract the tag from the other. It's possible this could be reordered if we have a value that is already untagged. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true (* Signed shift here. 
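Only one operand needs the full untagging; the other just has its tag bit cleared, because with the same 2*n+1 encoding (a throwaway check, not part of this patch):

    fun tag n = 2 * n + 1
    val true = (tag 6 div 2) * (tag 7 - 1) + 1 = tag 42

i.e. a * (2 * b) + 1 = 2 * (a * b) + 1. The arithmetic (signed) shift is what makes the divide-by-two untagging correct for negative operands as well.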
*), cache=NONE, opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=arg2Untagged, operand1=aReg2, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=arg1Untagged, operand2=RegisterArgument arg2Untagged, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithQuot, arg1, arg2, context, target, _) = let val aReg1 = newPReg() and aReg2 = newPReg() val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift both of the arguments to remove the tags. We don't test for zero here - that's done explicitly. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = true, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=quotient, dest=target, isSigned=true, opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithRem, arg1, arg2, context, target, _) = let (* Identical to Quot except that the result is the remainder. *) val aReg1 = newPReg() and aReg2 = newPReg() val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift both of the arguments to remove the tags. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = true, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=remainder, dest=target, isSigned=true, opSize=polyWordOpSize})] end | codeFixedPrecisionArith(_, _, _, _, _, _) = raise InternalError "codeToICode: FixedPrecisionArith - unimplemented operation" (* Generate code for floating point arguments where one of the arguments must be in a register. If the first argument is in a register use that, if the second is in a register and it's commutative use that otherwise load the first argument into a register. *) and codeFPBinaryArgsRev(arg1, arg2, precision, commutative, context, tailCode) = let val (arg1Code, arg1Value) = codeFPArgument(arg1, precision, context, tailCode) val (arg2Code, arg2Value) = codeFPArgument(arg2, precision, context, arg1Code) in case (arg1Value, arg2Value, commutative) of (RegisterArgument fpReg, _, _) => (arg2Code, fpReg, arg2Value) | (_, RegisterArgument fpReg, Commutative) => (arg2Code, fpReg, arg1Value) | (arg1Val, _, _) => let val fpReg = newUReg() val moveOp = case precision of BuiltIns.PrecDouble => MoveDouble | BuiltIns.PrecSingle => MoveFloat in (BlockSimple(LoadArgument{source=arg1Val, dest=fpReg, kind=moveOp}) :: arg2Code, fpReg, arg2Value) end end (* Generate code to evaluate a floating point argument. 
The aim of this code is to avoid the overhead of untagging a short-precision floating point value in memory. *) and codeFPArgument(BICConstnt(value, _), _, _, tailCode) = let val argVal = (* Single precision constants in 64-bit mode are represented by the value shifted left 32 bits. A word is shifted left one bit so the result is 0w31. *) if isShort value then IntegerConstant(Word.toLargeInt(Word.>>(toShort value, 0w31))) else AddressConstant value in (tailCode, argVal) end | codeFPArgument(arg, precision, context, tailCode) = ( case (precision, wordSize) of (BuiltIns.PrecSingle, 0w8) => (* If this is a single precision value and the word size is 8 the values are tagged. If it is memory we can load the value directly from the high-order word. *) let val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (code, result, _) = codeToICodeRev(arg, context, false, Allowed memOrReg, tailCode) in case result of RegisterArgument argReg => let val fpReg = newUReg() in (BlockSimple(UntagFloat{source=RegisterArgument argReg, dest=fpReg, cache=NONE}) :: code, RegisterArgument fpReg) end | MemoryLocation{offset, base, index, ...} => (code, MemoryLocation{offset=offset+4, base=base, index=index, cache=NONE}) | _ => raise InternalError "codeFPArgument" end | _ => (* Otherwise the value is boxed. *) let val (argCode, argReg) = codeToPRegRev(arg, context, tailCode) in (argCode, wordAt argReg) end ) (* Code an address. The index is optional. *) and codeAddressRev({base, index=SOME index, offset}, true (* byte move *), context, tailCode) = let (* Byte address with index. The index needs to be untagged. *) val indexReg1 = newUReg() val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) val (codeIndex, indexReg) = codeToPRegRev(index, context, codeBase) val untagCode = [BlockSimple(UntagValue{dest=indexReg1, source=indexReg, isSigned=false, cache=NONE, opSize=polyWordOpSize})] val (codeLoadAddr, realBase) = if targetArch = ObjectId32Bit then let val addrReg = newUReg() in ([BlockSimple(LoadEffectiveAddress{ base=SOME baseReg, offset=0, index=ObjectIndex, dest=addrReg, opSize=nativeWordOpSize})], addrReg) end else ([], baseReg) - val memResult = {base=realBase, offset=Word.toInt offset, index=MemIndex1 indexReg1, cache=NONE} + val memResult = {base=realBase, offset=offset, index=MemIndex1 indexReg1, cache=NONE} in (codeLoadAddr @ codeIndex, untagCode, memResult) end | codeAddressRev({base, index=SOME index, offset}, false (* word move *), context, tailCode) = let (* Word address with index. We can avoid untagging the index by adjusting the multiplier and offset *) val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) val (codeIndex, indexReg) = codeToPRegRev(index, context, codeBase) val (codeLoadAddr, realBase) = if targetArch = ObjectId32Bit then let val addrReg = newUReg() in ([BlockSimple(LoadEffectiveAddress{ base=SOME baseReg, offset=0, index=ObjectIndex, dest=addrReg, opSize=nativeWordOpSize})], addrReg) end else ([], baseReg) - val iOffset = Word.toInt offset handle Overflow => 0 (* See below: special case may not happen. 
*) val memResult = if wordSize = 0w8 - then {base=realBase, offset=iOffset-4, index=MemIndex4 indexReg, cache=NONE} - else {base=realBase, offset=iOffset-2, index=MemIndex2 indexReg, cache=NONE} + then {base=realBase, offset=offset-4, index=MemIndex4 indexReg, cache=NONE} + else {base=realBase, offset=offset-2, index=MemIndex2 indexReg, cache=NONE} in (codeLoadAddr @ codeIndex, [], memResult) end | codeAddressRev({base, index=NONE, offset}, _, context, tailCode) = let val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) - (* A negative value for "offset" will produce an overflow at compile time. It should never be - reached at run-time because of bounds checking. See Test192. *) - val iOffset = Word.toInt offset handle Overflow => 0 - val memResult = {offset=iOffset, base=baseReg, index=memIndexOrObject, cache=NONE} + val memResult = {offset=offset, base=baseReg, index=memIndexOrObject, cache=NONE} in (codeBase, [], memResult) end and codeAddress(addr, isByte, context) = let val (code, untag, res) = codeAddressRev(addr, isByte, context, []) in (List.rev code, untag, res) end (* C-memory operations are slightly different. The base address is a LargeWord.word value. The index is a byte index so may have to be untagged. *) and codeCAddress({base, index=SOME index, offset}, 0w1, context) = let (* Byte address with index. The index needs to be untagged. *) - val untaggedBaseReg = newUReg() and indexReg1 = newUReg() + val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) and (codeIndex, indexReg) = codeToPReg(index, context) - val untagCode = - [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord}), - BlockSimple(UntagValue{dest=indexReg1, source=indexReg, isSigned=false, cache=NONE, opSize=polyWordOpSize})] - val memResult = {base=untaggedBaseReg, offset=Word.toInt offset, index=MemIndex1 indexReg1, cache=NONE} + (* The index needs to be untagged and, if necessary, sign-extended to the native word size. *) + val (untagCode, sxReg) = + if targetArch = ObjectId32Bit + then + let + val sReg1 = newUReg() and sReg2 = newUReg() + in + ([BlockSimple(SignExtend32To64{dest=sReg1, source=RegisterArgument indexReg}), + BlockSimple(UntagValue{dest=sReg2, source=sReg1, isSigned=true, cache=NONE, opSize=nativeWordOpSize})], sReg2) + end + else + let + val sReg = newUReg() + in + ([BlockSimple(UntagValue{dest=sReg, source=indexReg, isSigned=true, cache=NONE, opSize=nativeWordOpSize})], sReg) + end + val loadCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] + val memResult = {base=untaggedBaseReg, offset=offset, index=MemIndex1 sxReg, cache=NONE} in - (codeBase @ codeIndex, untagCode, memResult) + (codeBase @ codeIndex, loadCode @ untagCode, memResult) end | codeCAddress({base, index=SOME index, offset}, size, context) = let (* Non-byte address with index. By using an appropriate multiplier we can avoid having to untag the index. *) val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) and (codeIndex, indexReg) = codeToPReg(index, context) - val untagCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] + (* The index is signed i.e. negative index values are legal. We don't have + to do anything special on the native code versions but on 32-in-64 we need + to sign extend.
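A two-line illustration, not part of this patch, of why the 32-in-64 case cannot simply zero-extend a negative index; index32, wrong and right are illustrative names and the conversions are ordinary Basis ones:

    val index32: Word32.word = 0w0 - 0w8    (* the byte index ~8 held in a 32-bit word *)
    val wrong = Word32.toLarge index32      (* zero-extends to 0wxFFFFFFF8, a large positive offset *)
    val right = Word32.toLargeX index32     (* sign-extends, so it subtracts 8 from the base *)

SignExtend32To64 performs the equivalent of toLargeX on the value already in the register; on the native targets the index is already held at the native width, so nothing extra is needed.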
*) + val (untagCode, sxReg) = + if targetArch = ObjectId32Bit + then + let + val sReg = newUReg() + in + ([BlockSimple(SignExtend32To64{source=RegisterArgument indexReg, dest=sReg})], sReg) + end + else ([], indexReg) + val loadCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] val memResult = case size of - 0w2 => {base=untaggedBaseReg, offset=Word.toInt offset-1, index=MemIndex1 indexReg, cache=NONE} - | 0w4 => {base=untaggedBaseReg, offset=Word.toInt offset-2, index=MemIndex2 indexReg, cache=NONE} - | 0w8 => {base=untaggedBaseReg, offset=Word.toInt offset-4, index=MemIndex4 indexReg, cache=NONE} + 0w2 => {base=untaggedBaseReg, offset=offset-1, index=MemIndex1 sxReg, cache=NONE} + | 0w4 => {base=untaggedBaseReg, offset=offset-2, index=MemIndex2 sxReg, cache=NONE} + | 0w8 => {base=untaggedBaseReg, offset=offset-4, index=MemIndex4 sxReg, cache=NONE} | _ => raise InternalError "codeCAddress: unknown size" in - (codeBase @ codeIndex, untagCode, memResult) + (codeBase @ codeIndex, loadCode @ untagCode, memResult) end | codeCAddress({base, index=NONE, offset}, _, context) = let val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) val untagCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] - val memResult = {offset=Word.toInt offset, base=untaggedBaseReg, index=NoMemIndex, cache=NONE} + val memResult = {offset=offset, base=untaggedBaseReg, index=NoMemIndex, cache=NONE} in (codeBase, untagCode, memResult) end (* Return an untagged value. If we have a constant just return it. Otherwise return the code to evaluate the argument, the code to untag it and the reference to the untagged register. *) and codeAsUntaggedToRegRev(BICConstnt(value, _), isSigned, _, tailCode) = let (* Should always be short except for unreachable code. *) val untagReg = newUReg() val cval = if isShort value then toShort value else 0w0 val cArg = IntegerConstant(if isSigned then Word.toLargeIntX cval else Word.toLargeInt cval) (* Don't tag *) val untag = [BlockSimple(LoadArgument{source=cArg, dest=untagReg, kind=movePolyWord})] in (tailCode, untag, untagReg) (* Don't tag. *) end | codeAsUntaggedToRegRev(arg, isSigned, context, tailCode) = let val untagReg = newUReg() val (code, srcReg) = codeToPRegRev(arg, context, tailCode) val untag = [BlockSimple(UntagValue{source=srcReg, dest=untagReg, isSigned=isSigned, cache=NONE, opSize=polyWordOpSize})] in (code, untag, untagReg) end and codeAsUntaggedToReg(arg, isSigned, context) = let val (code, untag, untagReg) = codeAsUntaggedToRegRev(arg, isSigned, context, []) in (List.rev code, untag, untagReg) end (* Return the argument as an untagged value. We separate evaluating the argument from untagging because we may have to evaluate other arguments and that could involve a function call and we can't save the value to the stack after we've untagged it. Currently this is only used for byte values but we may have to be careful if we use it for word values on the X86. Moving an untagged value into a register might look like loading a constant address. 
*) and codeAsUntaggedByte(BICConstnt(value, _), isSigned, _) = let val cval = if isShort value then toShort value else 0w0 val cArg = IntegerConstant(if isSigned then Word.toLargeIntX cval else Word.toLargeInt cval) (* Don't tag *) in ([], [], cArg) end | codeAsUntaggedByte(arg, isSigned, context) = let val untagReg = newUReg() val (code, argReg) = codeToPReg(arg, context) val untag = [BlockSimple(UntagValue{source=argReg, dest=untagReg, isSigned=isSigned, cache=NONE, opSize=OpSize32})] in (code, untag, RegisterArgument untagReg) end (* Allocate memory. This is used both for true variable length cells and also for longer constant length cells. *) and allocateMemoryVariable(numWords, flags, initial, context, destination) = let val target = asTarget destination (* With the exception of flagReg all these registers are modified by the code. So, we have to copy the size value into a new register. *) val sizeReg = newPReg() and initReg = newPReg() val sizeReg2 = newPReg() val untagSizeReg = newUReg() and initAddrReg = newPReg() and allocReg = newPReg() val sizeCode = codeToICodeTarget(numWords, context, false, sizeReg) and (flagsCode, flagUntag, flagArg) = codeAsUntaggedByte(flags, false, context) (* We're better off deferring the initialiser if possible. If the value is a constant we don't have to save it. *) val (initCode, initResult, _) = codeToICode(initial, context, false, Allowed allowDefer) in (sizeCode @ flagsCode @ initCode @ [(* We need to copy the size here because AllocateMemoryVariable modifies the size in order to store the length word. This is unfortunate especially as we're going to untag it anyway. *) BlockSimple(LoadArgument{source=RegisterArgument sizeReg, dest=sizeReg2, kind=movePolyWord}), BlockSimple(AllocateMemoryVariable{size=sizeReg, dest=allocReg, saveRegs=[]})] @ flagUntag @ [BlockSimple(StoreArgument{ source=flagArg, base=allocReg, offset= ~1, index=memIndexOrObject, kind=MoveByte, isMutable=false}), (* We need to copy the address here because InitialiseMem modifies all its arguments. *) BlockSimple( if targetArch = ObjectId32Bit then LoadEffectiveAddress{ base=SOME allocReg, offset=0, index=ObjectIndex, dest=initAddrReg, opSize=nativeWordOpSize} else LoadArgument{source=RegisterArgument allocReg, dest=initAddrReg, kind=movePolyWord}), BlockSimple(UntagValue{source=sizeReg2, dest=untagSizeReg, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(LoadArgument{source=initResult, dest=initReg, kind=movePolyWord}), BlockSimple(InitialiseMem{size=untagSizeReg, init=initReg, addr=initAddrReg}), BlockSimple InitialisationComplete, BlockSimple(LoadArgument{source=RegisterArgument allocReg, dest=target, kind=movePolyWord})], RegisterArgument target, false) end (*Turn the codetree structure into icode. *) val bodyContext = {loopArgs=NONE, stackPtr=0, currHandler=NONE, overflowBlock=ref NONE} val (bodyCode, _, bodyExited) = codeToICodeRev(body, bodyContext, true, SpecificPReg resultTarget, beginInstructions) val icode = if bodyExited then bodyCode else returnInstruction(bodyContext, resultTarget, bodyCode) (* Turn the icode list into basic blocks. The input list is in reverse so as part of this we reverse the list. *) local val resArray = Array.array(!labelCounter, BasicBlock{ block=[], flow=ExitCode }) fun createEntry (blockNo, block, flow) = Array.update(resArray, blockNo, BasicBlock{ block=block, flow=flow}) fun splitCode([], _, _) = (* End of code. We should have had a BeginFunction. 
*) raise InternalError "splitCode - no begin" | splitCode(BlockBegin args :: _, sinceLabel, flow) = (* Final instruction. Create the initial block and exit. *) createEntry(0, BeginFunction args ::sinceLabel, flow) | splitCode(BlockSimple instr :: rest, sinceLabel, flow) = splitCode(rest, instr :: sinceLabel, flow) | splitCode(BlockLabel label :: rest, sinceLabel, flow) = (* Label - finish this block and start another. *) ( createEntry(label, sinceLabel, flow); (* Default to a jump to this label. That is used if we have assumed a drop-through. *) splitCode(rest, [], Unconditional label) ) | splitCode(BlockExit instr :: rest, _, _) = splitCode(rest, [instr], ExitCode) | splitCode(BlockFlow flow :: rest, _, _) = splitCode(rest, [], flow) | splitCode(BlockRaiseAndHandle(instr, handler) :: rest, _, _) = splitCode(rest, [instr], UnconditionalHandle handler) | splitCode(BlockOptionalHandle{call, handler, label} :: rest, sinceLabel, flow) = let (* A function call within a handler. This could go to the handler but if there is no exception will go to the next instruction. Also includes JumpLoop since the stack check could result in an Interrupt exception. *) in createEntry(label, sinceLabel, flow); splitCode(rest, [call], ConditionalHandle{handler=handler, continue=label}) end in val () = splitCode(icode, [], ExitCode) val resultVector = Array.vector resArray end open ICODETRANSFORM val pregProperties = Vector.fromList(List.rev(! pregPropList)) in codeICodeFunctionToX86{blocks = resultVector, functionName = name, pregProps = pregProperties, ccCount= ! ccRefCounter, debugSwitches = debugSwitches, resultClosure = resultClosure} end fun gencodeLambda(lambda, debugSwitches, closure) = let open DEBUG Universal (*val debugSwitches = [tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), tagInject assemblyCodeTag true] @ debugSwitches*) in codeFunctionToX86(lambda, debugSwitches, closure) end structure Foreign = X86FOREIGN structure Sharing = struct type backendIC = backendIC and bicLoadForm = bicLoadForm and argumentType = argumentType and closureRef = closureRef end end;