diff --git a/mlsource/MLCompiler/CodeTree/CODETREE_SIMPLIFIER.sml b/mlsource/MLCompiler/CodeTree/CODETREE_SIMPLIFIER.sml
index 77494a98..a947634f 100644
--- a/mlsource/MLCompiler/CodeTree/CODETREE_SIMPLIFIER.sml
+++ b/mlsource/MLCompiler/CodeTree/CODETREE_SIMPLIFIER.sml
@@ -1,1731 +1,1737 @@
(*
    Copyright (c) 2013, 2016-17 David C.J. Matthews

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License version 2.1 as published by the Free Software Foundation.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*)

(*
    This is a cut-down version of the optimiser which simplifies the code but
    does not apply any heuristics.  It follows chained bindings, in particular
    through tuples, folds constant expressions involving built-in functions and
    expands inline functions that have previously been marked as inlineable.
    It does not detect small functions that can be inlined nor does it
    code-generate functions without free variables.
*)

functor CODETREE_SIMPLIFIER(
    structure BASECODETREE: BaseCodeTreeSig

    structure CODETREE_FUNCTIONS: CodetreeFunctionsSig

    structure REMOVE_REDUNDANT:
    sig
        type codetree
        type loadForm
        type codeUse
        val cleanProc : (codetree * codeUse list * (int -> loadForm) * int) -> codetree
        structure Sharing: sig type codetree = codetree and loadForm = loadForm and codeUse = codeUse end
    end

    sharing
        BASECODETREE.Sharing
    =   CODETREE_FUNCTIONS.Sharing
    =   REMOVE_REDUNDANT.Sharing
) :
sig
    type codetree and codeBinding and envSpecial

    val simplifier: codetree * int -> (codetree * codeBinding list * envSpecial) * int * bool
    val specialToGeneral: codetree * codeBinding list * envSpecial -> codetree

    structure Sharing:
    sig
        type codetree = codetree
        and codeBinding = codeBinding
        and envSpecial = envSpecial
    end
end
=
struct
    open BASECODETREE
    open Address
    open CODETREE_FUNCTIONS
    open BuiltIns

    exception InternalError = Misc.InternalError

    exception RaisedException

    (* The bindings are held internally as a reversed list.  This is really
       only a check that the reversed and forward lists aren't confused. *)
    datatype revlist = RevList of codeBinding list

    type simpContext =
    {
        lookupAddr: loadForm -> envGeneral * envSpecial,
        enterAddr: int * (envGeneral * envSpecial) -> unit,
        nextAddress: unit -> int,
        reprocess: bool ref
    }

    fun envGeneralToCodetree(EnvGenLoad ext) = Extract ext
    |   envGeneralToCodetree(EnvGenConst w) = Constnt w

    fun mkDec (laddr, res) = Declar{value = res, addr = laddr, use=[]}

    fun mkEnv([], exp) = exp
    |   mkEnv(decs, exp as Extract(LoadLocal loadAddr)) =
        (
            (* A common case is where we have a binding as the last item and then
               a load of that binding.  Reduce this so other optimisations are
               possible.  This is still something of a special case that
               could/should be generalised. *)
            case List.last decs of
                Declar{addr=decAddr, value, ... } =>
                    if loadAddr = decAddr
                    then mkEnv(List.take(decs, List.length decs - 1), value)
                    else Newenv(decs, exp)
            |   _ => Newenv(decs, exp)
        )
    |   mkEnv(decs, exp) = Newenv(decs, exp)

    fun isConstnt(Constnt _) = true
    |   isConstnt _ = false

    (* Wrap up the general, bindings and special value as a codetree node.
The special entry is discarded except for Constnt entries which are converted to ConstntWithInline. That allows any inlineable code to be carried forward to later passes. *) fun specialToGeneral(g, RevList(b as _ :: _), s) = mkEnv(List.rev b, specialToGeneral(g, RevList [], s)) | specialToGeneral(Constnt(w, p), RevList [], s) = Constnt(w, setInline s p) | specialToGeneral(g, RevList [], _) = g (* Convert a constant to a fixed value. Used in some constant folding. *) val toFix: machineWord -> FixedInt.int = FixedInt.fromInt o Word.toIntX o toShort local val ffiSizeFloat: unit -> word = RunCall.rtsCallFast1 "PolySizeFloat" and ffiSizeDouble: unit -> word = RunCall.rtsCallFast1 "PolySizeDouble" in (* If we have a constant index value we convert that into a byte offset. We need to know the size of the item on this platform. We have to make this check when we actually compile the code because the interpreted version will generally be run on a platform different from the one the pre-built compiler was compiled on. The ML word length will be the same because we have separate pre-built compilers for 32 and 64-bit. *) fun getMultiplier (LoadStoreMLWord _) = RunCall.bytesPerWord | getMultiplier (LoadStoreMLByte _) = 0w1 | getMultiplier LoadStoreC8 = 0w1 | getMultiplier LoadStoreC16 = 0w2 | getMultiplier LoadStoreC32 = 0w4 | getMultiplier LoadStoreC64 = 0w8 | getMultiplier LoadStoreCFloat = ffiSizeFloat() | getMultiplier LoadStoreCDouble = ffiSizeDouble() | getMultiplier LoadStoreUntaggedUnsigned = RunCall.bytesPerWord end fun simplify(c, s) = mapCodetree (simpGeneral s) c (* Process the codetree to return a codetree node. This is used when we don't want the special case. *) and simpGeneral { lookupAddr, ...} (Extract ext) = let val (gen, spec) = lookupAddr ext in SOME(specialToGeneral(envGeneralToCodetree gen, RevList [], spec)) end | simpGeneral context (Newenv envArgs) = SOME(specialToGeneral(simpNewenv(envArgs, context, RevList []))) | simpGeneral context (Lambda lambda) = SOME(Lambda(#1(simpLambda(lambda, context, NONE, NONE)))) | simpGeneral context (Eval {function, argList, resultType}) = SOME(specialToGeneral(simpFunctionCall(function, argList, resultType, context, RevList[]))) (* BuiltIn0 functions can't be processed specially. 
*) | simpGeneral context (Unary{oper, arg1}) = SOME(specialToGeneral(simpUnary(oper, arg1, context, RevList []))) | simpGeneral context (Binary{oper, arg1, arg2}) = SOME(specialToGeneral(simpBinary(oper, arg1, arg2, context, RevList []))) | simpGeneral context (Arbitrary{oper=ArbCompare test, shortCond, arg1, arg2, longCall}) = SOME(specialToGeneral(simpArbitraryCompare(test, shortCond, arg1, arg2, longCall, context, RevList []))) | simpGeneral context (Arbitrary{oper=ArbArith arith, shortCond, arg1, arg2, longCall}) = SOME(specialToGeneral(simpArbitraryArith(arith, shortCond, arg1, arg2, longCall, context, RevList []))) | simpGeneral context (AllocateWordMemory {numWords, flags, initial}) = SOME(specialToGeneral(simpAllocateWordMemory(numWords, flags, initial, context, RevList []))) | simpGeneral context (Cond(condTest, condThen, condElse)) = SOME(specialToGeneral(simpIfThenElse(condTest, condThen, condElse, context, RevList []))) | simpGeneral context (Tuple { fields, isVariant }) = SOME(specialToGeneral(simpTuple(fields, isVariant, context, RevList []))) | simpGeneral context (Indirect{ base, offset, indKind }) = SOME(specialToGeneral(simpFieldSelect(base, offset, indKind, context, RevList []))) | simpGeneral context (SetContainer{container, tuple, filter}) = let val optCont = simplify(container, context) val (cGen, cDecs, cSpec) = simpSpecial(tuple, context, RevList []) in case cSpec of (* If the tuple is a local binding it is simpler to pick it up from the "special" entry. *) EnvSpecTuple(size, recEnv) => let val fields = List.tabulate(size, envGeneralToCodetree o #1 o recEnv) in SOME(simpPostSetContainer(optCont, Tuple{isVariant=false, fields=fields}, cDecs, filter)) end | _ => SOME(simpPostSetContainer(optCont, cGen, cDecs, filter)) end | simpGeneral (context as { enterAddr, nextAddress, reprocess, ...}) (BeginLoop{loop, arguments, ...}) = let val didReprocess = ! reprocess (* To see if we really need the loop first try simply binding the arguments and process it. It's often the case that if one or more arguments is a constant that the looping case will be eliminated. *) val withoutBeginLoop = simplify(mkEnv(List.map (Declar o #1) arguments, loop), context) fun foldLoop f n (Loop l) = f(l, n) | foldLoop f n (Newenv(_, exp)) = foldLoop f n exp | foldLoop f n (Cond(_, t, e)) = foldLoop f (foldLoop f n t) e | foldLoop f n (Handle {handler, ...}) = foldLoop f n handler | foldLoop f n (SetContainer{tuple, ...}) = foldLoop f n tuple | foldLoop _ n _ = n (* Check if the Loop instruction is there. This assumes that these are the only tail-recursive cases. *) val hasLoop = foldLoop (fn _ => true) false in if not (hasLoop withoutBeginLoop) then SOME withoutBeginLoop else let (* Reset "reprocess". It may have been set in the withoutBeginLoop that's not the code we're going to return. *) val () = reprocess := didReprocess (* We need the BeginLoop. Create new addresses for the arguments. *) fun declArg({addr, value, use, ...}, typ) = let val newAddr = nextAddress() in enterAddr(addr, (EnvGenLoad(LoadLocal newAddr), EnvSpecNone)); ({addr = newAddr, value = simplify(value, context), use = use }, typ) end (* Now look to see if the (remaining) loops have any arguments that do not change. Do this after processing because we could be eliminating other loops that may change the arguments. 
*) val declArgs = map declArg arguments val beginBody = simplify(loop, context) local fun argsMatch((Extract (LoadLocal argNo), _), ({addr, ...}, _)) = argNo = addr | argsMatch _ = false fun checkLoopArgs(loopArgs, checks) = let fun map3(loopA :: loopArgs, decA :: decArgs, checkA :: checkArgs) = (argsMatch(loopA, decA) andalso checkA) :: map3(loopArgs, decArgs, checkArgs) | map3 _ = [] in map3(loopArgs, declArgs, checks) end in val checkList = foldLoop checkLoopArgs (map (fn _ => true) arguments) beginBody end in if List.exists (fn l => l) checkList then let (* Turn the original arguments into bindings. *) local fun argLists(true, (arg, _), (tArgs, fArgs)) = (Declar arg :: tArgs, fArgs) | argLists(false, arg, (tArgs, fArgs)) = (tArgs, arg :: fArgs) in val (unchangedArgs, filteredDeclArgs) = ListPair.foldrEq argLists ([], []) (checkList, declArgs) end fun changeLoops (Loop loopArgs) = let val newArgs = ListPair.foldrEq(fn (false, arg, l) => arg :: l | (true, _, l) => l) [] (checkList, loopArgs) in Loop newArgs end | changeLoops(Newenv(decs, exp)) = Newenv(decs, changeLoops exp) | changeLoops(Cond(i, t, e)) = Cond(i, changeLoops t, changeLoops e) | changeLoops(Handle{handler, exp, exPacketAddr}) = Handle{handler=changeLoops handler, exp=exp, exPacketAddr=exPacketAddr} | changeLoops(SetContainer{tuple, container, filter}) = SetContainer{tuple=changeLoops tuple, container=container, filter=filter} | changeLoops code = code val beginBody = simplify(changeLoops loop, context) (* Reprocess because we've lost any special part from the arguments that haven't changed. *) val () = reprocess := true in SOME(mkEnv(unchangedArgs, BeginLoop {loop=beginBody, arguments=filteredDeclArgs})) end else SOME(BeginLoop {loop=beginBody, arguments=declArgs}) end end | simpGeneral context (TagTest{test, tag, maxTag}) = ( case simplify(test, context) of Constnt(testResult, _) => if isShort testResult andalso toShort testResult = tag then SOME CodeTrue else SOME CodeFalse | sTest => SOME(TagTest{test=sTest, tag=tag, maxTag=maxTag}) ) | simpGeneral context (LoadOperation{kind, address}) = let (* Try to move constants out of the index. *) val (genAddress, RevList decAddress) = simpAddress(address, getMultiplier kind, context) (* If the base address and index are constant and this is an immutable load we can do this at compile time. *) val result = case (genAddress, kind) of ({base=Constnt(baseAddr, _), index=NONE, offset}, LoadStoreMLWord _) => if isShort baseAddr then LoadOperation{kind=kind, address=genAddress} else let (* Ignore the "isImmutable" flag and look at the immutable status of the memory. Check that this is a word object and that the offset is within range. The code for Vector.sub, for example, raises an exception if the index is out of range but still generates the (unreachable) indexing code. 
*) val addr = toAddress baseAddr val wordOffset = offset div RunCall.bytesPerWord in if isMutable addr orelse not(isWords addr) orelse wordOffset >= length addr then LoadOperation{kind=kind, address=genAddress} else Constnt(toMachineWord(loadWord(addr, wordOffset)), []) end | ({base=Constnt(baseAddr, _), index=NONE, offset}, LoadStoreMLByte _) => if isShort baseAddr then LoadOperation{kind=kind, address=genAddress} else let val addr = toAddress baseAddr val wordOffset = offset div RunCall.bytesPerWord in if isMutable addr orelse not(isBytes addr) orelse wordOffset >= length addr then LoadOperation{kind=kind, address=genAddress} else Constnt(toMachineWord(loadByte(addr, offset)), []) end | ({base=Constnt(baseAddr, _), index=NONE, offset}, LoadStoreUntaggedUnsigned) => if isShort baseAddr then LoadOperation{kind=kind, address=genAddress} else let val addr = toAddress baseAddr (* We don't currently have loadWordUntagged in Address but it's only ever used to load the string length word so we can use that. *) in if isMutable addr orelse not(isBytes addr) orelse offset <> 0w0 then LoadOperation{kind=kind, address=genAddress} else Constnt(toMachineWord(String.size(RunCall.unsafeCast addr)), []) end | _ => LoadOperation{kind=kind, address=genAddress} in SOME(mkEnv(List.rev decAddress, result)) end | simpGeneral context (StoreOperation{kind, address, value}) = let val (genAddress, decAddress) = simpAddress(address, getMultiplier kind, context) val (genValue, RevList decValue, _) = simpSpecial(value, context, decAddress) in SOME(mkEnv(List.rev decValue, StoreOperation{kind=kind, address=genAddress, value=genValue})) end | simpGeneral (context as {reprocess, ...}) (BlockOperation{kind, sourceLeft, destRight, length}) = let val multiplier = case kind of BlockOpMove{isByteMove=false} => RunCall.bytesPerWord | BlockOpMove{isByteMove=true} => 0w1 | BlockOpEqualByte => 0w1 | BlockOpCompareByte => 0w1 val (genSrcAddress, RevList decSrcAddress) = simpAddress(sourceLeft, multiplier, context) val (genDstAddress, RevList decDstAddress) = simpAddress(destRight, multiplier, context) val (genLength, RevList decLength, _) = simpSpecial(length, context, RevList []) (* If we have a short length move we're better doing it as a sequence of loads and stores. This is particularly useful with string concatenation. Small here means three or less. Four and eight byte moves are handled as single instructions in the code-generator provided the alignment is correct. *) val shortLength = case genLength of Constnt(lenConst, _) => if isShort lenConst then let val l = toShort lenConst in if l <= 0w3 then SOME l else NONE end else NONE | _ => NONE val combinedDecs = List.rev decSrcAddress @ List.rev decDstAddress @ List.rev decLength val operation = case (shortLength, kind) of (SOME length, BlockOpMove{isByteMove}) => let val _ = reprocess := true (* Frequently the source will be a constant. 
*) val {base=baseSrc, index=indexSrc, offset=offsetSrc} = genSrcAddress and {base=baseDst, index=indexDst, offset=offsetDst} = genDstAddress (* We don't know if the source is immutable but the destination definitely isn't *) val moveKind = if isByteMove then LoadStoreMLByte{isImmutable=false} else LoadStoreMLWord{isImmutable=false} fun makeMoves offset = if offset = length then [] else NullBinding( StoreOperation{kind=moveKind, address={base=baseDst, index=indexDst, offset=offsetDst+offset*multiplier}, value=LoadOperation{kind=moveKind, address={base=baseSrc, index=indexSrc, offset=offsetSrc+offset*multiplier}}}) :: makeMoves(offset+0w1) in mkEnv(combinedDecs @ makeMoves 0w0, CodeZero (* unit result *)) end | (SOME length, BlockOpEqualByte) => (* Comparing with the null string and up to 3 characters. *) let val {base=baseSrc, index=indexSrc, offset=offsetSrc} = genSrcAddress and {base=baseDst, index=indexDst, offset=offsetDst} = genDstAddress val moveKind = LoadStoreMLByte{isImmutable=false} (* Build andalso tree to check each byte. For the null string this simply returns "true". *) fun makeComparison offset = if offset = length then CodeTrue else Cond( Binary{oper=WordComparison{test=TestEqual, isSigned=false}, arg1=LoadOperation{kind=moveKind, address={base=baseSrc, index=indexSrc, offset=offsetSrc+offset*multiplier}}, arg2=LoadOperation{kind=moveKind, address={base=baseDst, index=indexDst, offset=offsetDst+offset*multiplier}}}, makeComparison(offset+0w1), CodeFalse) in mkEnv(combinedDecs, makeComparison 0w0) end | _ => mkEnv(combinedDecs, BlockOperation{kind=kind, sourceLeft=genSrcAddress, destRight=genDstAddress, length=genLength}) in SOME operation end | simpGeneral (context as {enterAddr, nextAddress, ...}) (Handle{exp, handler, exPacketAddr}) = let (* We need to make a new binding for the exception packet. *) val expBody = simplify(exp, context) val newAddr = nextAddress() val () = enterAddr(exPacketAddr, (EnvGenLoad(LoadLocal newAddr), EnvSpecNone)) val handleBody = simplify(handler, context) in SOME(Handle{exp=expBody, handler=handleBody, exPacketAddr=newAddr}) end | simpGeneral _ _ = NONE (* Where we have an Indirect or Eval we want the argument as either a tuple or an inline function respectively if that's possible. Getting that also involves various other cases as well. Because a binding may later be used in such a context we treat any binding in that way as well. 
*) and simpSpecial (Extract ext, { lookupAddr, ...}, tailDecs) = let val (gen, spec) = lookupAddr ext in (envGeneralToCodetree gen, tailDecs, spec) end | simpSpecial (Newenv envArgs, context, tailDecs) = simpNewenv(envArgs, context, tailDecs) | simpSpecial (Lambda lambda, context, tailDecs) = let val (gen, spec) = simpLambda(lambda, context, NONE, NONE) in (Lambda gen, tailDecs, spec) end | simpSpecial (Eval {function, argList, resultType}, context, tailDecs) = simpFunctionCall(function, argList, resultType, context, tailDecs) | simpSpecial (Unary{oper, arg1}, context, tailDecs) = simpUnary(oper, arg1, context, tailDecs) | simpSpecial (Binary{oper, arg1, arg2}, context, tailDecs) = simpBinary(oper, arg1, arg2, context, tailDecs) | simpSpecial (Arbitrary{oper=ArbCompare test, shortCond, arg1, arg2, longCall}, context, tailDecs) = simpArbitraryCompare(test, shortCond, arg1, arg2, longCall, context, tailDecs) | simpSpecial (Arbitrary{oper=ArbArith arith, shortCond, arg1, arg2, longCall}, context, tailDecs) = simpArbitraryArith(arith, shortCond, arg1, arg2, longCall, context, tailDecs) | simpSpecial (AllocateWordMemory{numWords, flags, initial}, context, tailDecs) = simpAllocateWordMemory(numWords, flags, initial, context, tailDecs) | simpSpecial (Cond(condTest, condThen, condElse), context, tailDecs) = simpIfThenElse(condTest, condThen, condElse, context, tailDecs) | simpSpecial (Tuple { fields, isVariant }, context, tailDecs) = simpTuple(fields, isVariant, context, tailDecs) | simpSpecial (Indirect{ base, offset, indKind }, context, tailDecs) = simpFieldSelect(base, offset, indKind, context, tailDecs) | simpSpecial (c: codetree, s: simpContext, tailDecs): codetree * revlist * envSpecial = let (* Anything else - copy it and then split it into the fields. *) fun split(Newenv(l, e), RevList tailDecs) = (* Pull off bindings. *) split (e, RevList(List.rev l @ tailDecs)) | split(Constnt(m, p), tailDecs) = (Constnt(m, p), tailDecs, findInline p) | split(c, tailDecs) = (c, tailDecs, EnvSpecNone) in split(simplify(c, s), tailDecs) end (* Process a Newenv. We need to add the bindings to the context. *) and simpNewenv((envDecs: codeBinding list, envExp), context as { enterAddr, nextAddress, reprocess, ...}, tailDecs): codetree * revlist * envSpecial = let fun copyDecs ([], decs) = simpSpecial(envExp, context, decs) (* End of the list - process the result expression. *) | copyDecs ((Declar{addr, value, ...} :: vs), decs) = ( case simpSpecial(value, context, decs) of (* If this raises an exception stop here. *) vBinding as (Raise _, _, _) => vBinding | vBinding => let (* Add the declaration to the table. *) val (optV, dec) = makeNewDecl(vBinding, context) val () = enterAddr(addr, optV) in copyDecs(vs, dec) end ) | copyDecs(NullBinding v :: vs, decs) = (* Not a binding - process this and the rest.*) ( case simpSpecial(v, context, decs) of (* If this raises an exception stop here. *) vBinding as (Raise _, _, _) => vBinding | (cGen, RevList cDecs, _) => copyDecs(vs, RevList(NullBinding cGen :: cDecs)) ) | copyDecs(RecDecs mutuals :: vs, RevList decs) = (* Mutually recursive declarations. Any of the declarations may refer to any of the others. They should all be lambdas. The front end generates functions with more than one argument (either curried or tupled) as pairs of mutually recursive functions. The main function body takes its arguments on the stack (or in registers) and the auxiliary inline function, possibly nested, takes the tupled or curried arguments and calls it. 
If the main function is recursive it will first call the inline function which is why the pair are mutually recursive. As far as possible we want to use the main function since that uses the least memory. Specifically, if the function recurses we want the recursive call to pass all the arguments if it can. *) let (* Reorder the function so the explicitly-inlined ones come first. Their code can then be inserted into the main functions. *) local val (inlines, nonInlines) = List.partition ( fn {lambda = { isInline=Inline, ...}, ... } => true | _ => false) mutuals in val orderedDecs = inlines @ nonInlines end (* Go down the functions creating new addresses for them and entering them in the table. *) val addresses = map (fn {addr, ... } => let val decAddr = nextAddress() in enterAddr (addr, (EnvGenLoad(LoadLocal decAddr), EnvSpecNone)); decAddr end) orderedDecs fun processFunction({ lambda, addr, ... }, newAddr) = let val (gen, spec) = simpLambda(lambda, context, SOME addr, SOME newAddr) (* Update the entry in the table to include any inlineable function. *) val () = enterAddr (addr, (EnvGenLoad (LoadLocal newAddr), spec)) in {addr=newAddr, lambda=gen, use=[]} end val rlist = ListPair.map processFunction (orderedDecs, addresses) in (* and put these declarations onto the list. *) copyDecs(vs, RevList(List.rev(partitionMutableBindings(RecDecs rlist)) @ decs)) end | copyDecs (Container{addr, size, setter, ...} :: vs, RevList decs) = let (* Enter the new address immediately - it's needed in the setter. *) val decAddr = nextAddress() val () = enterAddr (addr, (EnvGenLoad(LoadLocal decAddr), EnvSpecNone)) val (setGen, RevList setDecs, _) = simpSpecial(setter, context, RevList []) in (* If we have inline expanded a function that sets the container we're better off eliminating the container completely. *) case setGen of SetContainer { tuple, filter, container } => let (* Check the container we're setting is the address we've made for it. *) val _ = (case container of Extract(LoadLocal a) => a = decAddr | _ => false) orelse raise InternalError "copyDecs: Container/SetContainer" val newDecAddr = nextAddress() val () = enterAddr (addr, (EnvGenLoad(LoadLocal newDecAddr), EnvSpecNone)) val tupleAddr = nextAddress() val tupleDec = Declar{addr=tupleAddr, use=[], value=tuple} val tupleLoad = mkLoadLocal tupleAddr val resultTuple = BoolVector.foldri(fn (i, true, l) => mkInd(i, tupleLoad) :: l | (_, false, l) => l) [] filter val _ = List.length resultTuple = size orelse raise InternalError "copyDecs: Container/SetContainer size" val containerDec = Declar{addr=newDecAddr, use=[], value=mkTuple resultTuple} (* TODO: We're replacing a container with what is notionally a tuple on the heap. It should be optimised away as a result of a further pass but we currently have indirections from a container for these. On the native platforms that doesn't matter but on 32-in-64 indirecting from the heap and from the stack are different. *) val _ = reprocess := true in copyDecs(vs, RevList(containerDec :: tupleDec :: setDecs @ decs)) end | _ => let val dec = Container{addr=decAddr, use=[], size=size, setter=setGen} in copyDecs(vs, RevList(dec :: setDecs @ decs)) end end in copyDecs(envDecs, tailDecs) end (* Prepares a binding for entry into a look-up table. Returns the entry to put into the table together with any bindings that must be made. If the general part of the optVal is a constant we can just put the constant in the table. If it is a load (Extract) it is just renaming an existing entry so we can return it. 
Otherwise we have to make a new binding and return a load (Extract) entry for it. *) and makeNewDecl((Constnt w, RevList decs, spec), _) = ((EnvGenConst w, spec), RevList decs) (* No need to create a binding for a constant. *) | makeNewDecl((Extract ext, RevList decs, spec), _) = ((EnvGenLoad ext, spec), RevList decs) (* Binding is simply giving a new name to a variable - can ignore this declaration. *) | makeNewDecl((gen, RevList decs, spec), { nextAddress, ...}) = let (* Create a binding for this value. *) val newAddr = nextAddress() in ((EnvGenLoad(LoadLocal newAddr), spec), RevList(mkDec(newAddr, gen) :: decs)) end and simpLambda({body, isInline, name, argTypes, resultType, closure, localCount, ...}, { lookupAddr, reprocess, ... }, myOldAddrOpt, myNewAddrOpt) = let (* A new table for the new function. *) val oldAddrTab = Array.array (localCount, NONE) val optClosureList = makeClosure() val isNowRecursive = ref false local fun localOldAddr (LoadLocal addr) = valOf(Array.sub(oldAddrTab, addr)) | localOldAddr (ext as LoadArgument _) = (EnvGenLoad ext, EnvSpecNone) | localOldAddr (ext as LoadRecursive) = (EnvGenLoad ext, EnvSpecNone) | localOldAddr (LoadClosure addr) = let val oldEntry = List.nth(closure, addr) (* If the entry in the closure is our own address this is recursive. *) fun isRecursive(EnvGenLoad(LoadLocal a), SOME b) = if a = b then (isNowRecursive := true; true) else false | isRecursive _ = false in if isRecursive(EnvGenLoad oldEntry, myOldAddrOpt) then (EnvGenLoad LoadRecursive, EnvSpecNone) else let val newEntry = lookupAddr oldEntry val makeClosure = addToClosure optClosureList fun convertResult(genEntry, specEntry) = (* If after looking up the entry we get our new address it's recursive. *) if isRecursive(genEntry, myNewAddrOpt) then (EnvGenLoad LoadRecursive, EnvSpecNone) else let val newGeneral = case genEntry of EnvGenLoad ext => EnvGenLoad(makeClosure ext) | EnvGenConst w => EnvGenConst w (* Have to modify the environment here so that if we look up free variables we add them to the closure. *) fun convertEnv env args = convertResult(env args) val newSpecial = case specEntry of EnvSpecTuple(size, env) => EnvSpecTuple(size, convertEnv env) | EnvSpecInlineFunction(spec, env) => EnvSpecInlineFunction(spec, convertEnv env) | EnvSpecUnary _ => EnvSpecNone (* Don't pass this in *) | EnvSpecBinary _ => EnvSpecNone (* Don't pass this in *) | EnvSpecNone => EnvSpecNone in (newGeneral, newSpecial) end in convertResult newEntry end end and setTab (index, v) = Array.update (oldAddrTab, index, SOME v) in val newAddressAllocator = ref 0 fun mkAddr () = ! newAddressAllocator before newAddressAllocator := ! newAddressAllocator + 1 val newCode = simplify (body, { enterAddr = setTab, lookupAddr = localOldAddr, nextAddress=mkAddr, reprocess = reprocess }) end val closureAfterOpt = extractClosure optClosureList val localCount = ! newAddressAllocator (* If we have mutually recursive "small" functions we may turn them into recursive functions. We have to remove the "small" status from them to prevent them from being expanded inline anywhere else. The optimiser may turn them back into "small" functions if the recursion is actually tail-recursion. *) val isNowInline = case isInline of Inline => if ! isNowRecursive then NonInline else Inline | NonInline => NonInline (* Clean up the function body at this point if it could be inlined. There are examples where failing to do this can blow up. This can be the result of creating both a general and special function inside an inline function. 
*) val cleanBody = case isNowInline of NonInline => newCode | _ => REMOVE_REDUNDANT.cleanProc(newCode, [UseExport], LoadClosure, localCount) val copiedLambda: lambdaForm = { body = cleanBody, isInline = isNowInline, name = name, closure = closureAfterOpt, argTypes = argTypes, resultType = resultType, localCount = localCount, recUse = [] } val inlineCode = case isNowInline of NonInline => EnvSpecNone | _ => EnvSpecInlineFunction(copiedLambda, fn addr => (EnvGenLoad(List.nth(closureAfterOpt, addr)), EnvSpecNone)) in ( copiedLambda, inlineCode ) end and simpFunctionCall(function, argList, resultType, context as { reprocess, ...}, tailDecs) = let (* Function call - This may involve inlining the function. *) (* Get the function to be called and see if it is inline or a lambda expression. *) val (genFunct, decsFunct, specFunct) = simpSpecial(function, context, tailDecs) (* We have to make a special check here that we are not passing in the function we are trying to expand. This could result in an infinitely recursive expansion. It is only going to happen in very special circumstances such as a definition of the Y combinator. If we see that we don't attempt to expand inline. It could be embedded in a tuple or the closure of a function as well as passed directly. *) val isRecursiveArg = case function of Extract extOrig => let fun containsFunction(Extract thisArg, v) = (v orelse thisArg = extOrig, FOLD_DESCEND) | containsFunction(Lambda{closure, ...}, v) = (* Only the closure, not the body *) (foldl (fn (c, w) => foldtree containsFunction w (Extract c)) v closure, FOLD_DONT_DESCEND) | containsFunction(Eval _, v) = (v, FOLD_DONT_DESCEND) (* OK if it's called *) | containsFunction(_, v) = (v, FOLD_DESCEND) in List.exists(fn (c, _) => foldtree containsFunction false c) argList end | _ => false in case (specFunct, genFunct, isRecursiveArg) of (EnvSpecInlineFunction({body=lambdaBody, localCount, argTypes, ...}, functEnv), _, false) => let val _ = List.length argTypes = List.length argList orelse raise InternalError "simpFunctionCall: argument mismatch" val () = reprocess := true (* If we expand inline we have to reprocess *) and { nextAddress, reprocess, ...} = context (* Expand a function inline, either one marked explicitly to be inlined or one detected as "small". *) (* Calling inline proc or a lambda expression which is just called. The function is replaced with a block containing declarations of the parameters. We need a new table here because the addresses we use to index it are the addresses which are local to the function. New addresses are created in the range of the surrounding function. 
*) val localVec = Array.array(localCount, NONE) local fun processArgs([], bindings) = ([], bindings) | processArgs((arg, _)::args, bindings) = let val (thisArg, newBindings) = makeNewDecl(simpSpecial(arg, context, bindings), context) val (otherArgs, resBindings) = processArgs(args, newBindings) in (thisArg::otherArgs, resBindings) end val (params, bindings) = processArgs(argList, decsFunct) val paramVec = Vector.fromList params in fun getParameter n = Vector.sub(paramVec, n) (* Bindings necessary for the arguments *) val copiedArgs = bindings end local fun localOldAddr(LoadLocal addr) = valOf(Array.sub(localVec, addr)) | localOldAddr(LoadArgument addr) = getParameter addr | localOldAddr(LoadClosure closureEntry) = functEnv closureEntry | localOldAddr LoadRecursive = raise InternalError "localOldAddr: LoadRecursive" fun setTabForInline (index, v) = Array.update (localVec, index, SOME v) val lambdaContext = { lookupAddr=localOldAddr, enterAddr=setTabForInline, nextAddress=nextAddress, reprocess = reprocess } in val (cGen, cDecs, cSpec) = simpSpecial(lambdaBody,lambdaContext, copiedArgs) end in (cGen, cDecs, cSpec) end | (_, gen as Constnt _, _) => (* Not inlinable - constant function. *) let val copiedArgs = map (fn (arg, argType) => (simplify(arg, context), argType)) argList val evCopiedCode = Eval {function = gen, argList = copiedArgs, resultType=resultType} in (evCopiedCode, decsFunct, EnvSpecNone) end | (_, gen, _) => (* Anything else. *) let val copiedArgs = map (fn (arg, argType) => (simplify(arg, context), argType)) argList val evCopiedCode = Eval {function = gen, argList = copiedArgs, resultType=resultType} in (evCopiedCode, decsFunct, EnvSpecNone) end end (* Special processing for the current builtIn1 operations. *) (* Constant folding for built-ins. These ought to be type-correct i.e. we should have tagged values in some cases and addresses in others. However there may be run-time tests that would ensure type-correctness and we can't be sure that they will always be folded at compile-time. e.g. we may have if isShort c then shortOp c else longOp c If c is a constant then we may try to fold both the shortOp and the longOp and one of these will be type-incorrect although never executed at run-time. *) and simpUnary(oper, arg1, context as { reprocess, ...}, tailDecs) = let val (genArg1, decArg1, specArg1) = simpSpecial(arg1, context, tailDecs) in case (oper, genArg1) of (NotBoolean, Constnt(v, _)) => ( reprocess := true; (if isShort v andalso toShort v = 0w0 then CodeTrue else CodeFalse, decArg1, EnvSpecNone) ) | (NotBoolean, genArg1) => ( (* NotBoolean: This can be the result of using Bool.not but more usually occurs as a result of other code. We don't have TestNotEqual or IsAddress so both of these use NotBoolean with TestEqual and IsTagged. Also we can insert a NotBoolean as a result of a Cond. We try to eliminate not(not a) and to push other NotBooleans down to a point where a boolean is tested. *) case specArg1 of EnvSpecUnary(NotBoolean, originalArg) => ( (* not(not a) - Eliminate. *) reprocess := true; (originalArg, decArg1, EnvSpecNone) ) | _ => (* Otherwise pass this on. It is also extracted in a Cond. *) (Unary{oper=NotBoolean, arg1=genArg1}, decArg1, EnvSpecUnary(NotBoolean, genArg1)) ) | (IsTaggedValue, Constnt(v, _)) => ( reprocess := true; (if isShort v then CodeTrue else CodeFalse, decArg1, EnvSpecNone) ) | (IsTaggedValue, genArg1) => ( (* We use this to test for nil values and if we have constructed a record (or possibly a function) it can't be null. 
*) case specArg1 of EnvSpecTuple _ => (CodeFalse, decArg1, EnvSpecNone) before reprocess := true | EnvSpecInlineFunction _ => (CodeFalse, decArg1, EnvSpecNone) before reprocess := true | _ => (Unary{oper=oper, arg1=genArg1}, decArg1, EnvSpecNone) ) | (MemoryCellLength, Constnt(v, _)) => ( reprocess := true; (if isShort v then CodeZero else Constnt(toMachineWord(Address.length(toAddress v)), []), decArg1, EnvSpecNone) ) | (MemoryCellFlags, Constnt(v, _)) => ( reprocess := true; (if isShort v then CodeZero else Constnt(toMachineWord(Address.flags(toAddress v)), []), decArg1, EnvSpecNone) ) | (LongWordToTagged, Constnt(v, _)) => ( reprocess := true; (Constnt(toMachineWord(Word.fromLargeWord(RunCall.unsafeCast v)), []), decArg1, EnvSpecNone) ) | (LongWordToTagged, genArg1) => ( (* If we apply LongWordToTagged to an argument we have created with UnsignedToLongWord we can return the original argument. *) case specArg1 of EnvSpecUnary(UnsignedToLongWord, originalArg) => ( reprocess := true; (originalArg, decArg1, EnvSpecNone) ) | _ => (Unary{oper=LongWordToTagged, arg1=genArg1}, decArg1, EnvSpecNone) ) | (SignedToLongWord, Constnt(v, _)) => ( reprocess := true; (Constnt(toMachineWord(Word.toLargeWordX(RunCall.unsafeCast v)), []), decArg1, EnvSpecNone) ) | (UnsignedToLongWord, Constnt(v, _)) => ( reprocess := true; (Constnt(toMachineWord(Word.toLargeWord(RunCall.unsafeCast v)), []), decArg1, EnvSpecNone) ) | (UnsignedToLongWord, genArg1) => (* Add the operation as the special entry. It can then be recognised by LongWordToTagged. *) (Unary{oper=oper, arg1=genArg1}, decArg1, EnvSpecUnary(UnsignedToLongWord, genArg1)) | _ => (Unary{oper=oper, arg1=genArg1}, decArg1, EnvSpecNone) end and simpBinary(oper, arg1, arg2, context as {reprocess, ...}, tailDecs) = let val (genArg1, decArg1, _ (*specArg1*)) = simpSpecial(arg1, context, tailDecs) val (genArg2, decArgs, _ (*specArg2*)) = simpSpecial(arg2, context, decArg1) in case (oper, genArg1, genArg2) of (WordComparison{test, isSigned}, Constnt(v1, _), Constnt(v2, _)) => if (case test of TestEqual => false | _ => not(isShort v1) orelse not(isShort v2)) then (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) else let val () = reprocess := true val testResult = case (test, isSigned) of (* TestEqual can be applied to addresses. *) (TestEqual, _) => RunCall.pointerEq(v1, v2) | (TestLess, false) => toShort v1 < toShort v2 | (TestLessEqual, false) => toShort v1 <= toShort v2 | (TestGreater, false) => toShort v1 > toShort v2 | (TestGreaterEqual, false) => toShort v1 >= toShort v2 | (TestLess, true) => toFix v1 < toFix v2 | (TestLessEqual, true) => toFix v1 <= toFix v2 | (TestGreater, true) => toFix v1 > toFix v2 | (TestGreaterEqual, true) => toFix v1 >= toFix v2 | (TestUnordered, _) => raise InternalError "WordComparison: TestUnordered" in (if testResult then CodeTrue else CodeFalse, decArgs, EnvSpecNone) end | (FixedPrecisionArith arithOp, Constnt(v1, _), Constnt(v2, _)) => if not(isShort v1) orelse not(isShort v2) then (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) else let val () = reprocess := true val v1S = toFix v1 and v2S = toFix v2 fun asConstnt v = Constnt(toMachineWord v, []) val raiseOverflow = Raise(Constnt(toMachineWord Overflow, [])) val raiseDiv = Raise(Constnt(toMachineWord Div, [])) (* ?? There's usually an explicit test. 
*) val resultCode = case arithOp of ArithAdd => (asConstnt(v1S+v2S) handle Overflow => raiseOverflow) | ArithSub => (asConstnt(v1S-v2S) handle Overflow => raiseOverflow) | ArithMult => (asConstnt(v1S*v2S) handle Overflow => raiseOverflow) | ArithQuot => (asConstnt(FixedInt.quot(v1S,v2S)) handle Overflow => raiseOverflow | Div => raiseDiv) | ArithRem => (asConstnt(FixedInt.rem(v1S,v2S)) handle Overflow => raiseOverflow | Div => raiseDiv) | ArithDiv => (asConstnt(FixedInt.div(v1S,v2S)) handle Overflow => raiseOverflow | Div => raiseDiv) | ArithMod => (asConstnt(FixedInt.mod(v1S,v2S)) handle Overflow => raiseOverflow | Div => raiseDiv) in (resultCode, decArgs, EnvSpecNone) end (* Addition and subtraction of zero. These can arise as a result of inline expansion of more general functions. *) | (FixedPrecisionArith ArithAdd, arg1, Constnt(v2, _)) => if isShort v2 andalso toShort v2 = 0w0 then (arg1, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (FixedPrecisionArith ArithAdd, Constnt(v1, _), arg2) => if isShort v1 andalso toShort v1 = 0w0 then (arg2, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (FixedPrecisionArith ArithSub, arg1, Constnt(v2, _)) => if isShort v2 andalso toShort v2 = 0w0 then (arg1, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (WordArith arithOp, Constnt(v1, _), Constnt(v2, _)) => if not(isShort v1) orelse not(isShort v2) then (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) else let val () = reprocess := true val v1S = toShort v1 and v2S = toShort v2 fun asConstnt v = Constnt(toMachineWord v, []) val resultCode = case arithOp of ArithAdd => asConstnt(v1S+v2S) | ArithSub => asConstnt(v1S-v2S) | ArithMult => asConstnt(v1S*v2S) | ArithQuot => raise InternalError "WordArith: ArithQuot" | ArithRem => raise InternalError "WordArith: ArithRem" | ArithDiv => asConstnt(v1S div v2S) | ArithMod => asConstnt(v1S mod v2S) in (resultCode, decArgs, EnvSpecNone) end | (WordArith ArithAdd, arg1, Constnt(v2, _)) => if isShort v2 andalso toShort v2 = 0w0 then (arg1, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (WordArith ArithAdd, Constnt(v1, _), arg2) => if isShort v1 andalso toShort v1 = 0w0 then (arg2, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (WordArith ArithSub, arg1, Constnt(v2, _)) => if isShort v2 andalso toShort v2 = 0w0 then (arg1, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (WordLogical logOp, Constnt(v1, _), Constnt(v2, _)) => if not(isShort v1) orelse not(isShort v2) then (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) else let val () = reprocess := true val v1S = toShort v1 and v2S = toShort v2 fun asConstnt v = Constnt(toMachineWord v, []) val resultCode = case logOp of LogicalAnd => asConstnt(Word.andb(v1S,v2S)) | LogicalOr => asConstnt(Word.orb(v1S,v2S)) | LogicalXor => asConstnt(Word.xorb(v1S,v2S)) in (resultCode, decArgs, EnvSpecNone) end | (WordLogical logop, arg1, Constnt(v2, _)) => (* Return the zero if we are anding with zero otherwise the original arg *) if isShort v2 andalso toShort v2 = 0w0 then (case logop of LogicalAnd => CodeZero | _ => arg1, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) | (WordLogical logop, Constnt(v1, _), arg2) => if isShort v1 andalso 
toShort v1 = 0w0 then (case logop of LogicalAnd => CodeZero | _ => arg2, decArgs, EnvSpecNone) else (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) (* TODO: Constant folding of shifts. *) | _ => (Binary{oper=oper, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) end (* Arbitrary precision operations. This is a sort of mixture of a built-in and a conditional. *) and simpArbitraryCompare(TestEqual, shortCond, arg1, arg2, longCall, context, tailDecs) = (* Equality is a special case and is only there to ensure that it is not accidentally converted into an indexed case further down. We must leave it as it is. *) let val (genCond, decCond, _ (*specArg1*)) = simpSpecial(shortCond, context, tailDecs) val (genArg1, decArg1, _ (*specArg1*)) = simpSpecial(arg1, context, decCond) val (genArg2, decArgs, _ (*specArg2*)) = simpSpecial(arg2, context, decArg1) in case (genArg1, genArg2) of (Constnt(v1, _), Constnt(v2, _)) => let val a1: LargeInt.int = RunCall.unsafeCast v1 and a2: LargeInt.int = RunCall.unsafeCast v2 in (if a1 = a2 then CodeTrue else CodeFalse, decArgs, EnvSpecNone) end | _ => (Arbitrary{oper=ArbCompare TestEqual, shortCond=genCond, arg1=genArg1, arg2=genArg2, longCall=simplify(longCall, context)}, decArgs, EnvSpecNone) end | simpArbitraryCompare(test, shortCond, arg1, arg2, longCall, context as {reprocess, ...}, tailDecs) = let val (genCond, decCond, _ (*specArg1*)) = simpSpecial(shortCond, context, tailDecs) val (genArg1, decArg1, _ (*specArg1*)) = simpSpecial(arg1, context, decCond) val (genArg2, decArgs, _ (*specArg2*)) = simpSpecial(arg2, context, decArg1) val posFlags = Address.F_bytes and negFlags = Word8.orb(Address.F_bytes, Address.F_negative) in (* Fold any constant/constant operations but more importantly, if we have variable/constant operations where the constant is short we can avoid using the full arbitrary precision call by just looking at the sign bit. *) case (genCond, genArg1, genArg2) of (_, Constnt(v1, _), Constnt(v2, _)) => let val a1: LargeInt.int = RunCall.unsafeCast v1 and a2: LargeInt.int = RunCall.unsafeCast v2 val testResult = case test of TestLess => a1 < a2 | TestGreater => a1 > a2 | TestLessEqual => a1 <= a2 | TestGreaterEqual => a1 >= a2 | _ => raise InternalError "simpArbitraryCompare: Unimplemented function" in (if testResult then CodeTrue else CodeFalse, decArgs, EnvSpecNone) end | (Constnt(c1, _), _, _) => if isShort c1 andalso toShort c1 = 0w0 then (* One argument is definitely long - generate the long form. *) (Binary{oper=WordComparison{test=test, isSigned=true}, arg1=simplify(longCall, context), arg2=CodeZero}, decArgs, EnvSpecNone) else (* Both arguments are short. That should mean they're constants. *) (Binary{oper=WordComparison{test=test, isSigned=true}, arg1=genArg1, arg2=genArg2}, decArgs, EnvSpecNone) before reprocess := true | (_, genArg1, cArg2 as Constnt _) => let (* The constant must be short otherwise the test would be false. 
*) val isNeg = case test of TestLess => true | TestLessEqual => true | _ => false (* Translate i < c into if isShort i then toShort i < c else isNegative i *) val newCode = Cond(Unary{oper=BuiltIns.IsTaggedValue, arg1=genArg1}, Binary { oper = BuiltIns.WordComparison{test=test, isSigned=true}, arg1 = genArg1, arg2 = cArg2 }, Binary { oper = BuiltIns.WordComparison{test=TestEqual, isSigned=false}, arg1=Unary { oper = MemoryCellFlags, arg1=genArg1 }, arg2=Constnt(toMachineWord(if isNeg then negFlags else posFlags), [])} ) in (newCode, decArgs, EnvSpecNone) end | (_, cArg1 as Constnt _, genArg2) => let (* We're testing c < i so the test is if isShort i then c < toShort i else isPositive i *) val isPos = case test of TestLess => true | TestLessEqual => true | _ => false val newCode = Cond(Unary{oper=BuiltIns.IsTaggedValue, arg1=genArg2}, Binary { oper = BuiltIns.WordComparison{test=test, isSigned=true}, arg1 = cArg1, arg2 = genArg2 }, Binary { oper = BuiltIns.WordComparison{test=TestEqual, isSigned=false}, arg1=Unary { oper = MemoryCellFlags, arg1=genArg2 }, arg2=Constnt(toMachineWord(if isPos then posFlags else negFlags), [])} ) in (newCode, decArgs, EnvSpecNone) end | _ => (Arbitrary{oper=ArbCompare test, shortCond=genCond, arg1=genArg1, arg2=genArg2, longCall=simplify(longCall, context)}, decArgs, EnvSpecNone) end and simpArbitraryArith(arith, shortCond, arg1, arg2, longCall, context, tailDecs) = let (* arg1 and arg2 are the arguments. shortCond is the condition that must be satisfied in order to use the short precision operation i.e. each argument must be short. *) val (genCond, decCond, _ (*specArg1*)) = simpSpecial(shortCond, context, tailDecs) val (genArg1, decArg1, _ (*specArg1*)) = simpSpecial(arg1, context, decCond) val (genArg2, decArgs, _ (*specArg2*)) = simpSpecial(arg2, context, decArg1) in case (genArg1, genArg2, genCond) of (Constnt(v1, _), Constnt(v2, _), _) => let val a1: LargeInt.int = RunCall.unsafeCast v1 and a2: LargeInt.int = RunCall.unsafeCast v2 (*val _ = print ("Fold arbitrary precision: " ^ PolyML.makestring(arith, a1, a2) ^ "\n")*) in case arith of ArithAdd => (Constnt(toMachineWord(a1+a2), []), decArgs, EnvSpecNone) | ArithSub => (Constnt(toMachineWord(a1-a2), []), decArgs, EnvSpecNone) | ArithMult => (Constnt(toMachineWord(a1*a2), []), decArgs, EnvSpecNone) | _ => raise InternalError "simpArbitraryArith: Unimplemented function" end | (_, _, Constnt(c1, _)) => if isShort c1 andalso toShort c1 = 0w0 then (* One argument is definitely long - generate the long form. *) (simplify(longCall, context), decArgs, EnvSpecNone) else (Arbitrary{oper=ArbArith arith, shortCond=genCond, arg1=genArg1, arg2=genArg2, longCall=simplify(longCall, context)}, decArgs, EnvSpecNone) | _ => (Arbitrary{oper=ArbArith arith, shortCond=genCond, arg1=genArg1, arg2=genArg2, longCall=simplify(longCall, context)}, decArgs, EnvSpecNone) end and simpAllocateWordMemory(numWords, flags, initial, context, tailDecs) = let val (genArg1, decArg1, _ (*specArg1*)) = simpSpecial(numWords, context, tailDecs) val (genArg2, decArg2, _ (*specArg2*)) = simpSpecial(flags, context, decArg1) val (genArg3, decArg3, _ (*specArg3*)) = simpSpecial(initial, context, decArg2) in (AllocateWordMemory{numWords=genArg1, flags=genArg2, initial=genArg3}, decArg3, EnvSpecNone) end (* Loads, stores and block operations use address values. The index value is initially an arbitrary code tree but we can recognise common cases of constant index values or where a constant has been added to the index. 
TODO: If these are C memory moves we can also look at the base address. The base address for C memory operations is a LargeWord.word value i.e. the address is contained in a box. The base addresses for ML memory moves is an ML address i.e. unboxed. *) and simpAddress({base, index=NONE, offset}, _, context) = let val (genBase, decBase, _ (*specBase*)) = simpSpecial(base, context, RevList[]) in ({base=genBase, index=NONE, offset=offset}, decBase) end | simpAddress({base, index=SOME index, offset}, multiplier, context) = let val (genBase, RevList decBase, _) = simpSpecial(base, context, RevList[]) val (genIndex, RevList decIndex, _ (* specIndex *)) = simpSpecial(index, context, RevList[]) val (newIndex, newOffset) = case genIndex of Constnt(indexOffset, _) => if isShort indexOffset then (NONE, offset + toShort indexOffset * multiplier) else (SOME genIndex, offset) | _ => (SOME genIndex, offset) in ({base=genBase, index=newIndex, offset=newOffset}, RevList(decIndex @ decBase)) end (* (* A built-in function. We can call certain built-ins immediately if the arguments are constants. *) and simpBuiltIn(rtsCallNo, argList, context as { reprocess, ...}) = let val copiedArgs = map (fn arg => simpSpecial(arg, context)) argList open RuntimeCalls (* When checking for a constant we need to check that there are no bindings. They could have side-effects. *) fun isAConstant(Constnt _, [], _) = true | isAConstant _ = false in (* If the function is an RTS call that is safe to evaluate immediately and all the arguments are constants evaluate it now. *) if earlyRtsCall rtsCallNo andalso List.all isAConstant copiedArgs then let val () = reprocess := true exception Interrupt = Thread.Thread.Interrupt (* Turn the arguments into a vector. *) val argVector = case makeConstVal(mkTuple(List.map specialToGeneral copiedArgs)) of Constnt(w, _) => w | _ => raise InternalError "makeConstVal: Not constant" (* Call the function. If it raises an exception (e.g. divide by zero) generate code to raise the exception at run-time. We don't do that for Interrupt which we assume only arises by user interaction and not as a result of executing the code so we reraise that exception immediately. *) val ioOp : int -> machineWord = RunCall.run_call1 RuntimeCalls.POLY_SYS_io_operation (* We need callcode_tupled here because we pass the arguments as a tuple but the RTS functions we're calling expect arguments in registers or on the stack. *) val call: (address * machineWord) -> machineWord = RunCall.run_call1 RuntimeCalls.POLY_SYS_callcode_tupled val code = Constnt (call(toAddress(ioOp rtsCallNo), argVector), []) handle exn as Interrupt => raise exn (* Must not handle this *) | exn => Raise (Constnt(toMachineWord exn, [])) in (code, [], EnvSpecNone) end (* We can optimise certain built-ins in combination with others. If we have POLY_SYS_unsigned_to_longword combined with POLY_SYS_longword_to_tagged we can eliminate both. This can occur in cases such as Word.fromLargeWord o Word8.toLargeWord. If we have POLY_SYS_cmem_load_X functions where the address is formed by adding a constant to an address we can move the addend into the load instruction. *) (* TODO: Could we also have POLY_SYS_signed_to_longword here? *) else if rtsCallNo = POLY_SYS_longword_to_tagged andalso (case copiedArgs of [(_, _, EnvSpecBuiltIn(r, _))] => r = POLY_SYS_unsigned_to_longword | _ => false) then let val arg = (* Get the argument of the argument. 
*) case copiedArgs of [(_, _, EnvSpecBuiltIn(_, [arg]))] => arg | _ => raise Bind in (arg, [], EnvSpecNone) end else if (rtsCallNo = POLY_SYS_cmem_load_8 orelse rtsCallNo = POLY_SYS_cmem_load_16 orelse rtsCallNo = POLY_SYS_cmem_load_32 orelse rtsCallNo = POLY_SYS_cmem_load_64 orelse rtsCallNo = POLY_SYS_cmem_store_8 orelse rtsCallNo = POLY_SYS_cmem_store_16 orelse rtsCallNo = POLY_SYS_cmem_store_32 orelse rtsCallNo = POLY_SYS_cmem_store_64) andalso (* Check if the first argument is an addition. The second should be a constant. If the addend is a constant it will be a large integer i.e. the address of a byte segment. *) let (* Check that we have a valid value to add to a large word. The cmem_load/store values sign extend their arguments so we use toLargeWordX here. *) fun isAcceptableOffset c = if isShort c (* Shouldn't occur. *) then false else let val l: LargeWord.word = RunCall.unsafeCast c in Word.toLargeWordX(Word.fromLargeWord l) = l end in case copiedArgs of (_, _, EnvSpecBuiltIn(r, args)) :: (Constnt _, _, _) :: _ => r = POLY_SYS_plus_longword andalso (case args of (* If they were both constants we'd have folded them. *) [Constnt(c, _), _] => isAcceptableOffset c | [_, Constnt(c, _)] => isAcceptableOffset c | _ => false) | _ => false end then let (* We have a load or store with an added constant. *) val (base, offset) = case copiedArgs of (_, _, EnvSpecBuiltIn(_, [Constnt(offset, _), base])) :: (Constnt(existing, _), _, _) :: _ => (base, Word.fromLargeWord(RunCall.unsafeCast offset) + toShort existing) | (_, _, EnvSpecBuiltIn(_, [base, Constnt(offset, _)])) :: (Constnt(existing, _), _, _) :: _ => (base, Word.fromLargeWord(RunCall.unsafeCast offset) + toShort existing) | _ => raise Bind val newDecs = List.map(fn h => makeNewDecl(h, context)) copiedArgs val genArgs = List.map(fn ((g, _), _) => envGeneralToCodetree g) newDecs val preDecs = List.foldr (op @) [] (List.map #2 newDecs) val gen = BuiltIn(rtsCallNo, base :: Constnt(toMachineWord offset, []) :: List.drop(genArgs, 2)) in (gen, preDecs, EnvSpecNone) end else let (* Create bindings for the arguments. This ensures that any side-effects in the evaluation of the arguments are performed in the correct order even if the application of the built-in itself is applicative. The new arguments are either loads or constants which are applicative. *) val newDecs = List.map(fn h => makeNewDecl(h, context)) copiedArgs val genArgs = List.map(fn ((g, _), _) => envGeneralToCodetree g) newDecs val preDecs = List.foldr (op @) [] (List.map #2 newDecs) val gen = BuiltIn(rtsCallNo, genArgs) val spec = if reorderable gen then EnvSpecBuiltIn(rtsCallNo, genArgs) else EnvSpecNone in (gen, preDecs, spec) end end *) and simpIfThenElse(condTest, condThen, condElse, context, tailDecs) = (* If-then-else. The main simplification is if we have constants in the test or in both the arms. *) let val word0 = toMachineWord 0 val word1 = toMachineWord 1 val False = word0 val True = word1 in case simpSpecial(condTest, context, tailDecs) of (* If the test is a constant we can return the appropriate arm and ignore the other. 
*) (Constnt(testResult, _), bindings, _) => let val arm = if wordEq (testResult, False) (* false - return else-part *) then condElse (* if false then x else y == y *) (* if true then x else y == x *) else condThen in simpSpecial(arm, context, bindings) end | (testGen, testbindings as RevList testBList, testSpec) => let fun mkNot (Unary{oper=BuiltIns.NotBoolean, arg1}) = arg1 | mkNot arg = Unary{oper=BuiltIns.NotBoolean, arg1=arg} (* If the test involves a variable that was created with a NOT it's better to move it in here. *) val testCond = case testSpec of EnvSpecUnary(BuiltIns.NotBoolean, arg1) => mkNot arg1 | _ => testGen in case (simpSpecial(condThen, context, RevList[]), simpSpecial(condElse, context, RevList[])) of ((thenConst as Constnt(thenVal, _), RevList [], _), (elseConst as Constnt(elseVal, _), RevList [], _)) => (* Both arms return constants. This situation can arise in situations where we have andalso/orelse where the second "argument" has been reduced to a constant. *) if wordEq (thenVal, elseVal) then (* If the test has a side-effect we have to do it otherwise we can remove it. If we're in a nested andalso/orelse that may mean we can simplify the next level out. *) (thenConst (* or elseConst *), if sideEffectFree testCond then testbindings else RevList(NullBinding testCond :: testBList), EnvSpecNone) (* if x then true else false == x *) else if wordEq (thenVal, True) andalso wordEq (elseVal, False) then (testCond, testbindings, EnvSpecNone) (* if x then false else true == not x *) else if wordEq (thenVal, False) andalso wordEq (elseVal, True) then (mkNot testCond, testbindings, EnvSpecNone) else (* can't optimise *) (Cond (testCond, thenConst, elseConst), testbindings, EnvSpecNone) (* Rewrite "if x then raise y else z" into "(if x then raise y else (); z)" The advantage is that any tuples in z are lifted outside the "if". *) | (thenPart as (Raise _, _:revlist, _), (elsePart, RevList elseBindings, elseSpec)) => (* then-part raises an exception *) (elsePart, RevList(elseBindings @ NullBinding(Cond (testCond, specialToGeneral thenPart, CodeZero)) :: testBList), elseSpec) | ((thenPart, RevList thenBindings, thenSpec), elsePart as (Raise _, _, _)) => (* else part raises an exception *) (thenPart, RevList(thenBindings @ NullBinding(Cond (testCond, CodeZero, specialToGeneral elsePart)) :: testBList), thenSpec) | (thenPart, elsePart) => (Cond (testCond, specialToGeneral thenPart, specialToGeneral elsePart), testbindings, EnvSpecNone) end end (* Tuple construction. Tuples are also used for datatypes and structures (i.e. modules) *) and simpTuple(entries, isVariant, context, tailDecs) = (* The main reason for optimising record constructions is that they appear as tuples in ML. We try to ensure that loads from locally created tuples do not involve indirecting from the tuple but can get the value which was put into the tuple directly. If that is successful we may find that the tuple is never used directly so the use-count mechanism will ensure it is never created. *) let val tupleSize = List.length entries (* The record construction is treated as a block of local declarations so that any expressions which might have side-effects are done exactly once. *) (* We thread the bindings through here to avoid having to append the result. 
*) fun processFields([], bindings) = ([], bindings) | processFields(field::fields, bindings) = let val (thisField, newBindings) = makeNewDecl(simpSpecial(field, context, bindings), context) val (otherFields, resBindings) = processFields(fields, newBindings) in (thisField::otherFields, resBindings) end val (fieldEntries, allBindings) = processFields(entries, tailDecs) (* Make sure we include any inline code in the result. If this tuple is being "exported" we will lose the "special" part. *) fun envResToCodetree(EnvGenLoad(ext), _) = Extract ext | envResToCodetree(EnvGenConst(w, p), s) = Constnt(w, setInline s p) val generalFields = List.map envResToCodetree fieldEntries val genRec = if List.all isConstnt generalFields then makeConstVal(Tuple{ fields = generalFields, isVariant = isVariant }) else Tuple{ fields = generalFields, isVariant = isVariant } (* Get the field from the tuple if possible. If it's a variant, though, we may try to get an invalid field. See Tests/Succeed/Test167. *) fun getField addr = if addr < tupleSize then List.nth(fieldEntries, addr) else if isVariant then (EnvGenConst(toMachineWord 0, []), EnvSpecNone) else raise InternalError "getField - invalid index" val specRec = EnvSpecTuple(tupleSize, getField) in (genRec, allBindings, specRec) end and simpFieldSelect(base, offset, indKind, context, tailDecs) = let val (genSource, decSource, specSource) = simpSpecial(base, context, tailDecs) in (* Try to do the selection now if possible. *) case specSource of EnvSpecTuple(_, recEnv) => let (* The "special" entry we've found is a tuple. That means that we are taking a field from a tuple we made earlier and so we should be able to get the original code we used when we made the tuple. That might mean the tuple is never used and we can optimise away the construction of it completely. *) val (newGen, newSpec) = recEnv offset in (envGeneralToCodetree newGen, decSource, newSpec) end | _ => (* No special case possible. If the tuple is a constant mkInd/mkVarField will do the selection immediately. *) let val genSelect = case indKind of IndTuple => mkInd(offset, genSource) | IndVariant => mkVarField(offset, genSource) | IndContainer => mkIndContainer(offset, genSource) in (genSelect, decSource, EnvSpecNone) end end (* Process a SetContainer. Unlike the other simpXXX functions this is called after the arguments have been processed. We try to push the SetContainer - to the leaves of the expression. *) + to the leaves of the expression. This is particularly important with tail-recursive + functions that return tuples. Without this the function will lose tail-recursion + since each recursion will be followed by code to copy the result back to the + previous container. *) and simpPostSetContainer(container, Tuple{fields, ...}, RevList tupleDecs, filter) = let (* Apply the filter now. *) fun select(n, hd::tl) = if n >= BoolVector.length filter then [] else if BoolVector.sub(filter, n) then hd :: select(n+1, tl) else select(n+1, tl) | select(_, []) = [] val selected = select(0, fields) (* Frequently we will have produced an indirection from the same base. These will all be bindings so we have to reverse the process. 
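A typical shape, purely for illustration, is a result tuple built from successive selections of one base, i.e. bindings such as
    local1 = Indirect{base=c, offset=0}    local2 = Indirect{base=c, offset=1}
followed by a tuple of Extract local1, Extract local2 and so on. checkFields below looks up the original binding for each loaded field and succeeds only if every field comes from the same base with strictly increasing offsets.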
*) fun findOriginal a = List.find(fn Declar{addr, ...} => addr = a | _ => false) tupleDecs fun checkFields(last, Extract(LoadLocal a) :: tl) = ( case findOriginal a of - SOME(Declar{value=Indirect{base=Extract ext, indKind=IndTuple, offset, ...}, ...}) => + SOME(Declar{value=Indirect{base=Extract ext, indKind=IndContainer, offset, ...}, ...}) => ( case last of NONE => checkFields(SOME(ext, [offset]), tl) | SOME(lastExt, offsets) => (* It has to be the same base and with increasing offsets (no reordering). *) if lastExt = ext andalso offset > hd offsets then checkFields(SOME(ext, offset :: offsets), tl) else NONE ) | _ => NONE ) | checkFields(_, _ :: _) = NONE | checkFields(last, []) = last fun fieldsToFilter fields = let val maxDest = List.foldl Int.max ~1 fields val filterArray = BoolArray.array(maxDest+1, false) val _ = List.app(fn n => BoolArray.update(filterArray, n, true)) fields in BoolArray.vector filterArray end in case checkFields(NONE, selected) of - SOME (ext, fields) => + SOME (ext, fields) => (* It may be a container. *) let val filter = fieldsToFilter fields in case ext of LoadLocal localAddr => let (* Is this a container? If it is and we're copying all of it we can replace the inner container with a binding to the outer. We have to be careful because it is possible that we may create and set the inner container, then have some bindings that do some side-effects with the inner container before then copying it to the outer container. For simplicity and to maintain the condition that the container is set in the tails we only merge the containers if it's at the end (after any "filtering"). *) val allSet = BoolVector.foldl (fn (a, t) => a andalso t) true filter fun findContainer [] = NONE | findContainer (Declar{value, ...} :: tl) = if sideEffectFree value then findContainer tl else NONE | findContainer (Container{addr, size, setter, ...} :: tl) = if localAddr = addr andalso size = BoolVector.length filter andalso allSet then SOME (setter, tl) else NONE | findContainer _ = NONE in case findContainer tupleDecs of SOME (setter, decs) => (* Put in a binding for the inner container address so the - setter will set the outer container. *) + setter will set the outer container. + For this to work all loads from the stack must use native word length. 
*) mkEnv(List.rev(Declar{addr=localAddr, value=container, use=[]} :: decs), setter) | NONE => mkEnv(List.rev tupleDecs, - SetContainer{container=container, tuple = Extract ext, filter=filter}) + SetContainer{container=container, tuple = mkTuple selected, + filter=BoolVector.tabulate(List.length selected, fn _ => true)}) end | _ => mkEnv(List.rev tupleDecs, - SetContainer{container=container, tuple = Extract ext, filter=filter}) + SetContainer{container=container, tuple = mkTuple selected, + filter=BoolVector.tabulate(List.length selected, fn _ => true)}) end | NONE => mkEnv(List.rev tupleDecs, SetContainer{container=container, tuple = mkTuple selected, filter=BoolVector.tabulate(List.length selected, fn _ => true)}) end | simpPostSetContainer(container, Cond(ifpt, thenpt, elsept), RevList tupleDecs, filter) = mkEnv(List.rev tupleDecs, Cond(ifpt, simpPostSetContainer(container, thenpt, RevList [], filter), simpPostSetContainer(container, elsept, RevList [], filter))) | simpPostSetContainer(container, Newenv(envDecs, envExp), RevList tupleDecs, filter) = simpPostSetContainer(container, envExp, RevList(List.rev envDecs @ tupleDecs), filter) | simpPostSetContainer(container, BeginLoop{loop, arguments}, RevList tupleDecs, filter) = mkEnv(List.rev tupleDecs, BeginLoop{loop = simpPostSetContainer(container, loop, RevList [], filter), arguments=arguments}) | simpPostSetContainer(_, loop as Loop _, RevList tupleDecs, _) = (* If we are inside a BeginLoop we only set the container on leaves that exit the loop. Loop entries will go back to the BeginLoop so we don't add SetContainer nodes. *) mkEnv(List.rev tupleDecs, loop) | simpPostSetContainer(container, Handle{exp, handler, exPacketAddr}, RevList tupleDecs, filter) = mkEnv(List.rev tupleDecs, Handle{ exp = simpPostSetContainer(container, exp, RevList [], filter), handler = simpPostSetContainer(container, handler, RevList [], filter), exPacketAddr = exPacketAddr}) | simpPostSetContainer(container, tupleGen, RevList tupleDecs, filter) = mkEnv(List.rev tupleDecs, mkSetContainer(container, tupleGen, filter)) fun simplifier(c, numLocals) = let val localAddressAllocator = ref 0 val addrTab = Array.array(numLocals, NONE) fun lookupAddr (LoadLocal addr) = valOf(Array.sub(addrTab, addr)) | lookupAddr (env as LoadArgument _) = (EnvGenLoad env, EnvSpecNone) | lookupAddr (env as LoadRecursive) = (EnvGenLoad env, EnvSpecNone) | lookupAddr (LoadClosure _) = raise InternalError "top level reached in simplifier" and enterAddr (addr, tab) = Array.update (addrTab, addr, SOME tab) fun mkAddr () = ! localAddressAllocator before localAddressAllocator := ! localAddressAllocator + 1 val reprocess = ref false val (gen, RevList bindings, spec) = simpSpecial(c, {lookupAddr = lookupAddr, enterAddr = enterAddr, nextAddress = mkAddr, reprocess = reprocess}, RevList[]) in ((gen, List.rev bindings, spec), ! localAddressAllocator, !reprocess) end fun specialToGeneral(g, b as _ :: _, s) = mkEnv(b, specialToGeneral(g, [], s)) | specialToGeneral(Constnt(w, p), [], s) = Constnt(w, setInline s p) | specialToGeneral(g, [], _) = g structure Sharing = struct type codetree = codetree and codeBinding = codeBinding and envSpecial = envSpecial end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML index 9ff629a5..bf49f44b 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML @@ -1,3968 +1,3968 @@ (* Copyright David C. J. 
Matthews 2016-18 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86CodetreeToICode( structure BACKENDTREE: BackendIntermediateCodeSig structure ICODE: ICodeSig structure DEBUG: DEBUGSIG structure X86FOREIGN: FOREIGNCALLSIG structure ICODETRANSFORM: X86ICODETRANSFORMSIG structure CODE_ARRAY: CODEARRAYSIG sharing ICODE.Sharing = ICODETRANSFORM.Sharing = CODE_ARRAY.Sharing ): GENCODESIG = struct open BACKENDTREE open Address open ICODE open CODE_ARRAY exception InternalError = Misc.InternalError local val regs = case targetArch of Native32Bit => [eax, ebx] | Native64Bit => [eax, ebx, r8, r9, r10] | ObjectId32Bit => [eax, esi, r8, r9, r10] val fpResult = case targetArch of Native32Bit => FPReg fp0 | _ => XMMReg xmm0 val fpArgRegs = case targetArch of Native32Bit => [] | _ => [xmm0, xmm1, xmm2] in val generalArgRegs = List.map GenReg regs val floatingPtArgRegs = List.map XMMReg fpArgRegs fun resultReg GeneralType = GenReg eax | resultReg DoubleFloatType = fpResult | resultReg SingleFloatType = fpResult end (* tag a short constant *) fun tag c = 2 * c + 1 (* shift a short constant, but don't set tag bit *) fun semitag c = 2 * c (* Reverse a list and append the second. This is used a lot when converting between the reverse and forward list versions. e.g. codeToICode and codeToICodeRev *) fun revApp([], l) = l | revApp(hd :: tl, l) = revApp(tl, hd :: l) datatype blockStruct = BlockSimple of x86ICode | BlockExit of x86ICode | BlockLabel of int | BlockFlow of controlFlow | BlockBegin of { regArgs: (preg * reg) list, stackArgs: stackLocn list } | BlockRaiseAndHandle of x86ICode * int | BlockOptionalHandle of {call: x86ICode, handler: int, label: int } local open RunCall val F_mutable_bytes = Word.fromLargeWord(Word8.toLargeWord(Word8.orb (F_mutable, F_bytes))) fun makeRealConst l = let val r = allocateByteMemory(0wx8 div bytesPerWord, F_mutable_bytes) fun setBytes([], _) = () | setBytes(hd::tl, n) = (storeByte(r, n, hd); setBytes(tl, n+0wx1)) val () = setBytes(l, 0w0) val () = clearMutableBit r in r end in (* These are floating point constants used to change and mask the sign bit. 
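The byte lists are little-endian, so realSignBit is the 64-bit pattern 0x8000000000000000, just the IEEE double sign bit, and realAbsMask is 0x7FFFFFFFFFFFFFFF; floatSignBit and floatAbsMask put the 32-bit patterns 0x80000000 and 0x7FFFFFFF in the low word of the box. XOR-ing with a sign-bit constant negates the value and AND-ing with a mask clears the sign, which is what these are for.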
*) val realSignBit: machineWord = makeRealConst [0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx80] and realAbsMask: machineWord = makeRealConst [0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wx7f] and floatSignBit: machineWord = makeRealConst [0wx00, 0wx00, 0wx00, 0wx80, 0wx00, 0wx00, 0wx00, 0wx00] and floatAbsMask: machineWord = makeRealConst [0wxff, 0wxff, 0wxff, 0wx7f, 0wx00, 0wx00, 0wx00, 0wx00] end datatype commutative = Commutative | NonCommutative (* Check that a large-word constant looks right and get the value as a large int*) fun largeWordConstant value = if isShort value then raise InternalError "largeWordConstant: invalid" else let val addr = toAddress value in if length addr <> nativeWordSize div wordSize orelse flags addr <> F_bytes then raise InternalError "largeWordConstant: invalid" else (); LargeWord.toLargeInt(RunCall.unsafeCast addr) end fun codeFunctionToX86({body, localCount, name, argTypes, resultType=fnResultType, closure, ...}:bicLambdaForm, debugSwitches, resultClosure) = let (* Pseudo-registers are allocated sequentially and the properties added to the list. *) val pregCounter = ref 0 val pregPropList = ref [] fun newPReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropGeneral :: !pregPropList in PReg regNo end and newUReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropUntagged :: !pregPropList in PReg regNo end and newStackLoc size = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropStack size :: !pregPropList in StackLoc{size=size, rno=regNo} end and newMergeReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropMultiple :: !pregPropList in PReg regNo end datatype locationValue = NoLocation | PregLocation of preg | ContainerLocation of { container: stackLocn, stackOffset: int } val locToPregArray = Array.array(localCount, NoLocation) val labelCounter = ref 1 (* Start at 1. Zero is used for the root. *) fun newLabel() = !labelCounter before labelCounter := !labelCounter + 1 val ccRefCounter = ref 0 fun newCCRef() = CcRef(!ccRefCounter) before ccRefCounter := !ccRefCounter + 1 fun constantAsArgument value = if isShort value then IntegerConstant(tag(Word.toLargeIntX(toShort value))) else AddressConstant value (* Create the branch condition from the test, isSigned and jumpOn values. (In)equality tests are the same for signed and unsigned values. 
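For example a signed TestLess with jumpOn=true maps to JL and with jumpOn=false to its complement JGE, while the unsigned versions are JB and JNB. leftRightTest gives the mirrored test for when the operands have been swapped, so TestLess becomes TestGreater.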
*) local open BuiltIns in fun testAsBranch(TestEqual, _, true) = JE | testAsBranch(TestEqual, _, false) = JNE (* Signed tests *) | testAsBranch(TestLess, true, true) = JL | testAsBranch(TestLess, true, false) = JGE | testAsBranch(TestLessEqual, true, true) = JLE | testAsBranch(TestLessEqual, true, false) = JG | testAsBranch(TestGreater, true, true) = JG | testAsBranch(TestGreater, true, false) = JLE | testAsBranch(TestGreaterEqual, true, true) = JGE | testAsBranch(TestGreaterEqual, true, false) = JL (* Unsigned tests *) | testAsBranch(TestLess, false, true) = JB | testAsBranch(TestLess, false, false) = JNB | testAsBranch(TestLessEqual, false, true) = JNA | testAsBranch(TestLessEqual, false, false) = JA | testAsBranch(TestGreater, false, true) = JA | testAsBranch(TestGreater, false, false) = JNA | testAsBranch(TestGreaterEqual, false, true) = JNB | testAsBranch(TestGreaterEqual, false, false) = JB | testAsBranch(TestUnordered, _, _) = raise InternalError "TestUnordered" (* Switch the direction of a test if we turn c op x into x op c. *) fun leftRightTest TestEqual = TestEqual | leftRightTest TestLess = TestGreater | leftRightTest TestLessEqual = TestGreaterEqual | leftRightTest TestGreater = TestLess | leftRightTest TestGreaterEqual = TestLessEqual | leftRightTest TestUnordered = TestUnordered end (* Overflow check. This raises Overflow if the overflow bit is set in the cc. This generates a single block for the function unless there is a handler. As well as reducing the size of the code this also means that overflow checks are generally JO instructions to the end of the code. Since the default branch prediction is not to take forward jumps this should improve prefetching on the normal, non-overflow, path. *) fun checkOverflow ({currHandler=NONE, overflowBlock=ref(SOME overFlowLab), ...}) ccRef = (* It's already been set and there's no surrounding handler - use this. *) let val noOverflowLab = newLabel() in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel noOverflowLab ] end | checkOverflow ({currHandler=NONE, overflowBlock, ...}) ccRef = let (* *) val overFlowLab = newLabel() and noOverflowLab = newLabel() val packetReg = newPReg() val () = overflowBlock := SOME overFlowLab in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel overFlowLab, BlockSimple(LoadArgument{source=AddressConstant(toMachineWord(Overflow)), dest=packetReg, kind=movePolyWord}), BlockExit(RaiseExceptionPacket{packetReg=packetReg}), BlockLabel noOverflowLab ] end | checkOverflow ({currHandler=SOME h, ...}) ccRef = let val overFlowLab = newLabel() and noOverflowLab = newLabel() val packetReg = newPReg() in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel overFlowLab, BlockSimple(LoadArgument{source=AddressConstant(toMachineWord(Overflow)), dest=packetReg, kind=movePolyWord}), BlockRaiseAndHandle(RaiseExceptionPacket{packetReg=packetReg}, h), BlockLabel noOverflowLab ] end fun setAndRestoreRounding (rndMode, doWithRounding) = let open IEEEReal val savedRnd = newUReg() and setRnd = newUReg() in case fpMode of FPModeX87 => [BlockSimple(GetX87ControlReg{dest=savedRnd})] @ (* Set the appropriate bits in the control word. *) (case rndMode of TO_NEAREST => (* The bits need to be zero - just mask them. 
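The rounding control field of the x87 control word is bits 11-10: 00 rounds to nearest, 01 towards minus infinity, 10 towards plus infinity and 11 towards zero. AND-ing with 0xf3ff therefore clears the field for TO_NEAREST and the OR constants 0x400, 0x800 and 0xc00 select the other three modes; the SSE2 MXCSR uses the same encoding in bits 14-13, hence 0xffff9fff, 0x2000, 0x4000 and 0x6000 in the second case below.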
*) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32})] | TO_NEGINF => let val wrk = newUReg() in (* Mask the bits and set to 01 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x400, ccRef=newCCRef(), opSize=OpSize32})] end | TO_POSINF => let val wrk = newUReg() in (* Mask the bits and set to 10 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x800, ccRef=newCCRef(), opSize=OpSize32})] end | TO_ZERO => (* The bits need to be one - just set them. *) [BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xc00, ccRef=newCCRef(), opSize=OpSize32})]) @ [BlockSimple(SetX87ControlReg{source=setRnd})] @ doWithRounding() @ (* Restore the original rounding. *) [BlockSimple(SetX87ControlReg{source=savedRnd})] | FPModeSSE2 => [BlockSimple(GetSSE2ControlReg{dest=savedRnd})] @ (* Set the appropriate bits in the control word. *) (case rndMode of TO_NEAREST => (* The bits need to be zero - just mask them. *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32})] | TO_NEGINF => let val wrk = newUReg() in (* Mask the bits and set to 01 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x2000, ccRef=newCCRef(), opSize=OpSize32})] end | TO_POSINF => let val wrk = newUReg() in (* Mask the bits and set to 10 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x4000, ccRef=newCCRef(), opSize=OpSize32})] end | TO_ZERO => (* The bits need to be one - just set them. *) [BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x6000, ccRef=newCCRef(), opSize=OpSize32})]) @ [BlockSimple(SetSSE2ControlReg{source=setRnd})] @ doWithRounding() @ [BlockSimple(SetSSE2ControlReg{source=savedRnd})] end (* Put a floating point value into a box or tag it so the value can be held in a general register. *) fun boxOrTagReal(srcReg, destReg, precision) = if precision = BuiltIns.PrecDouble orelse wordSize <> 0w8 then let open BuiltIns val boxFloat = case (fpMode, precision) of (FPModeX87, PrecDouble) => BoxX87Double | (FPModeX87, PrecSingle) => BoxX87Float | (FPModeSSE2, PrecDouble) => BoxSSE2Double | (FPModeSSE2, PrecSingle) => BoxSSE2Float in [BlockSimple(BoxValue{boxKind=boxFloat, source=srcReg, dest=destReg, saveRegs=[]})] end else [BlockSimple(TagFloat{source=srcReg, dest=destReg})] (* Indicate that the base address is actually an object index where appropriate. *) val memIndexOrObject = case targetArch of ObjectId32Bit => ObjectIndex | _ => NoMemIndex (* Generally we have an offset in words and no index register. 
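The word offset is scaled by the platform word size to give a byte offset, so, for example, field 3 of a tuple on a native 64-bit target is addressed at byte offset 24 from the base, or 12 in the 32-in-64 representation where the base is an object index rather than an address.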
*) fun wordOffsetAddress(offset, baseReg: preg): argument = MemoryLocation{offset=offset*Word.toInt wordSize, base=baseReg, index=memIndexOrObject, cache=NONE} (* The large-word operations all work on the value within the box pointed at by the register. We generate all large-word operations using this even where the X86 instruction requires a register. This allows the next level to optimise cases of cascaded instructions and avoid creating boxes for intermediate values. *) fun wordAt reg = wordOffsetAddress(0, reg) val returnAddressEntry = newStackLoc 1 datatype argLoc = ArgInReg of { realReg: reg, argReg: preg } | ArgOnStack of { stackOffset: int, stackReg: stackLocn } (* Pseudo-regs for the result, the closure and the args that were passed in real regs. *) val resultTarget = newPReg() val closureRegAddr = newPReg() (* Create a map for the arguments indicating their register or stack location. *) local (* Select the appropriate argument register depending on the argument type. *) fun argTypesToArgEntries([], _, _, _) = ([], [], [], []) | argTypesToArgEntries(DoubleFloatType :: tl, gRegs, fpReg :: fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val pRegArg = newPReg() and uRegArg = newUReg() in (ArgInReg{realReg=fpReg, argReg=pRegArg} :: argTypes, boxOrTagReal(uRegArg, pRegArg, BuiltIns.PrecDouble) @ argCode, (uRegArg, fpReg) :: argRegs, stackArgs) end | argTypesToArgEntries(SingleFloatType :: tl, gRegs, fpReg :: fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val pRegArg = newPReg() and uRegArg = newUReg() in (ArgInReg{realReg=fpReg, argReg=pRegArg} :: argTypes, boxOrTagReal(uRegArg, pRegArg, BuiltIns.PrecSingle) @ argCode, (uRegArg, fpReg) :: argRegs, stackArgs) end | argTypesToArgEntries(_ :: tl, gReg :: gRegs, fpRegs, n) = (* This deals with general arguments but also with extra floating point arguments. They are boxed as usual. *) let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val argReg=newPReg() in (ArgInReg{realReg=gReg, argReg=argReg} :: argTypes, argCode, (argReg, gReg) :: argRegs, stackArgs) end | argTypesToArgEntries(_ :: tl, [], fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, [], fpRegs, n-1) val stackLoc = newStackLoc 1 in (ArgOnStack {stackOffset=n, stackReg = stackLoc } :: argTypes, argCode, argRegs, stackLoc :: stackArgs) end val (argEntries, argCode, argRegs, stackArguments) = argTypesToArgEntries(argTypes, generalArgRegs, floatingPtArgRegs, List.length argTypes) val clReg = case closure of [] => [] | _ => [(closureRegAddr, GenReg edx)] in val argumentVector = Vector.fromList argEntries (* Start code for the function. *) val beginInstructions = argCode @ [BlockBegin{regArgs=clReg @ argRegs, stackArgs=stackArguments @ [returnAddressEntry]}] (* The number of arguments on the stack. Needed in return instrs and tail calls. *) val currentStackArgs = List.length stackArguments end (* The return instruction. This can be added on to various tails but there is always one at the end anyway. 
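A GeneralType result is returned as it is, but a boxed double result is first loaded from its box into an untagged register so it can be returned in the floating point result register, xmm0 or fp0 on the X87 path, and a single-precision float is either untagged, on 64-bit targets where it is held tagged within one word, or loaded from its box.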
*) fun returnInstruction({stackPtr, ...}, target, tailCode) = let val (returnCode, resReg) = case fnResultType of GeneralType => ([], target) | DoubleFloatType => let val resReg = newUReg() in ([BlockSimple(LoadArgument{source=wordAt target, dest=resReg, kind=MoveDouble})], resReg) end | SingleFloatType => let val resReg = newUReg() val unpack = if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument target, dest=resReg, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt target, dest=resReg, kind=MoveFloat}) in ([unpack], resReg) end in BlockExit(ReturnResultFromFunction{resultReg=resReg, realReg=resultReg fnResultType, numStackArgs=currentStackArgs}) :: returnCode @ (if stackPtr <> 0 then BlockSimple(ResetStackPtr{numWords=stackPtr, preserveCC=false}) :: tailCode else tailCode) end (* This controls what codeAsArgument returns. Different instructions have different requirements. If an option is set to false the value is instead loaded into a new preg. "const32s" means that it will fit into 32-bits. Any constant satisfies that on X86/32 but on the X86/64 we don't allow addresses because we can't be sure whether they will fit or not. *) type allowedArgument = { anyConstant: bool, const32s: bool, memAddr: bool, existingPreg: bool } val allowInMemMove = (* We can move a 32-bit constant into memory but not a long constant. *) { anyConstant=false, const32s=true, memAddr=false, existingPreg=true } and allowInPReg = { anyConstant=false, const32s=false, memAddr=false, existingPreg=true } (* AllowDefer can be used to ensure that any side-effects are done before something else but otherwise we only evaluate afterwards. *) and allowDefer = { anyConstant=true, const32s=true, memAddr=true, existingPreg=true } datatype destination = SpecificPReg of preg | NoResult | Allowed of allowedArgument (* Context type. *) type context = { loopArgs: (preg list * int * int) option, stackPtr: int, currHandler: int option, overflowBlock: int option ref } (* If a preg has been provided, use that, otherwise generate a new one. *) fun asTarget(SpecificPReg preg) = preg | asTarget NoResult = newPReg() | asTarget(Allowed _) = newPReg() fun moveIfNotAllowed(NoResult, code, arg) = (code, arg, false) | moveIfNotAllowed(Allowed{anyConstant=true, ...}, code, arg as AddressConstant _) = (code, arg, false) | moveIfNotAllowed(Allowed{anyConstant=true, ...}, code, arg as IntegerConstant _) = (code, arg, false) | moveIfNotAllowed(dest as Allowed{const32s=true, ...}, code, arg as IntegerConstant value) = (* This is allowed if the value is within 32-bits *) if is32bit value then (code, arg, false) else moveToTarget(dest, code, arg) | moveIfNotAllowed(dest as Allowed{const32s=true, ...}, code, arg as AddressConstant _) = if targetArch = Native32Bit then (code, arg, false) (* We can store the address directly *) else moveToTarget(dest, code, arg) | moveIfNotAllowed(Allowed{existingPreg=true, ...}, code, arg as RegisterArgument(PReg _)) = (code, arg, false) | moveIfNotAllowed(Allowed{memAddr=true, ...}, code, arg as MemoryLocation _) = (code, arg, false) | moveIfNotAllowed(dest, code, arg) = moveToTarget(dest, code, arg) and moveToTarget(dest, code, arg) = let val target = asTarget dest val moveSize = case arg of AddressConstant _ => movePolyWord | MemoryLocation _ => movePolyWord | _ => moveNativeWord in (code @ [BlockSimple(LoadArgument{source=arg, dest=target, kind=moveSize})], RegisterArgument target, false) end (* Create a bool result from a test by returning true or false. 
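The code is built in reverse but in forward order the generated blocks amount to, roughly:
    conditional jump on ccRef to trueLab or falseLab
    trueLab:  mergeReg := tagged 1; jump to mergeLab
    falseLab: mergeReg := tagged 0; jump to mergeLab
    mergeLab: target := mergeReg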
*) fun makeBoolResultRev(condition, ccRef, target, testCode) = let val trueLab = newLabel() and falseLab = newLabel() and mergeLab = newLabel() val mergeReg = newMergeReg() in BlockSimple(LoadArgument{dest=target, source=RegisterArgument mergeReg, kind=Move32Bit}) :: BlockLabel mergeLab :: BlockFlow(Unconditional mergeLab) :: BlockSimple(LoadArgument{dest=mergeReg, source=IntegerConstant(tag 0), kind=Move32Bit}) :: BlockLabel falseLab :: BlockFlow(Unconditional mergeLab) :: BlockSimple(LoadArgument{dest=mergeReg, source=IntegerConstant(tag 1), kind=Move32Bit}) :: BlockLabel trueLab :: BlockFlow(Conditional{ ccRef=ccRef, condition=condition, trueJump=trueLab, falseJump=falseLab }) :: testCode end fun moveIfNotAllowedRev(NoResult, code, arg) = (code, arg, false) | moveIfNotAllowedRev(Allowed{anyConstant=true, ...}, code, arg as AddressConstant _) = (code, arg, false) | moveIfNotAllowedRev(Allowed{anyConstant=true, ...}, code, arg as IntegerConstant _) = (code, arg, false) | moveIfNotAllowedRev(dest as Allowed{const32s=true, ...}, code, arg as IntegerConstant value) = (* This is allowed if the value is within 32-bits *) if is32bit value then (code, arg, false) else moveToTargetRev(dest, code, arg) | moveIfNotAllowedRev(dest as Allowed{const32s=true, ...}, code, arg as AddressConstant _) = if targetArch = Native32Bit then (code, arg, false) else moveToTargetRev(dest, code, arg) | moveIfNotAllowedRev(Allowed{existingPreg=true, ...}, code, arg as RegisterArgument(PReg _)) = (code, arg, false) | moveIfNotAllowedRev(Allowed{memAddr=true, ...}, code, arg as MemoryLocation _) = (code, arg, false) | moveIfNotAllowedRev(dest, code, arg) = moveToTargetRev(dest, code, arg) and moveToTargetRev(dest, code, arg) = let val target = asTarget dest val moveSize = case arg of AddressConstant _ => movePolyWord | MemoryLocation _ => movePolyWord | _ => moveNativeWord in (BlockSimple(LoadArgument{source=arg, dest=target, kind=moveSize}) :: code, RegisterArgument target, false) end (* Use a move if there's no offset or index. We could use an add if there's no index. *) and loadAddress{base, offset=0, index=NoMemIndex, dest} = LoadArgument{source=RegisterArgument base, dest=dest, kind=movePolyWord} | loadAddress{base, offset, index, dest} = LoadEffectiveAddress{base=SOME base, offset=offset, dest=dest, index=index, opSize=nativeWordOpSize} and codeToICodeTarget(instr, context: context, isTail, target) = (* This is really for backwards compatibility. *) let val (code, _, _) = codeToICode(instr, context, isTail, SpecificPReg target) in code end and codeToPReg(instr, context) = let (* Many instructions require an argument in a register. If it's already in a register use that rather than creating a new one. *) val (code, result, _) = codeToICode(instr, context, false, Allowed allowInPReg) val preg = case result of RegisterArgument pr => pr | _ => raise InternalError "codeToPReg" in (code, preg) end and codeToPRegRev(instr, context, tailCode) = let (* Many instructions require an argument in a register. If it's already in a register use that rather than creating a new one. *) val (code, result, _) = codeToICodeRev(instr, context, false, Allowed allowInPReg, tailCode) val preg = case result of RegisterArgument pr => pr | _ => raise InternalError "codeToPRegRev" in (code, preg) end and codeToICode(instr, context, isTail, destination) = let val (code, dest, haveExited) = codeToICodeRev(instr, context, isTail, destination, []) in (List.rev code, dest, haveExited) end (* Main function to turn the codetree into ICode. 
Optimisation is generally left to later passes. This does detect tail recursion. This builds the result up in reverse order. There was an allocation hotspot in loadFields in the BICTuple case which was eliminated by building the list in reverse and then reversing the result. It seems better to build the list in reverse generally but for the moment there are too many special cases to do everything. *) and codeToICodeRev(BICNewenv (bindings, exp), context: context as {stackPtr=initialSp, ...} , isTail, destination, tailCode) = let (* Process a list of bindings. We need to accumulate the space used by any containers and reset the stack pointer at the end if necessary. *) fun doBindings([], context, tailCode) = (tailCode, context) | doBindings(BICDeclar{value=BICExtract(BICLoadLocal l), addr, ...} :: decs, context, tailCode) = let (* Giving a new name to an existing entry. This should have been removed at a higher level but it doesn't always seem to be. In particular we must treat this specially if it's a container. *) val original = Array.sub(locToPregArray, l) val () = Array.update(locToPregArray, addr, original) in doBindings(decs, context, tailCode) end | doBindings(BICDeclar{value, addr, ...} :: decs, context, tailCode) = let val (code, dest) = codeToPRegRev(value, context, tailCode) val () = Array.update(locToPregArray, addr, PregLocation dest) in doBindings(decs, context, code) end | doBindings(BICRecDecs [{lambda, addr, ...}] :: decs, context, tailCode) = (* We shouldn't have single entries in RecDecs but it seems to occur at the moment. *) let val dest = newPReg() val (code, _, _) = codeToICodeRev(BICLambda lambda, context, false, SpecificPReg dest, tailCode) val () = Array.update(locToPregArray, addr, PregLocation dest) in doBindings(decs, context, code) end | doBindings(BICRecDecs recDecs :: decs, context, tailCode) = let val destRegs = map (fn _ => newPReg()) recDecs (* First build the closures as mutable cells containing zeros. Set the entry in the address table to the register containing the address. *) fun makeClosure({lambda={closure, ...}, addr, ...}, dest, c) = let val () = Array.update(locToPregArray, addr, PregLocation dest) val sizeClosure = List.length closure + (if targetArch = ObjectId32Bit then 2 else 1) open Address fun clear n = if n = sizeClosure then [BlockSimple(AllocateMemoryOperation{size=sizeClosure, flags=if targetArch = ObjectId32Bit then Word8.orb(F_mutable, F_closure) else F_mutable, dest=dest, saveRegs=[]})] else (clear (n+1) @ [BlockSimple( StoreArgument{source=IntegerConstant(tag 0), base=dest, offset=n*Word.toInt wordSize, index=memIndexOrObject, kind=movePolyWord, isMutable=false})]) in c @ clear 0 @ [BlockSimple InitialisationComplete] end val allocClosures = ListPair.foldlEq makeClosure [] (recDecs, destRegs) fun setClosure({lambda as {closure, ...}, ...}, dest, l) = let val clResult = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, clResult) (* Basically the same as tuple except we load the address of the closure we've made. *) fun loadFields([], _) = [] | loadFields(f :: rest, n) = let val (code, source, _) = codeToICode(BICExtract f, context, false, Allowed allowInMemMove) val storeValue = [BlockSimple(StoreArgument{ source=source, base=dest, offset=n*Word.toInt wordSize, index=memIndexOrObject, kind=movePolyWord, isMutable=false })] in code @ storeValue @ loadFields(rest, n+1) end val setCodeAddress = if targetArch = ObjectId32Bit then let (* We can't get the code address until run time. 
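In the ObjectId32Bit representation the closure object starts with the 64-bit native code address, which occupies two 32-bit poly-words, so the free variables are stored from word offset 2 onwards and the code address has to be fetched from the constant closure at run time and stored as a native word, which is what the three instructions below do.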
*) val codeReg = newUReg() val closureReg = newPReg() in map BlockSimple [ LoadArgument{ source=AddressConstant(toMachineWord clResult), dest=closureReg, kind=movePolyWord}, LoadArgument{ source=MemoryLocation{offset=0, base=closureReg, index=ObjectIndex, cache=NONE}, dest=codeReg, kind=Move64Bit}, StoreArgument{ source=RegisterArgument codeReg, offset=0, base=dest, index=ObjectIndex, kind=moveNativeWord, isMutable=false} ] end else let val codeAddr = codeAddressFromClosure clResult val (code, source, _) = moveIfNotAllowed(Allowed allowInMemMove, [], AddressConstant codeAddr) in code @ [BlockSimple( StoreArgument{ source=source, base=dest, offset=0, index=NoMemIndex, kind=movePolyWord, isMutable=false })] end val setFields = setCodeAddress @ loadFields(closure, if targetArch = ObjectId32Bit then 2 else 1) in l @ setFields @ [BlockSimple(LockMutable{addr=dest})] end val setClosures = ListPair.foldlEq setClosure [] (recDecs, destRegs) val code = List.rev(allocClosures @ setClosures) in doBindings(decs, context, code @ tailCode) end | doBindings(BICNullBinding exp :: decs, context, tailCode) = let val (code, _, _) = codeToICodeRev(exp, context, false, NoResult, tailCode) (* And discard result. *) in doBindings(decs, context, code) end | doBindings(BICDecContainer{ addr, size } :: decs, {loopArgs, stackPtr, currHandler, overflowBlock}, tailCode) = let val containerReg = newStackLoc size val () = Array.update(locToPregArray, addr, ContainerLocation{container=containerReg, stackOffset=stackPtr+size}) in doBindings(decs, {loopArgs=loopArgs, stackPtr=stackPtr+size, currHandler=currHandler, overflowBlock=overflowBlock}, BlockSimple(ReserveContainer{size=size, container=containerReg}) :: tailCode) end val (codeBindings, resContext as {stackPtr=finalSp, ...}) = doBindings(bindings, context, tailCode) (* If we have had a container we'll need to reset the stack *) in if initialSp <> finalSp then let val _ = finalSp >= initialSp orelse raise InternalError "codeToICode - stack ptr" val bodyReg = newPReg() and resultReg = asTarget destination val (codeExp, result, haveExited) = codeToICodeRev(exp, resContext, isTail, SpecificPReg bodyReg, codeBindings) val afterAdjustSp = if haveExited then codeExp else BlockSimple(LoadArgument{source=result, dest=resultReg, kind=movePolyWord}) :: BlockSimple(ResetStackPtr{numWords=finalSp-initialSp, preserveCC=false}) :: codeExp in (afterAdjustSp, RegisterArgument resultReg, haveExited) end else codeToICodeRev(exp, resContext, isTail, destination, codeBindings) end | codeToICodeRev(BICConstnt(value, _), _, _, destination, tailCode) = moveIfNotAllowedRev(destination, tailCode, constantAsArgument value) | codeToICodeRev(BICExtract(BICLoadLocal l), {stackPtr, ...}, _, destination, tailCode) = ( case Array.sub(locToPregArray, l) of NoLocation => raise InternalError "codeToICodeRev - local unset" | PregLocation preg => moveIfNotAllowedRev(destination, tailCode, RegisterArgument preg) | ContainerLocation{container, stackOffset} => (* This always returns a ContainerAddr whatever the "allowed". *) (tailCode, ContainerAddr{container=container, stackOffset=stackPtr-stackOffset}, false) ) | codeToICodeRev(BICExtract(BICLoadArgument a), {stackPtr, ...}, _, destination, tailCode) = ( case Vector.sub(argumentVector, a) of ArgInReg{argReg, ...} => (* It was originally in a register. It's now in a preg. *) moveIfNotAllowedRev(destination, tailCode, RegisterArgument argReg) | ArgOnStack{stackOffset, stackReg} => (* Pushed before call. 
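Its position relative to the current stack pointer is stackOffset plus whatever has been pushed since entry, so, purely as an illustration, an argument recorded at offset 3 is loaded from stack word 5 once two further words have been pushed and stackPtr is 2.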
*) let val target = asTarget destination in (BlockSimple(LoadArgument{ source=StackLocation{wordOffset=stackOffset+stackPtr, container=stackReg, field=0, cache=NONE}, dest=target, kind=moveNativeWord}) :: tailCode, RegisterArgument target, false) end ) | codeToICodeRev(BICExtract(BICLoadClosure c), _, _, destination, tailCode) = let (* Add the number of words for the code address. This is 1 in native but 2 in 32-in-64. *) val offset = case targetArch of ObjectId32Bit => c+2 | _ => c+1 in if c >= List.length closure then raise InternalError "BICExtract: closure" else (); (* N.B. We need to add one to the closure entry because zero is the code address. *) moveIfNotAllowedRev(destination, tailCode, wordOffsetAddress(offset, closureRegAddr)) end | codeToICodeRev(BICExtract BICLoadRecursive, _, _, destination, tailCode) = (* If the closure is empty we must use the constant. We can't guarantee that the caller will actually load the closure register if it knows the closure is empty. *) moveIfNotAllowedRev(destination, tailCode, case closure of [] => AddressConstant(closureAsAddress resultClosure) | _ => RegisterArgument closureRegAddr) | codeToICodeRev(BICField{base, offset}, context, _, destination, tailCode) = let val (codeBase, baseEntry, _) = codeToICodeRev(base, context, false, Allowed allowInPReg, tailCode) in (* This should not be used with a container. *) case baseEntry of RegisterArgument baseR => moveIfNotAllowedRev(destination, codeBase, wordOffsetAddress(offset, baseR)) | _ => raise InternalError "codeToICodeRev-BICField" end | codeToICodeRev(BICLoadContainer{base, offset}, context, _, destination, tailCode) = let val (codeBase, baseEntry, _) = codeToICodeRev(base, context, false, Allowed allowInPReg, tailCode) val multiplier = Word.toInt(nativeWordSize div wordSize) in (* If this is a local container we extract the field. *) case baseEntry of RegisterArgument baseR => moveIfNotAllowedRev(destination, codeBase, wordOffsetAddress(offset*multiplier, baseR)) | ContainerAddr{container, stackOffset} => let val target = asTarget destination val finalOffset = stackOffset+offset val _ = finalOffset >= 0 orelse raise InternalError "offset" in (BlockSimple(LoadArgument{ source=StackLocation{wordOffset=finalOffset, container=container, field=offset, cache=NONE}, - dest=target, kind=movePolyWord}) :: tailCode, + dest=target, kind=moveNativeWord}) :: tailCode, RegisterArgument target, false) end | _ => raise InternalError "codeToICodeRev-BICField" end | codeToICodeRev(BICEval{function, argList, resultType, ...}, context as { currHandler, ...}, isTail, destination, tailCode) = let val target = asTarget destination (* Create pregs for the closure and each argument. *) val clPReg = newPReg() (* If we have a constant closure we can go directly to the entry point. If the closure is a single word we don't need to load the closure register. *) val (functionCode, closureEntry, callKind) = case function of BICConstnt(addr, _) => let val addrAsAddr = toAddress addr (* If this is a closure we're still compiling we can't get the code address. However if this is directly recursive we can use the recursive convention. 
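So, roughly: a directly recursive call uses the Recursive convention, passing the current closure register on in edx only if the closure is non-empty; a call to an already-compiled constant closure can use ConstantCode, loading the closure into edx only when it has free variables; and anything else loads the closure value into edx and uses FullCall.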
*) in if wordEq(closureAsAddress resultClosure, addr) then (tailCode, [], Recursive) else if flags addrAsAddr <> Address.F_words andalso flags addrAsAddr <> Address.F_closure then (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], FullCall) else if targetArch = ObjectId32Bit then (* We can't actually load the code address here. *) let val addrLength = length addrAsAddr val _ = addrLength >= 0w1 orelse raise InternalError "BICEval address" val _ = flags addrAsAddr = Address.F_closure orelse raise InternalError "BICEval address not a closure" in if addrLength = 0w2 then (tailCode, [], ConstantCode addr) else (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], ConstantCode addr) end else (* Native 32 or 64-bits. *) let val addrLength = length addrAsAddr val _ = addrLength >= 0w1 orelse raise InternalError "BICEval address" val codeAddr = loadWord(addrAsAddr, 0w0) val _ = isCode (toAddress codeAddr) orelse raise InternalError "BICEval address not code" in if addrLength = 0w1 then (tailCode, [], ConstantCode codeAddr) else (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], ConstantCode codeAddr) end end | BICExtract BICLoadRecursive => ( (* If the closure is empty we don't need to load rdx *) case closure of [] => (tailCode, [], Recursive) | _ => (BlockSimple(LoadArgument {source=RegisterArgument closureRegAddr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], Recursive) ) | function => (* General case. *) (#1 (codeToICodeRev(function, context, false, SpecificPReg clPReg, tailCode)), [(RegisterArgument clPReg, GenReg edx)], FullCall) (* Optimise arguments. We have to be careful with tail-recursive functions because they need to save any stack arguments that could be overwritten. This is complicated because we overwrite the stack before loading the register arguments. In some circumstances it could be safe but for the moment leave it. This should be safe in the new code-transform but not the old codeICode. Currently we don't allow memory arguments at all. There's the potential for problems later. Memory arguments could possibly lead to aliasing of the stack if the memory actually refers to a container on the stack. That would mess up the code that ensures that stack arguments are stored in the right order. *) (* We don't allow long constants in stack arguments to a tail-recursive call because we may use a memory move to set them. We also don't allow them in 32-in-64 because we can't push an address constant. *) val allowInStackArg = Allowed {anyConstant=not isTail andalso targetArch <> ObjectId32Bit, const32s=true, memAddr=false, existingPreg=not isTail } and allowInRegArg = Allowed {anyConstant=true, const32s=true, memAddr=false, existingPreg=not isTail } (* Load the first arguments into registers and the rest to the stack. *) fun loadArgs ([], _, _, tailCode) = (tailCode, [], []) | loadArgs ((arg, DoubleFloatType) :: args, gRegs, fpReg :: fpRegs, tailCode) = let (* Floating point register argument. 
*) val (c, r) = codeToPRegRev(arg, context, tailCode) val r1 = newUReg() val c1 = BlockSimple(LoadArgument{source=wordAt r, dest=r1, kind=MoveDouble}) :: c val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c1) in (code, (RegisterArgument r1, fpReg) :: regArgs, stackArgs) end | loadArgs ((arg, SingleFloatType) :: args, gRegs, fpReg :: fpRegs, tailCode) = let (* Floating point register argument. *) val (c, r) = codeToPRegRev(arg, context, tailCode) val r1 = newUReg() val c1 = if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument r, dest=r1, cache=NONE}) :: c else BlockSimple(LoadArgument{source=wordAt r, dest=r1, kind=MoveFloat}) :: c val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c1) in (code, (RegisterArgument r1, fpReg) :: regArgs, stackArgs) end | loadArgs ((arg, _) :: args, gReg::gRegs, fpRegs, tailCode) = let (* General register argument. *) val (c, r, _) = codeToICodeRev(arg, context, false, allowInRegArg, tailCode) val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c) in (code, (r, gReg) :: regArgs, stackArgs) end | loadArgs ((arg, _) :: args, [], fpRegs, tailCode) = let (* Stack argument. *) val (c, r, _) = codeToICodeRev(arg, context, false, allowInStackArg, tailCode) val (code, regArgs, stackArgs) = loadArgs(args, [], fpRegs, c) in (code, regArgs, r :: stackArgs) end val (codeArgs, regArgs, stackArgs) = loadArgs(argList, generalArgRegs, floatingPtArgRegs, functionCode) (* If this is at the end of the function and the result types are the same we can use a tail-recursive call. *) val tailCall = isTail andalso resultType = fnResultType val callCode = if tailCall then let val {stackPtr, ...} = context (* The number of arguments currently on the stack. *) val currentStackArgCount = currentStackArgs val newStackArgCount = List.length stackArgs (* The offset of the first argument or the return address if there are no stack arguments. N.B. We actually have currentStackArgCount+1 items on the stack including the return address. Offsets can be negative. *) val stackOffset = stackPtr val firstArgumentAddr = currentStackArgCount fun makeStackArgs([], _) = [] | makeStackArgs(arg::args, offset) = {src=arg, stack=offset} :: makeStackArgs(args, offset-1) val stackArgs = makeStackArgs(stackArgs, firstArgumentAddr) (* The stack adjustment needed to compensate for any items that have been pushed and the differences in the number of arguments. May be positive or negative. This is also the destination address of the return address so when we enter the new function the return address will be the first item on the stack. *) val stackAdjust = firstArgumentAddr - newStackArgCount (* Add an entry for the return address to the stack arguments. *) val returnEntry = {src=StackLocation{wordOffset=stackPtr, container=returnAddressEntry, field=0, cache=NONE}, stack=stackAdjust} (* Because we're storing into the stack we may be overwriting values we want. If the source of any value is a stack location below the current stack pointer we load it except in the special case where the destination is the same as the source (which is often the case with the return address). 
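As a worked example with purely illustrative numbers: if the current function received 2 stack arguments and the tail call needs 3, firstArgumentAddr is 2 and stackAdjust is 2 - 3 = ~1, so the frame grows by one word and the return address is stored one word lower; any existing stack value whose source would be overwritten before it is read is first copied into a pseudo-register by the pass just below.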
*) local fun loadArgs [] = ([], []) | loadArgs (arg :: rest) = let val (loadCode, loadedArgs) = loadArgs rest in case arg of {src as StackLocation{wordOffset, ...}, stack} => if wordOffset = stack+stackOffset (* Same location *) orelse stack+stackOffset < 0 (* Storing above current top of stack *) orelse stackOffset+wordOffset > ~ stackAdjust (* Above the last argument *) then (loadCode, arg :: loadedArgs) else let val preg = newPReg() in (BlockSimple(LoadArgument{source=src, dest=preg, kind=moveNativeWord}) :: loadCode, {src=RegisterArgument preg, stack=stack} :: loadedArgs) end | _ => (loadCode, arg :: loadedArgs) end in val (loadStackArgs, loadedStackArgs) = loadArgs(returnEntry :: stackArgs) end in BlockExit(TailRecursiveCall{regArgs=closureEntry @ regArgs, stackArgs=loadedStackArgs, stackAdjust = stackAdjust, currStackSize=stackOffset, callKind=callKind, workReg=newPReg()}) :: loadStackArgs @ codeArgs end else let val (moveResult, resReg) = case resultType of GeneralType => ([], target) | DoubleFloatType => let val fpRegDest = newUReg() in (boxOrTagReal(fpRegDest, target, BuiltIns.PrecDouble), fpRegDest) end | SingleFloatType => let val fpRegDest = newUReg() in (boxOrTagReal(fpRegDest, target, BuiltIns.PrecSingle), fpRegDest) end val call = FunctionCall{regArgs=closureEntry @ regArgs, stackArgs=stackArgs, dest=resReg, realDest=resultReg resultType, callKind=callKind, saveRegs=[]} val callBlock = case currHandler of NONE => BlockSimple call :: codeArgs | SOME h => BlockOptionalHandle{call=call, handler=h, label=newLabel()} :: codeArgs in moveResult @ callBlock end in (callCode, RegisterArgument target, tailCall (* We've exited if this was a tail jump *)) end | codeToICodeRev(BICGetThreadId, _, _, destination, tailCode) = (* Get the ID of the current thread. *) let val target = asTarget destination in (BlockSimple(LoadMemReg{offset=memRegThreadSelf, dest=target}) :: tailCode, RegisterArgument target, false) end | codeToICodeRev(BICUnary instr, context, isTail, destination, tailCode) = codeToICodeUnaryRev(instr, context, isTail, destination, tailCode) | codeToICodeRev(BICBinary instr, context, isTail, destination, tailCode) = codeToICodeBinaryRev(instr, context, isTail, destination, tailCode) | codeToICodeRev(BICArbitrary{oper, shortCond, arg1, arg2, longCall}, context, _, destination, tailCode) = let val startLong = newLabel() and resultLabel = newLabel() val target = asTarget destination val condResult = newMergeReg() (* Overflow check - if there's an overflow jump to the long precision case. *) fun jumpOnOverflow ccRef = let val noOverFlow = newLabel() in [BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=startLong, falseJump=noOverFlow }), BlockLabel noOverFlow] end val (longCode, _, _) = codeToICode(longCall, context, false, SpecificPReg condResult) (* We could use a tail jump here if this is a tail. *) val (code, dest, haveExited) = ( (* Test the tag bits and skip to the long case if either is clear. *) List.rev(codeConditionRev(shortCond, context, false, startLong, [])) @ (* Try evaluating as fixed precision and jump if we get an overflow. *) codeFixedPrecisionArith(oper, arg1, arg2, context, condResult, jumpOnOverflow) @ (* If we haven't had an overflow jump to the result. *) [BlockFlow(Unconditional resultLabel), (* If we need to use the full long-precision call we come here. 
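In outline the code being generated is:
    test the tag bits of the arguments and jump to startLong if either is a long-format value
    perform the fixed-precision operation and jump to startLong on overflow
    jump to resultLabel
    startLong:   evaluate the full arbitrary-precision call into condResult
    resultLabel: move condResult into the target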
*) BlockLabel startLong] @ longCode @ [BlockLabel resultLabel, BlockSimple(LoadArgument{source=RegisterArgument condResult, dest=target, kind=movePolyWord})], RegisterArgument target, false) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICAllocateWordMemory instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeAllocate(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICLambda(lambda as { closure = [], ...}), _, _, destination, tailCode) = (* Empty closure - create a constant closure for any recursive calls. *) let val closure = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, closure) (* Return the closure itself as the value. *) in moveIfNotAllowedRev(destination, tailCode, AddressConstant(closureAsAddress closure)) end | codeToICodeRev(BICLambda(lambda as { closure, ...}), context, isTail, destination, tailCode) = (* Non-empty closure. Ignore stack closure option at the moment. *) let val closureRef = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, closureRef) in if targetArch = ObjectId32Bit then let val target = asTarget destination val memAddr = newPReg() fun loadFields([], n, tlCode) = let val codeReg = newUReg() val closureReg = newPReg() in (* The code address occupies the first native word but we need to extract it at run-time. We don't currently have a way to have 64-bit constants. *) BlockSimple( StoreArgument{ source=RegisterArgument codeReg, offset=0, base=memAddr, index=ObjectIndex, kind=moveNativeWord, isMutable=false}) :: BlockSimple(LoadArgument{ source=MemoryLocation{offset=0, base=closureReg, index=ObjectIndex, cache=NONE}, dest=codeReg, kind=Move64Bit}) :: BlockSimple(LoadArgument{ source=AddressConstant(toMachineWord closureRef), dest=closureReg, kind=movePolyWord}) :: BlockSimple(AllocateMemoryOperation{size=n, flags=F_closure, dest=memAddr, saveRegs=[]}) :: tlCode end | loadFields(f :: rest, n, tlCode) = let (* Defer the evaluation if possible. We may have a constant that we can't move directly but it's better to load it after the allocation otherwise we will have to push the register if we need to GC. *) val (code1, source1, _) = codeToICodeRev(BICExtract f, context, false, Allowed allowDefer, tlCode) val restAndAlloc = loadFields(rest, n+1, code1) val (code2, source, _) = moveIfNotAllowedRev(Allowed allowInMemMove, restAndAlloc, source1) val storeValue = BlockSimple(StoreArgument{ source=source, offset=n*Word.toInt wordSize, base=memAddr, index=ObjectIndex, kind=movePolyWord, isMutable=false}) in storeValue :: code2 end val code = BlockSimple InitialisationComplete :: BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord}) :: loadFields(closure, 2, tailCode) in (code, RegisterArgument target, false) end (* Treat it as a tuple with the code as the first field. *) else codeToICodeRev(BICTuple(BICConstnt(codeAddressFromClosure closureRef, []) :: map BICExtract closure), context, isTail, destination, tailCode) end | codeToICodeRev(BICCond(test, thenPt, elsePt), context, isTail, NoResult, tailCode) = let (* If we don't want the result but are only evaluating for side-effects we may be able to optimise special cases. This was easier in the forward case but for now we don't bother and leave it to the lower levels. 
*) val startElse = newLabel() and skipElse = newLabel() val codeTest = codeConditionRev(test, context, false, startElse, tailCode) val (codeThen, _, _) = codeToICodeRev(thenPt, context, isTail, NoResult, codeTest) val (codeElse, _, _) = codeToICodeRev(elsePt, context, isTail, NoResult, BlockLabel startElse :: BlockFlow(Unconditional skipElse) :: codeThen) in (BlockLabel skipElse :: codeElse, (* Unit result *) IntegerConstant(tag 0), false) end | codeToICodeRev(BICCond(test, thenPt, elsePt), context, isTail, destination, tailCode) = let (* Because we may push the result onto the stack we have to create a new preg to hold the result and then copy that to the final result. *) (* If this is a tail each arm will exit separately and neither will return a result. *) val target = asTarget destination val condResult = newMergeReg() val thenTarget = if isTail then newPReg() else condResult val startElse = newLabel() val testCode = codeConditionRev(test, context, false, startElse, tailCode) (* Put the result in the target register. *) val (thenCode, _, thenExited) = codeToICodeRev(thenPt, context, isTail, SpecificPReg thenTarget, testCode) (* Add a jump round the else-part except that if this is a tail we return. The then-part could have exited e.g. with a raise or a loop. *) val (exitThen, thenLabel, elseTarget) = if thenExited then (thenCode, [], target (* Can use original target. *)) else if isTail then (returnInstruction(context, thenTarget, thenCode), [], newPReg()) else let val skipElse = newLabel() in (BlockFlow(Unconditional skipElse) :: thenCode, [BlockSimple(LoadArgument{source=RegisterArgument condResult, dest=target, kind=movePolyWord}), BlockLabel skipElse], condResult) end val (elseCode, _, elseExited) = codeToICodeRev(elsePt, context, isTail, SpecificPReg elseTarget, BlockLabel startElse :: exitThen) (* Add a return to the else-part if necessary so we will always exit on a tail. *) val exitElse = if isTail andalso not elseExited then returnInstruction(context, elseTarget, elseCode) else elseCode in (thenLabel @ exitElse, RegisterArgument target, isTail orelse thenExited andalso elseExited) end | codeToICodeRev(BICCase { cases, test, default, isExhaustive, firstIndex}, context, isTail, destination, tailCode) = let (* We have to create a new preg for the result in case we need to push it to the stack. *) val targetReg = newMergeReg() local val initialTestReg = newPReg() val (testCode, _, _) = codeToICodeRev(test, context, false, SpecificPReg initialTestReg, tailCode) (* Subtract the minimum value so the value we're testing is always in the range of (tagged) 0 to the maximum. It is possible to adjust the value when computing the index but that can lead to overflows during compilation if the minimum is very large or small. We can ignore overflow and allow values to wrap round. *) in val (testCode, testReg) = if firstIndex = 0w0 then (testCode, initialTestReg) else let val newTestReg = newPReg() val subtract = BlockSimple(ArithmeticFunction{oper=SUB, resultReg=newTestReg, operand1=initialTestReg, operand2=IntegerConstant(semitag(Word.toLargeInt firstIndex)), ccRef=newCCRef(), opSize=polyWordOpSize}) in (subtract :: testCode, newTestReg) end end val workReg = newPReg() (* Unless this is exhaustive we need to add a range check. 
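Because the minimum case value has already been subtracted, a single unsigned comparison against the tagged number of cases is enough: a test value below the original minimum wraps round to a large unsigned value and so also takes the JNB branch to the default label.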
*) val (rangeCheck, extraDefaults) = if isExhaustive then (testCode, []) else let val defLab1 = newLabel() val tReg1 = newPReg() val ccRef1 = newCCRef() (* Since we've subtracted any minimum we only have to check whether the value is greater (unsigned) than the maximum. *) val numberOfCases = LargeInt.fromInt(List.length cases) val continueLab = newLabel() val testCode2 = BlockLabel continueLab :: BlockFlow(Conditional{ccRef=ccRef1, condition=JNB, trueJump=defLab1, falseJump=continueLab}) :: BlockSimple(WordComparison{arg1=tReg1, arg2=IntegerConstant(tag numberOfCases), ccRef=ccRef1, opSize=polyWordOpSize}) :: BlockSimple(LoadArgument {source=RegisterArgument testReg, dest=tReg1, kind=movePolyWord}) :: testCode in (testCode2, [defLab1]) end (* Make a label for each item in the list. *) val codeLabels = map (fn _ => newLabel()) cases (* Create an exit label in case it's needed. *) val labelForExit = if isTail then ~1 (* Illegal label. *) else newLabel() (* Generate the code for each of the cases and the default. We need to put an unconditional branch after each to skip the other cases. *) fun codeCases (SOME c :: otherCases, startLabel :: otherLabels, tailCode) = let val caseTarget = if isTail then newPReg() else targetReg (* Put in the case with a jump to the end of the sequence. *) val (codeThisCase, _, caseExited) = codeToICodeRev(c, context, isTail, SpecificPReg caseTarget, BlockLabel startLabel :: tailCode) val exitThisCase = if caseExited then codeThisCase else if isTail then returnInstruction(context, caseTarget, codeThisCase) else BlockFlow(Unconditional labelForExit) :: codeThisCase in codeCases(otherCases, otherLabels, exitThisCase) end | codeCases(NONE :: otherCases, _ :: otherLabels, tailCode) = codeCases(otherCases, otherLabels, tailCode) | codeCases ([], [], tailCode) = let (* We need to add labels for all the gaps we filled and also for a "default" label for the indexed-case instruction itself as well as any range checks. *) fun addDefault (startLabel, NONE, l) = BlockLabel startLabel :: l | addDefault (_, SOME _, l) = l fun asForward l = BlockLabel l val dLabs = map asForward extraDefaults @ tailCode val defLabels = ListPair.foldlEq addDefault dLabs (codeLabels, cases) val defaultTarget = if isTail then newPReg() else targetReg val (defaultCode, _, defaultExited) = codeToICodeRev(default, context, isTail, SpecificPReg defaultTarget, defLabels) in (* Put in the default. Because this is the last we don't need to jump round it. However if this is a tail and we haven't exited we put in a return. That way the case will always have exited if this is a tail. *) if isTail andalso not defaultExited then returnInstruction(context, defaultTarget, defaultCode) else defaultCode end | codeCases _ = raise InternalError "codeCases: mismatch" val codedCases = codeCases(cases, codeLabels, BlockFlow(IndexedBr codeLabels) :: BlockSimple(IndexedCaseOperation{testReg=testReg, workReg=workReg}) :: rangeCheck) (* We can now copy to the target. If we need to push the result this load will be converted into a push. *) val target = asTarget destination val copyToTarget = if isTail then codedCases else BlockSimple(LoadArgument{source=RegisterArgument targetReg, dest=target, kind=movePolyWord}) :: BlockLabel labelForExit :: codedCases in (copyToTarget, RegisterArgument target, isTail (* We have always exited on a tail. 
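Every case and the default either returned or raised, so there is no fall-through path.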
*)) end | codeToICodeRev(BICBeginLoop {loop, arguments}, context as { stackPtr, currHandler, overflowBlock, ...}, isTail, destination, tailCode) = let val target = asTarget destination fun codeArgs ([], tailCode) = ([], tailCode) | codeArgs (({value, addr}, _) :: rest, tailCode) = let val pr = newPReg() val () = Array.update(locToPregArray, addr, PregLocation pr) val (code, _, _) = codeToICodeRev(value, context, false, SpecificPReg pr, tailCode) val (pregs, othercode) = codeArgs(rest, code) in (pr::pregs, othercode) end val (loopRegs, argCode) = codeArgs(arguments, tailCode) val loopLabel = newLabel() val (loopBody, _, loopExited) = codeToICodeRev(loop, {loopArgs=SOME (loopRegs, loopLabel, stackPtr), stackPtr=stackPtr, currHandler=currHandler, overflowBlock=overflowBlock }, isTail, SpecificPReg target, BlockLabel loopLabel :: BlockSimple BeginLoop :: argCode) in (loopBody, RegisterArgument target, loopExited) end | codeToICodeRev(BICLoop args, context as {loopArgs=SOME (loopRegs, loopLabel, loopSp), stackPtr, currHandler, ...}, _, destination, tailCode) = let val target = asTarget destination (* Registers to receive the evaluated arguments. We can't put the values into the loop variables yet because the values could depend on the current values of the loop variables. *) val argPRegs = map(fn _ => newPReg()) args val codeArgs = ListPair.foldlEq(fn ((arg, _), pr, l) => #1 (codeToICodeRev(arg, context, false, SpecificPReg pr, l))) tailCode (args, argPRegs) val jumpArgs = ListPair.mapEq(fn (s, l) => (RegisterArgument s, l)) (argPRegs, loopRegs) (* If we've allocated a container in the loop we have to remove it before jumping back. *) val stackReset = if loopSp = stackPtr then codeArgs else BlockSimple(ResetStackPtr{numWords=stackPtr-loopSp, preserveCC=false}) :: codeArgs val jumpLoop = JumpLoop{regArgs=jumpArgs, stackArgs=[], checkInterrupt=SOME[], workReg=NONE} (* "checkInterrupt" could result in an Interrupt exception so we treat this like a function call. *) val code = case currHandler of NONE => BlockFlow(Unconditional loopLabel) :: BlockSimple jumpLoop :: stackReset | SOME h => BlockOptionalHandle{call=jumpLoop, handler=h, label=loopLabel} :: stackReset in (code, RegisterArgument target, true) end | codeToICodeRev(BICLoop _, {loopArgs=NONE, ...}, _, _, _) = raise InternalError "BICLoop without BICBeginLoop" | codeToICodeRev(BICRaise exc, context as { currHandler, ...}, _, destination, tailCode) = let val packetReg = newPReg() val (code, _, _) = codeToICodeRev(exc, context, false, SpecificPReg packetReg, tailCode) val raiseCode = RaiseExceptionPacket{packetReg=packetReg} val block = case currHandler of NONE => BlockExit raiseCode | SOME h => BlockRaiseAndHandle(raiseCode, h) in (block :: code, RegisterArgument(asTarget destination), true (* Always exits *)) end | codeToICodeRev(BICHandle{exp, handler, exPacketAddr}, context as { stackPtr, loopArgs, overflowBlock, ... }, isTail, destination, tailCode) = let (* As with BICCond and BICCase we need to create a new register for the result in case we need to push it to the stack. *) val handleResult = newMergeReg() val handlerLab = newLabel() and startHandling = newLabel() val (bodyTarget, handlerTarget) = if isTail then (newPReg(), newPReg()) else (handleResult, handleResult) (* TODO: Even if we don't actually want a result we force one in here by using "asTarget". *) (* The expression cannot be treated as a tail because the handler has to be removed after. It may "exit" if it raises an exception unconditionally. 
If it has we mustn't generate a PopExceptionHandler because there won't be any result for resultReg. We need to add two words to the stack to account for the items pushed by PushExceptionHandler. We create an instruction to push the handler followed by a block fork to the start of the code and, potentially the handler, then a label to start the code that the handler is in effect for. *) val initialCode = BlockLabel startHandling :: BlockFlow(SetHandler{handler=handlerLab, continue=startHandling}) :: BlockSimple(PushExceptionHandler{workReg=newPReg()}) :: tailCode val (expCode, _, expExit) = codeToICodeRev(exp, {stackPtr=stackPtr+2, loopArgs=loopArgs, currHandler=SOME handlerLab, overflowBlock=overflowBlock}, false (* Not tail *), SpecificPReg bodyTarget, initialCode) (* If this is the tail we can replace the jump at the end of the handled code with returns. If the handler has exited we don't need a return there. Otherwise we need to add an unconditional jump to skip the handler. *) val (atExpEnd, skipExpLabel) = case (isTail, expExit) of (true, true) => (* Tail and exited. *) (expCode, NONE) | (true, false) => (* Tail and not exited. *) (returnInstruction(context, bodyTarget, BlockSimple(PopExceptionHandler{workReg=newPReg()}) :: expCode), NONE) | (false, true) => (* Not tail but exited. *) (expCode, NONE) | (false, false) => let val skipHandler = newLabel() in (BlockFlow(Unconditional skipHandler) :: BlockSimple(PopExceptionHandler{workReg=newPReg()}) :: expCode, SOME skipHandler) end (* Make a register to hold the exception packet and put eax into it. *) val packetAddr = newPReg() val () = Array.update(locToPregArray, exPacketAddr, PregLocation packetAddr) val (handleCode, _, handleExit) = codeToICodeRev(handler, context, isTail, SpecificPReg handlerTarget, BlockSimple(BeginHandler{workReg=newPReg(), packetReg=packetAddr}) :: BlockLabel handlerLab :: atExpEnd) val target = asTarget destination val afterHandler = case (isTail, handleExit) of (true, true) => (* Tail and exited. *) handleCode | (true, false) => (* Tail and not exited. *) returnInstruction(context, handlerTarget, handleCode) | (false, _) => (* Not tail. *) handleCode val addLabel = case skipExpLabel of SOME lab => BlockLabel lab:: afterHandler | NONE => afterHandler in (BlockSimple(LoadArgument{source=RegisterArgument handleResult, dest=target, kind=movePolyWord}) :: addLabel, RegisterArgument target, isTail) end | codeToICodeRev(BICTuple fields, context, _, destination, tailCode) = let (* TODO: This is a relic of the old fall-back code-generator. It required the result of a tuple to be at the top of the stack. It should be changed. *) val target = asTarget destination (* Actually we want this. *) val memAddr = newPReg() fun loadFields([], n, tlCode) = BlockSimple(AllocateMemoryOperation{size=n, flags=0w0, dest=memAddr, saveRegs=[]}) :: tlCode | loadFields(f :: rest, n, tlCode) = let (* Defer the evaluation if possible. We may have a constant that we can't move directly but it's better to load it after the allocation otherwise we will have to push the register if we need to GC. 
*) val (code1, source1, _) = codeToICodeRev(f, context, false, Allowed allowDefer, tlCode) val restAndAlloc = loadFields(rest, n+1, code1) val (code2, source, _) = moveIfNotAllowedRev(Allowed allowInMemMove, restAndAlloc, source1) val storeValue = BlockSimple(StoreArgument{ source=source, offset=n*Word.toInt wordSize, base=memAddr, index=memIndexOrObject, kind=movePolyWord, isMutable=false}) in storeValue :: code2 end val code = BlockSimple InitialisationComplete :: BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord}) :: loadFields(fields, 0, tailCode) in (code, RegisterArgument target, false) end (* Copy the source tuple into the container. There are important special cases for both the source tuple and the container. If the source tuple is a BICTuple we have the fields and can store them without creating a tuple on the heap. If the destination is a local container we can store directly into the stack. *) | codeToICodeRev(BICSetContainer{container, tuple, filter}, context as {stackPtr, ...}, _, destination, tailCode) = let local fun createStore containerReg (source, destWord) = StoreArgument{source=source, offset=destWord*Word.toInt nativeWordSize, base=containerReg, index=NoMemIndex, kind=moveNativeWord, isMutable=false} in val findContainer = case container of BICExtract(BICLoadLocal l) => ( case Array.sub(locToPregArray, l) of ContainerLocation{container, stackOffset} => let fun storeToStack(source, destWord) = StoreToStack{source=source, container=container, field=destWord, stackOffset=stackPtr-stackOffset+destWord} in SOME storeToStack end | _ => NONE ) | _ => NONE val (codeContainer, storeInstr) = case findContainer of SOME storeToStack => (tailCode, storeToStack) | NONE => let val containerTarget = newPReg() val (codeContainer, _, _) = codeToICodeRev(container, context, false, SpecificPReg containerTarget, tailCode) in (codeContainer, createStore containerTarget) end end val filterLength = BoolVector.length filter val code = case tuple of BICTuple cl => let (* In theory it's possible that the tuple could contain fields that are not used but nevertheless need to be evaluated for their side-effects. Create all the fields and push to the stack. *) fun codeField(arg, (regs, tailCode)) = let val (c, r, _) = codeToICodeRev(arg, context, false, Allowed allowInMemMove, tailCode) in (r :: regs, c) end val (pregsRev, codeFields) = List.foldl codeField ([], codeContainer) cl val pregs = List.rev pregsRev fun copyField(srcReg, (sourceWord, destWord, tailCode)) = if sourceWord < filterLength andalso BoolVector.sub(filter, sourceWord) then (sourceWord+1, destWord+1, BlockSimple(storeInstr(srcReg, destWord)) :: tailCode) else (sourceWord+1, destWord, tailCode) val (_, _, resultCode) = List.foldl copyField (0, 0, codeFields) pregs in resultCode end | tuple => let (* Copy a heap tuple. It is possible that this is another container in which case we must load the fields directly. We mustn't load its address and then copy because loading the address would be the last reference and might cause the container to be reused prematurely. 
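In that case the fields are read straight from the container's stack slots using their StackLocation addresses.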
*) val findContainer = case tuple of BICExtract(BICLoadLocal l) => ( case Array.sub(locToPregArray, l) of ContainerLocation{container, stackOffset} => let fun getAddr sourceWord = StackLocation{wordOffset=stackPtr-stackOffset+sourceWord, container=container, field=sourceWord, cache=NONE} in SOME getAddr end | _ => NONE ) | _ => NONE val (codeTuple, loadField) = case findContainer of SOME getAddr => (codeContainer, getAddr) | NONE => let val tupleTarget = newPReg() val (codeTuple, _, _) = codeToICodeRev(tuple, context, false, SpecificPReg tupleTarget, codeContainer) fun loadField sourceWord = wordOffsetAddress(sourceWord, tupleTarget) in (codeTuple, loadField) end fun copyContainer(sourceWord, destWord, tailCode) = if sourceWord = filterLength then tailCode else if BoolVector.sub(filter, sourceWord) then let val loadReg = newPReg() val code = BlockSimple(storeInstr(RegisterArgument loadReg, destWord)) :: BlockSimple(LoadArgument{source=loadField sourceWord, dest=loadReg, kind=movePolyWord}) :: tailCode in copyContainer(sourceWord+1, destWord+1, code) end else copyContainer(sourceWord+1, destWord, tailCode) in copyContainer(0, 0, codeTuple) end in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeRev(BICTagTest{test, tag=tagValue, ...}, context, _, destination, tailCode) = (* Check the "tag" word of a union (datatype). N.B. Not the same as testing the tag bit of a word. *) let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (testCode, tagArg, _) = codeToICodeRev(test, context, false, Allowed memOrReg, tailCode) val target = asTarget destination in (makeBoolResultRev(JE, ccRef, target, (* Use CompareLiteral because the tag must fit in 32-bits. 
*) BlockSimple(CompareLiteral{arg1=tagArg, arg2=tag(Word.toLargeInt tagValue), opSize=polyWordOpSize, ccRef=ccRef}) :: testCode), RegisterArgument target, false) end | codeToICodeRev(BICLoadOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeLoad(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICStoreOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeStore(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICBlockOperation ({kind=BlockOpEqualByte, sourceLeft, destRight, length}), context, _, destination, tailCode) = let val vec1Reg = newUReg() and vec2Reg = newUReg() val ccRef = newCCRef() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddressRev(sourceLeft, true, context, tailCode) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddressRev(destRight, true, context, leftCode) val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToRegRev(length, false (* unsigned *), context, rightCode) val target = asTarget destination val code = makeBoolResultRev(JE, ccRef, target, BlockSimple(CompareByteVectors{ vec1Addr=vec1Reg, vec2Addr=vec2Reg, length=lengthArg, ccRef=ccRef }) :: lengthUntag @ BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg}) :: rightUntag @ BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg}) :: leftUntag @ lengthCode) in (code, RegisterArgument target, false) end | codeToICodeRev(BICBlockOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeBlock(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end and codeToICodeUnaryRev({oper=BuiltIns.NotBoolean, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val ccRef = newCCRef() val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} val (argCode, testDest, _) = codeToICodeRev(arg1, context, false, allow, tailCode) in (* Test the argument and return a boolean result. If either the argument is a condition or the result is used in a test this will be better than using XOR. *) (makeBoolResultRev(JNE, ccRef, target, BlockSimple(CompareLiteral{arg1=testDest, arg2=tag 1, opSize=polyWordOpSize, ccRef=ccRef}) :: argCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.IsTaggedValue, arg1}, context, _, destination, tailCode) = let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (testCode, testResult, _) = codeToICodeRev(arg1, context, false, Allowed memOrReg, tailCode) (* Test the tag bit. This sets the zero bit if the value is untagged. *) val target = asTarget destination in (makeBoolResultRev(JNE, ccRef, target, BlockSimple(TestTagBit{arg=testResult, ccRef=ccRef}) :: testCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.MemoryCellLength, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val argReg1 = newUReg() and argReg2 = newUReg() and argReg3 = newUReg() (* These are untagged until the tag is put in. *) and ccRef1 = newCCRef() and ccRef2 = newCCRef() and ccRef3 = newCCRef() (* Get the length of a memory cell (heap object). 
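The length word is the word immediately before the cell (word offset ~1).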
We need to mask out the top byte containing the flags and to tag the result. The mask is 56 bits on 64-bit which won't fit in an inline constant. Since we have to shift it anyway we might as well do this by shifts. *) val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=argReg3, operand2=IntegerConstant 1, ccRef=ccRef3, opSize=polyWordOpSize}) :: BlockSimple(ShiftOperation{shift=SHR, resultReg=argReg3, operand=argReg2, shiftAmount=IntegerConstant 7 (* 8-tagshift*), ccRef=ccRef2, opSize=polyWordOpSize }) :: BlockSimple(ShiftOperation{shift=SHL, resultReg=argReg2, operand=argReg1, shiftAmount=IntegerConstant 8, ccRef=ccRef1, opSize=polyWordOpSize }) :: BlockSimple(LoadArgument{source=wordOffsetAddress(~1, addrReg), dest=argReg1, kind=movePolyWord}) :: argCode, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.MemoryCellFlags, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val argReg1 = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(TagValue{ source=argReg1, dest=target, isSigned=false, opSize=OpSize32 }) :: BlockSimple(LoadArgument{source=MemoryLocation{offset= ~1, base=addrReg, index=memIndexOrObject, cache=NONE}, dest=argReg1, kind=MoveByte}) :: argCode, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.ClearMutableFlag, arg1}, context, _, destination, tailCode) = let val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in moveIfNotAllowedRev(destination, BlockSimple(LockMutable{addr=addrReg}) :: argCode, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeUnaryRev({oper=BuiltIns.AtomicIncrement, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val incrReg = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) val code = (* We want the result to be the new value but we've returned the old value. *) BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=incrReg, operand2=IntegerConstant(semitag 1), ccRef=newCCRef(), opSize=polyWordOpSize}) :: BlockSimple(AtomicExchangeAndAdd{ base=addrReg, source=incrReg }) :: BlockSimple(LoadArgument{source=IntegerConstant(semitag 1), dest=incrReg, kind=movePolyWord}) :: argCode in (code, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.AtomicDecrement, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val incrReg = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) val code = BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=incrReg, operand2=IntegerConstant(semitag 1), ccRef=newCCRef(), opSize=polyWordOpSize}) :: BlockSimple(AtomicExchangeAndAdd{ base=addrReg, source=incrReg }) :: BlockSimple(LoadArgument{source=IntegerConstant(semitag ~1), dest=incrReg, kind=movePolyWord}) :: argCode in (code, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.AtomicReset, arg1}, context, _, destination, tailCode) = let (* This is needed only for the interpreted version where we have a single real mutex to interlock atomic increment and decrement. We have to use the same mutex to interlock clearing a mutex. On the X86 we use hardware locking and the hardware guarantees that an assignment of a word will be atomic. *) val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) (* Store tagged 1 in the mutex. This is the unlocked value. 
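Since short values are represented as 2n+1 this is the machine word 3.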
*) val code = BlockSimple(StoreArgument{source=IntegerConstant(tag 1), base=addrReg, index=memIndexOrObject, offset=0, kind=movePolyWord, isMutable=true}) :: argCode in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeUnaryRev({oper=BuiltIns.LongWordToTagged, arg1}, context, _, destination, tailCode) = let (* This is exactly the same as StringLengthWord at the moment. TODO: introduce a new ICode entry so that the next stage can optimise longword operations. *) val target = asTarget destination val argReg1 = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) val code = BlockSimple(TagValue{ source=argReg1, dest=target, isSigned=false, opSize=polyWordOpSize }) :: BlockSimple(LoadArgument{source=wordAt addrReg, dest=argReg1, kind=movePolyWord}) :: argCode in (code, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.SignedToLongWord, arg1}, context, _, destination, tailCode) = let val addrReg = newPReg() and untagArg = newUReg() val (argCode, argReg1) = codeToPRegRev(arg1, context, tailCode) val (signExtend, sxReg) = case targetArch of ObjectId32Bit => let val sReg = newUReg() in ([BlockSimple(SignExtend32To64{source=RegisterArgument argReg1, dest=sReg})], sReg) end | _ => ([], argReg1) val code = BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untagArg, dest=addrReg, saveRegs=[]}) :: BlockSimple(UntagValue{source=sxReg, dest=untagArg, isSigned=true, cache=NONE, opSize=nativeWordOpSize}) :: signExtend @ argCode in moveIfNotAllowedRev(destination, code, RegisterArgument addrReg) end | codeToICodeUnaryRev({oper=BuiltIns.UnsignedToLongWord, arg1}, context, _, destination, tailCode) = let val addrReg = newPReg() and untagArg = newUReg() val (argCode, argReg1) = codeToPRegRev(arg1, context, tailCode) val code = BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untagArg, dest=addrReg, saveRegs=[]}) :: (* We can just use a polyWord operation to untag the unsigned value. *) BlockSimple(UntagValue{source=argReg1, dest=untagArg, isSigned=false, cache=NONE, opSize=polyWordOpSize}) :: argCode in moveIfNotAllowedRev(destination, code, RegisterArgument addrReg) end | codeToICodeUnaryRev({oper=BuiltIns.RealNeg precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val fpRegSrc = newUReg() and fpRegDest = newUReg() and sse2ConstReg = newUReg() (* The SSE2 code uses an SSE2 logical operation to flip the sign bit. This requires the values to be loaded into registers first because the logical operations require 128-bit operands. *) val (argCode, aReg1) = codeToPReg(arg1, context) (* Double precision values are always boxed; single precision values are boxed only if they won't fit in a word. Otherwise we can use tagging. *) open BuiltIns val load = if precision = PrecDouble then BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveDouble}) else if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpRegSrc, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveFloat}) val code = case fpMode of FPModeX87 => [BlockSimple(X87FPUnaryOps{ fpOp=FCHS, dest=fpRegDest, source=fpRegSrc})] | FPModeSSE2 => let (* In single precision mode the sign bit is in the low 32-bits. There may be a better way to load it. 
*) val signBit = if precision = PrecDouble then realSignBit else floatSignBit in [BlockSimple(LoadArgument{source=AddressConstant signBit, dest=sse2ConstReg, kind=MoveDouble}), BlockSimple(SSE2FPBinary{opc=SSE2BXor, resultReg=fpRegDest, arg1=fpRegSrc, arg2=RegisterArgument sse2ConstReg})] end val result = boxOrTagReal(fpRegDest, target, precision) in (revApp(argCode @ load :: code @ result, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealAbs precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val fpRegSrc = newUReg() and fpRegDest = newUReg() and sse2ConstReg = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) open BuiltIns val load = if precision = PrecDouble then BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveDouble}) else if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpRegSrc, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveFloat}) val code = case fpMode of FPModeX87 => [BlockSimple(X87FPUnaryOps{ fpOp=FABS, dest=fpRegDest, source=fpRegSrc})] | FPModeSSE2 => let val mask = if precision = PrecDouble then realAbsMask else floatAbsMask in [BlockSimple(LoadArgument{source=AddressConstant mask, dest=sse2ConstReg, kind=MoveDouble}), BlockSimple(SSE2FPBinary{opc=SSE2BAnd, resultReg=fpRegDest, arg1=fpRegSrc, arg2=RegisterArgument sse2ConstReg})] end val result = boxOrTagReal(fpRegDest, target, precision) in (revApp(argCode @ load :: code @ result, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealFixedInt precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val untagReg = newUReg() and fpReg = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) val floatOp = case fpMode of FPModeX87 => X87Float | FPModeSSE2 => SSE2Float val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double val _ = precision = BuiltIns.PrecDouble orelse raise InternalError "RealFixedInt - single" val code = argCode @ [BlockSimple(UntagValue{source=aReg1, dest=untagReg, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(floatOp{ dest=fpReg, source=RegisterArgument untagReg}), BlockSimple(BoxValue{boxKind=boxFloat, source=fpReg, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.FloatToDouble, arg1}, context, _, destination, tailCode) = let (* Convert a single precision floating point value to double precision. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* MoveFloat always converts from single to double-precision. 
*) val unboxOrUntag = case (fpMode, wordSize) of (FPModeX87, _) => [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg2, kind=MoveFloat})] | (FPModeSSE2, 0w4) => [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveFloat}), BlockSimple(SSE2FPUnary{opc=SSE2UFloatToDouble, resultReg=fpReg2, source=RegisterArgument fpReg})] | (FPModeSSE2, _) => [BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpReg, cache=NONE}), BlockSimple(SSE2FPUnary{opc=SSE2UFloatToDouble, resultReg=fpReg2, source=RegisterArgument fpReg})] val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double val code = argCode @ unboxOrUntag @ [BlockSimple(BoxValue{boxKind=boxFloat, source=fpReg2, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.DoubleToFloat NONE, arg1}, context, _, destination, tailCode) = let (* Convert a double precision value to a single precision using the current rounding mode. This is simpler than setting the rounding mode and then restoring it. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* In 32-bit mode we need to box the float. In 64-bit mode we can tag it. *) val boxOrTag = case fpMode of FPModeX87 => [BlockSimple(BoxValue{boxKind=BoxX87Float, source=fpReg, dest=target, saveRegs=[]})] | FPModeSSE2 => BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=fpReg2, source=RegisterArgument fpReg}) :: boxOrTagReal(fpReg2, target, BuiltIns.PrecSingle) val code = argCode @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveDouble})] @ boxOrTag in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.DoubleToFloat (SOME rndMode), arg1}, context, _, destination, tailCode) = let (* Convert a double precision value to a single precision. The rounding mode is passed in explicitly. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* In 32-bit mode we need to box the float. In 64-bit mode we can tag it. *) (* We need to save the rounding mode before we change it and restore it afterwards. *) open IEEEReal fun doConversion() = case fpMode of FPModeX87 => (* Convert the value using the appropriate rounding. *) [BlockSimple(BoxValue{boxKind=BoxX87Float, source=fpReg, dest=target, saveRegs=[]})] | FPModeSSE2 => BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=fpReg2, source=RegisterArgument fpReg}) :: boxOrTagReal(fpReg2, target, BuiltIns.PrecSingle) val code = argCode @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveDouble})] @ setAndRestoreRounding(rndMode, doConversion) in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealToInt(precision, rndMode), arg1}, context, _, destination, tailCode) = let val target = asTarget destination val chkOverflow = newCCRef() val convResult = newUReg() and wrkReg2 = newUReg() (* Convert a floating point value to an integer. We need to raise overflow if the result is out of range. We first convert the value to 32/64 bits then tag it. An overflow can happen either because the real number does not fit in 32/64 bits or because it is not a 31/63 bit value. Fortunately, if the first conversion fails the result is a value that causes an overflow when we try to shift it so the check for overflow only needs to happen there. 
There is an SSE2 instruction that implements truncation (round to zero) directly but in other cases we need to set the rounding mode. *) val doConvert = case (fpMode, precision) of (FPModeX87, _) => let val fpReg = newUReg() val (argCode, aReg) = codeToPReg(arg1, context) fun doConvert() = [BlockSimple(X87RealToInt{source=fpReg, dest=convResult })] in argCode @ [BlockSimple(LoadArgument{source=wordAt aReg, dest=fpReg, kind=MoveDouble})] @ setAndRestoreRounding(rndMode, doConvert) end | (FPModeSSE2, BuiltIns.PrecDouble) => let val (argCode, argReg) = codeToPReg(arg1, context) fun doConvert() = [BlockSimple( SSE2RealToInt{source=wordAt argReg, dest=convResult, isDouble=true, isTruncate = rndMode = IEEEReal.TO_ZERO }) ] in argCode @ ( case rndMode of IEEEReal.TO_ZERO => doConvert() | _ => setAndRestoreRounding(rndMode, doConvert)) end | (FPModeSSE2, BuiltIns.PrecSingle) => let val (argCode, aReg) = codeToPReg(arg1, context) val fpReg = newUReg() fun doConvert() = [BlockSimple( SSE2RealToInt{source=RegisterArgument fpReg, dest=convResult, isDouble=false, isTruncate = rndMode = IEEEReal.TO_ZERO })] in argCode @ [BlockSimple(UntagFloat{source=RegisterArgument aReg, dest=fpReg, cache=NONE})] @ ( case rndMode of IEEEReal.TO_ZERO => doConvert() | _ => setAndRestoreRounding(rndMode, doConvert) ) end val checkAndTag = BlockSimple(ShiftOperation{ shift=SHL, resultReg=wrkReg2, operand=convResult, shiftAmount=IntegerConstant 1, ccRef=chkOverflow, opSize=polyWordOpSize}) :: checkOverflow context chkOverflow @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=wrkReg2, operand2=IntegerConstant 1, ccRef = newCCRef(), opSize=polyWordOpSize})] in (revApp(doConvert @ checkAndTag, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.TouchAddress, arg1}, context, _, destination, tailCode) = let (* Put the value in a register. This is not entirely necessary but ensures that if the value is a constant the constant will be included in the code. *) val (argCode, aReg) = codeToPRegRev(arg1, context, tailCode) in moveIfNotAllowedRev(destination, BlockSimple(TouchArgument{source=aReg}) :: argCode, (* Unit result *) IntegerConstant(tag 0)) end and codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test, isSigned}, arg1, arg2=BICConstnt(arg2Value, _)}, context, _, destination, tailCode) = let (* Comparisons. Because this is also used for pointer equality and even for exception matching it is perfectly possible that the argument could be an address. *) val ccRef = newCCRef() val comparison = (* If the argument is a tagged value that will fit in 32-bits we can use the literal version. Use toLargeIntX here because the value will be sign-extended even if we're actually doing an unsigned comparison. *) if isShort arg2Value andalso is32bit(tag(Word.toLargeIntX(toShort arg2Value))) then let val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} in (* We're often comparing with a character or a string length field that has to be untagged. In that case we can avoid loading it into a register and untagging it by doing the comparison directly. 
*) case arg1 of BICLoadOperation{kind=LoadStoreUntaggedUnsigned, address} => let val (codeBaseIndex, codeUntag, memLoc) = codeAddressRev(address, false, context, tailCode) val literal = Word.toLargeIntX(toShort arg2Value) in BlockSimple(CompareLiteral{arg1=MemoryLocation memLoc, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: codeUntag @ codeBaseIndex end | BICLoadOperation{kind=LoadStoreMLByte _, address} => let val (codeBaseIndex, codeUntag, {base, index, offset, ...}) = codeAddressRev(address, true, context, tailCode) val _ = toShort arg2Value >= 0w0 andalso toShort arg2Value < 0w256 orelse raise InternalError "Compare byte not a byte" val literal = Word8.fromLargeWord(Word.toLargeWord(toShort arg2Value)) in BlockSimple(CompareByteMem{arg1={base=base, index=index, offset=offset}, arg2=literal, ccRef=ccRef}) :: codeUntag @ codeBaseIndex end | BICUnary({oper=BuiltIns.MemoryCellFlags, arg1}) => (* This occurs particularly in arbitrary precision comparisons. *) let val (baseCode, baseReg) = codeToPRegRev(arg1, context, tailCode) val _ = toShort arg2Value >= 0w0 andalso toShort arg2Value < 0w256 orelse raise InternalError "Compare memory cell not a byte" val literal = Word8.fromLargeWord(Word.toLargeWord(toShort arg2Value)) in BlockSimple(CompareByteMem{arg1={base=baseReg, index=memIndexOrObject, offset= ~1}, arg2=literal, ccRef=ccRef}) :: baseCode end | _ => let (* TODO: We could include rarer cases of tagging by looking at the code and seeing if it's a TagValue. *) val (testCode, testDest, _) = codeToICodeRev(arg1, context, false, allow, tailCode) val literal = tag(Word.toLargeIntX(toShort arg2Value)) in BlockSimple(CompareLiteral{arg1=testDest, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: testCode end end else (* Addresses or larger values. We need to use a register comparison. *) let val (testCode, testReg) = codeToPRegRev(arg1, context, tailCode) val arg2Arg = constantAsArgument arg2Value in BlockSimple(WordComparison{arg1=testReg, arg2=arg2Arg, ccRef=ccRef, opSize=polyWordOpSize}) :: testCode end val target = asTarget destination in (makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, comparison), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test, isSigned}, arg1=BICConstnt(arg1Value, _), arg2}, context, _, destination, tailCode) = let (* If we have the constant first we need to reverse the test so the first argument is a register. *) val ccRef = newCCRef() val comparison = if isShort arg1Value andalso is32bit(tag(Word.toLargeIntX(toShort arg1Value))) then let val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} val (testCode, testDest, _) = codeToICodeRev(arg2, context, false, allow, tailCode) val literal = tag(Word.toLargeIntX(toShort arg1Value)) in BlockSimple(CompareLiteral{arg1=testDest, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: testCode end else (* Addresses or larger values. We need to use a register comparison. 
*) let val (testCode, testReg) = codeToPRegRev(arg2, context, tailCode) val arg1Arg = constantAsArgument arg1Value in BlockSimple(WordComparison{arg1=testReg, arg2=arg1Arg, ccRef=ccRef, opSize=polyWordOpSize}) :: testCode end val target = asTarget destination in (makeBoolResultRev(testAsBranch(leftRightTest test, isSigned, true), ccRef, target, comparison), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordComparison {test, isSigned}, arg1, arg2}, context, _, destination, tailCode) = let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (arg1Code, arg1Result, _) = codeToICodeRev(arg1, context, false, Allowed memOrReg, tailCode) val (arg2Code, arg2Result, _) = codeToICodeRev(arg2, context, false, Allowed memOrReg, arg1Code) val target = asTarget destination val code = case (arg1Result, arg2Result) of (RegisterArgument arg1Reg, arg2Result) => makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg1Reg, arg2=arg2Result, ccRef=ccRef, opSize=polyWordOpSize}) :: arg2Code) | (arg1Result, RegisterArgument arg2Reg) => (* The second argument is in a register - switch the sense of the test. *) makeBoolResultRev(testAsBranch(leftRightTest test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg2Reg, arg2=arg1Result, ccRef=ccRef, opSize=polyWordOpSize}) :: arg2Code) | (arg1Result, arg2Result) => let (* Have to load an argument - pick the first. *) val arg1Reg = newPReg() in makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg1Reg, arg2=arg2Result, ccRef=ccRef, opSize=polyWordOpSize}) :: BlockSimple(LoadArgument{source=arg1Result, dest=arg1Reg, kind=movePolyWord}) :: arg2Code) end in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.FixedPrecisionArith oper, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val code = codeFixedPrecisionArith(oper, arg1, arg2, context, target, checkOverflow context) in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. N.B. it is possible to have type-incorrect values in dead code. i.e. code that will never be executed because of a run-time check. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef = newCCRef(), opSize=polyWordOpSize}) :: arg1Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. 
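The register operand is 2a+1 and the semitagged constant is 2b, so a single ADD produces 2(a+b)+1, which is already correctly tagged.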
*) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg2Code, aReg2) = codeToPRegRev(arg2, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg2, operand2=IntegerConstant constVal, ccRef = newCCRef(), opSize=polyWordOpSize}) :: arg2Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) (* Use LEA to do the addition since we're not concerned with overflow. This is shorter than subtracting the tag and adding the values and also moves the result into the appropriate register. *) val code = arg1Code @ arg2Code @ [BlockSimple(LoadEffectiveAddress{base=SOME aReg1, offset= ~1, index=MemIndex1 aReg2, dest=target, opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithSub, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=polyWordOpSize}) :: arg1Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithSub, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val aReg3 = newPReg() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Do the subtraction and add in the tag bit. This could be reordered if we have cascaded operations since we don't need to check for overflow. *) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=RegisterArgument aReg2, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = codeMultiplyConstantWordRev(arg1, context, destination, if isShort value then toShort value else 0w0, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = codeMultiplyConstantWordRev(arg2, context, destination, if isShort value then toShort value else 0w0, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() and resUntagged = newUReg() val code = arg1Code @ arg2Code @ (* Shift one argument and subtract the tag from the other. It's possible this could be reordered if we have a value that is already untagged. 
*) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=arg2Untagged, operand1=aReg2, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=arg1Untagged, operand2=RegisterArgument arg2Untagged, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithDiv, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() val code = arg1Code @ arg2Code @ (* Shift both of the arguments to remove the tags. We don't test for zero here - that's done explicitly. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = false, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=quotient, dest=target, isSigned=false, opSize=polyWordOpSize })] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMod, arg1, arg2}, context, _, destination, tailCode) = let (* Identical to Quot except that the result is the remainder. *) val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() val code = arg1Code @ arg2Code @ (* Shift both of the arguments to remove the tags. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = false, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=remainder, dest=target, isSigned=false, opSize=polyWordOpSize })] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith _, ...}, _, _, _, _) = raise InternalError "codeToICodeNonRev: WordArith - unimplemented operation" | codeToICodeBinaryRev({oper=BuiltIns.WordLogical logOp, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) (* Use a semitagged value for XOR. This preserves the tag bit. Use toLargeIntX here because the operations will sign-extend 32-bit values. 
*) val constVal = if isShort value then (case logOp of BuiltIns.LogicalXor => semitag | _ => tag) (Word.toLargeIntX(toShort value)) else 0 val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constVal <= 0xffffffff andalso constVal >= 0 then OpSize32 else polyWordOpSize val code = arg1Code @ [BlockSimple(ArithmeticFunction{oper=oper, resultReg=target, operand1=arg1Reg, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=opSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical logOp, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, arg2Reg) = codeToPReg(arg2, context) (* Use a semitagged value for XOR. This preserves the tag bit. *) val constVal = if isShort value then (case logOp of BuiltIns.LogicalXor => semitag | _ => tag) (Word.toLargeIntX(toShort value)) else 0 val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constVal <= 0xffffffff andalso constVal >= 0 then OpSize32 else polyWordOpSize val code = arg2Code @ [BlockSimple(ArithmeticFunction{oper=oper, resultReg=target, operand1=arg2Reg, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=opSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalOr, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Or-ing preserves the tag bit. *) [BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalAnd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Since they're both tagged the result will be tagged. *) [BlockSimple(ArithmeticFunction{oper=AND, resultReg=target, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalXor, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val aReg3 = newPReg() val code = arg1Code @ arg2Code @ (* We need to restore the tag bit after the operation. 
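Both operands have the bit set so the XOR clears it; the OR with 1 puts it back.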
*) [BlockSimple(ArithmeticFunction{oper=XOR, resultReg=aReg3, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordShift BuiltIns.ShiftLeft, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = (* Use the general case multiplication code. This will use a shift except for small values. It does detect special cases such as multiplication by 4 and 8 which can be implemented with LEA. *) codeMultiplyConstantWordRev(arg1, context, destination, if isShort value then Word.<<(0w1, toShort value) else 0w1, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordShift shift, arg1, arg2}, context, _, destination, tailCode) = (* N.B. X86 shifts of greater than the word length mask the higher bits. That isn't what ML wants but that is dealt with at a higher level *) let open BuiltIns val target = asTarget destination (* Load the value into an untagged register. If this is a left shift we need to clear the tag bit. We don't need to do that for right shifts. *) val argRegUntagged = newUReg() val arg1Code = case arg1 of BICConstnt(value, _) => let (* Remove the tag bit. This isn't required for right shifts. *) val cnstntVal = if isShort value then semitag(Word.toLargeInt(toShort value)) else 1 in [BlockSimple(LoadArgument{source=IntegerConstant cnstntVal, dest=argRegUntagged, kind=movePolyWord})] end | _ => let val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val removeTag = case shift of ShiftLeft => ArithmeticFunction{oper=SUB, resultReg=argRegUntagged, operand1=arg1Reg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize} | _ => LoadArgument{source=RegisterArgument arg1Reg, dest=argRegUntagged, kind=movePolyWord} in arg1Code @ [BlockSimple removeTag] end (* The shift amount can usefully be a constant. *) val (arg2Code, untag2Code, arg2Arg) = codeAsUntaggedByte(arg2, false, context) val resRegUntagged = newUReg() val shiftOp = case shift of ShiftLeft => SHL | ShiftRightLogical => SHR | ShiftRightArithmetic => SAR val code = arg1Code @ arg2Code @ untag2Code @ [BlockSimple(ShiftOperation{ shift=shiftOp, resultReg=resRegUntagged, operand=argRegUntagged, shiftAmount=arg2Arg, ccRef=newCCRef(), opSize=polyWordOpSize }), (* Set the tag by ORing it in. This will work whether or not a right shift has shifted a 1 into this position. 
*) BlockSimple( ArithmeticFunction{oper=OR, resultReg=target, operand1=resRegUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.AllocateByteMemory, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val sizeReg = newPReg() and baseReg = newPReg() val sizeCode = codeToICodeTarget(arg1, context, false, sizeReg) val (flagsCode, flagUntag, flagArg) = codeAsUntaggedByte(arg2, false, context) val code =sizeCode @ flagsCode @ [BlockSimple(AllocateMemoryVariable{size=sizeReg, dest=baseReg, saveRegs=[]})] @ flagUntag @ [BlockSimple(StoreArgument{ source=flagArg, base=baseReg, offset= ~1, index=memIndexOrObject, kind=MoveByte, isMutable=false}), BlockSimple InitialisationComplete, BlockSimple(LoadArgument{ source=RegisterArgument baseReg, dest=target, kind=movePolyWord})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordComparison test, arg1, arg2}, context, _, destination, tailCode) = let val ccRef = newCCRef() val (arg1Code, arg1Reg) = codeToPRegRev(arg1, context, tailCode) (* In X64 we can extract the word from a constant and do the comparison directly. That can't be done in X86/32 because the value isn't tagged and might look like an address. The RTS scans for comparisons with inline constant addresses. *) val (arg2Code, arg2Operand) = if targetArch <> Native32Bit then (* Native 64-bit or 32-in-64. *) ( case arg2 of BICConstnt(value, _) => (arg1Code, IntegerConstant(largeWordConstant value)) | _ => let val (code, reg) = codeToPRegRev(arg2, context, arg1Code) in (code, wordAt reg) end ) else let val (code, reg) = codeToPRegRev(arg2, context, arg1Code) in (code, wordAt reg) end val argReg = newUReg() val target = asTarget destination val code = makeBoolResultRev(testAsBranch(test, false, true), ccRef, target, BlockSimple(WordComparison{arg1=argReg, arg2=arg2Operand, ccRef=ccRef, opSize=nativeWordOpSize}) :: BlockSimple(LoadArgument{source=wordAt arg1Reg, dest=argReg, kind=moveNativeWord}) :: arg2Code) in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code =arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code = arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg2, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), 
BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithSub, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code = arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithSub, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithMult, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val resValue = newUReg() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val argReg1 = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg1, kind=moveNativeWord}), BlockSimple(Multiplication{resultReg=resValue, operand1=argReg1, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=resValue, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithDiv, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val quotient = newUReg() and remainder = newUReg() val dividendReg = newUReg() and divisorReg = newUReg() val code = arg1Code @ arg2Code @ (* We don't test for zero here - that's done explicitly. 
*) [BlockSimple(LoadArgument{source=wordAt aReg1, dest=dividendReg, kind=moveNativeWord}), BlockSimple(LoadArgument{source=wordAt aReg2, dest=divisorReg, kind=moveNativeWord}), BlockSimple(Division { isSigned = false, dividend=dividendReg, divisor=RegisterArgument divisorReg, quotient=quotient, remainder=remainder, opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=quotient, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithMod, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val quotient = newUReg() and remainder = newUReg() val dividendReg = newUReg() and divisorReg = newUReg() val code = arg1Code @ arg2Code @ (* We don't test for zero here - that's done explicitly. *) [BlockSimple(LoadArgument{source=wordAt aReg1, dest=dividendReg, kind=moveNativeWord}), BlockSimple(LoadArgument{source=wordAt aReg2, dest=divisorReg, kind=moveNativeWord}), BlockSimple(Division { isSigned = false, dividend=dividendReg, divisor=RegisterArgument divisorReg, quotient=quotient, remainder=remainder, opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=remainder, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith _, ...}, _, _, _, _) = raise InternalError "codeToICodeNonRev: LargeWordArith - unimplemented operation" | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constantValue <= 0xffffffff andalso constantValue >= 0 then OpSize32 else nativeWordOpSize val code = arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=opSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. 
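A 32-bit AND on X64 zeroes the upper half of the destination register, and because the mask's upper 32 bits are zero the full 64-bit AND would also produce zeros there, so the two forms give identical results.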
*) val opSize = if logOp = BuiltIns.LogicalAnd andalso constantValue <= 0xffffffff andalso constantValue >= 0 then OpSize32 else nativeWordOpSize val code = arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg2, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=opSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordShift shift, arg1, arg2}, context, _, destination, tailCode) = (* The shift is always a Word.word value i.e. tagged. There is a check at the higher level that the shift does not exceed 32/64 bits. *) let open BuiltIns val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, untag2Code, arg2Arg) = codeAsUntaggedByte(arg2, false, context) val aReg3 = newUReg() val shiftOp = case shift of ShiftLeft => SHL | ShiftRightLogical => SHR | ShiftRightArithmetic => SAR val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord})] @ untag2Code @ [BlockSimple(ShiftOperation{ shift=shiftOp, resultReg=aReg3, operand=argReg, shiftAmount=arg2Arg, ccRef=newCCRef(), opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealArith(fpOpPrec as (fpOp, fpPrec)), arg1, arg2}, context, _, destination, tailCode) = let open BuiltIns val commutative = case fpOp of ArithSub => NonCommutative | ArithDiv => NonCommutative | ArithAdd => Commutative | ArithMult => Commutative | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" val (argCodeRev, fpRegSrc, arg2Value) = codeFPBinaryArgsRev(arg1, arg2, fpPrec, commutative, context, []) val argCode = List.rev argCodeRev val target = asTarget destination val fpRegDest = newUReg() val arith = case fpMode of FPModeX87 => let val fpOp = case fpOp of ArithAdd => FADD | ArithSub => FSUB | ArithMult => FMUL | ArithDiv => FDIV | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" val isDouble = case fpPrec of PrecSingle => false | PrecDouble => true in [BlockSimple(X87FPArith{ opc=fpOp, resultReg=fpRegDest, arg1=fpRegSrc, arg2=arg2Value, isDouble=isDouble})] end | FPModeSSE2 => let val fpOp = case fpOpPrec of (ArithAdd, PrecSingle) => SSE2BAddSingle | (ArithSub, PrecSingle) => SSE2BSubSingle | (ArithMult, PrecSingle) => SSE2BMulSingle | (ArithDiv, PrecSingle) => 
SSE2BDivSingle | (ArithAdd, PrecDouble) => SSE2BAddDouble | (ArithSub, PrecDouble) => SSE2BSubDouble | (ArithMult, PrecDouble) => SSE2BMulDouble | (ArithDiv, PrecDouble) => SSE2BDivDouble | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" in [BlockSimple(SSE2FPBinary{ opc=fpOp, resultReg=fpRegDest, arg1=fpRegSrc, arg2=arg2Value})] end (* Box or tag the result. *) val result = boxOrTagReal(fpRegDest, target, fpPrec) in (revApp(argCode @ arith @ result, tailCode), RegisterArgument target, false) end (* Floating point comparison. This is complicated because we have different instruction sequences for SSE2 and X87. We also have to get the handling of unordered (NaN) values right. All the tests are treated as false if either argument is a NaN. To combine that test with the other tests we sometimes have to reverse the comparison. *) | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(BuiltIns.TestEqual, precision), arg1, arg2}, context, _, destination, tailCode) = let (* Get the arguments. It's commutative. *) val (arg2Code, fpReg, arg2Val) = codeFPBinaryArgsRev(arg1, arg2, precision, Commutative, context, tailCode) val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testReg1 = newUReg() and testReg2 = newUReg() and testReg3 = newUReg() (* If this is X87 we get the condition into RAX and test it there. If it is SSE2 we have to treat the unordered result (parity set) specially. *) val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=XOR, resultReg=testReg3, operand1=testReg2, operand2=IntegerConstant 0x4000, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant 0x4400, ccRef=newCCRef(), opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) | FPModeSSE2 => let val noParityLabel = newLabel() val resultLabel = newLabel() val falseLabel = newLabel() val trueLabel = newLabel() val mergeReg = newMergeReg() in BlockSimple(LoadArgument{ source=RegisterArgument mergeReg, dest=target, kind=Move32Bit }) :: BlockLabel resultLabel :: BlockFlow(Unconditional resultLabel) :: (* Result is false if parity is set i.e. unordered or if unequal. *) BlockSimple(LoadArgument{ source=IntegerConstant(tag 0), dest=mergeReg, kind=Move32Bit }) :: BlockLabel falseLabel :: BlockFlow(Unconditional resultLabel) :: (* Result is true if it's ordered and equal. *) BlockSimple(LoadArgument{ source=IntegerConstant(tag 1), dest=mergeReg, kind=Move32Bit }) :: BlockLabel trueLabel :: (* Not unordered - test the equality *) BlockFlow(Conditional{ccRef=ccRef1, condition=JE, trueJump=trueLabel, falseJump=falseLabel}) :: BlockLabel noParityLabel :: (* Go to falseLabel if unordered and therefore not equal. *) BlockFlow(Conditional{ccRef=ccRef1, condition=JP, trueJump=falseLabel, falseJump=noParityLabel}) :: BlockSimple(SSE2Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code end in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(BuiltIns.TestUnordered, precision), arg1, arg2}, context, _, destination, tailCode) = let (* The unordered test is really included because it is easy to implement and is the simplest way of implementing isNan. *) (* Get the arguments. It's commutative. 
*) val (arg2Code, fpReg, arg2Val) = codeFPBinaryArgsRev(arg1, arg2, precision, Commutative, context, tailCode) val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testReg1 = newUReg() and testReg2 = newUReg() and testReg3 = newUReg() (* If this is X87 we get the condition into RAX and test it there. If it is SSE2 we have to treat the unordered result (parity set) specially. *) val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => (* And with 0x4500. We have to use XOR rather than CMP to avoid having an untagged constant comparison. *) makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=XOR, resultReg=testReg3, operand1=testReg2, operand2=IntegerConstant 0x4500, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant 0x4500, ccRef=newCCRef(), opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) | FPModeSSE2 => makeBoolResultRev(JP, ccRef1, target, BlockSimple(SSE2Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(comparison, precision), arg1, arg2}, context, _, destination, tailCode) = let (* Ordered comparisons are complicated because they are all defined to be false if either argument is a NaN. We have two different tests for a > b and a >= b and implement a < b and a <= b by changing the order of the arguments. *) val (arg1Code, arg1Value) = codeFPArgument(arg1, precision, context, tailCode) val (arg2Code, arg2Value) = codeFPArgument(arg2, precision, context, arg1Code) val (regArg, opArg, isGeq) = case comparison of BuiltIns.TestGreater => (arg1Value, arg2Value, false) | BuiltIns.TestLess => (arg2Value, arg1Value, false) (* Reversed: a<b is b>a. *) | BuiltIns.TestGreaterEqual => (arg1Value, arg2Value, true) | BuiltIns.TestLessEqual => (arg2Value, arg1Value, true) (* Reversed: a<=b is b>=a. *) | _ => raise InternalError "RealComparison: unimplemented operation" (* Load the first operand into a register.
*) val (fpReg, loadCode) = case regArg of RegisterArgument fpReg => (fpReg, arg2Code) | regArg => let val fpReg = newUReg() val moveOp = case precision of BuiltIns.PrecDouble => MoveDouble | BuiltIns.PrecSingle => MoveFloat in (fpReg, BlockSimple(LoadArgument{source=regArg, dest=fpReg, kind=moveOp}) :: arg2Code) end val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => let val testReg1 = newUReg() and testReg2 = newUReg() val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testBits = if isGeq then 0x500 else 0x4500 in makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant testBits, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=opArg, ccRef=ccRef1, isDouble = isDouble}) :: loadCode) end | FPModeSSE2 => let val ccRef1 = newCCRef() val condition = if isGeq then JNB (* >=, <= *) else JA (* >, < *) in makeBoolResultRev(condition, ccRef1, target, BlockSimple(SSE2Compare{arg1=fpReg, arg2=opArg, ccRef=ccRef1, isDouble = isDouble}) :: loadCode) end in (code, RegisterArgument target, false) end (* Multiply tagged word by a constant. We're not concerned with overflow so it's possible to use various short cuts. *) and codeMultiplyConstantWordRev(arg, context, destination, multiplier, tailCode) = let val target = asTarget destination val (argCode, aReg) = codeToPReg(arg, context) val doMultiply = case multiplier of 0w0 => [BlockSimple(LoadArgument{source=IntegerConstant 1, dest=target, kind=movePolyWord})] | 0w1 => [BlockSimple(LoadArgument{source=RegisterArgument aReg, dest=target, kind=movePolyWord})] | 0w2 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~1, index=MemIndex1 aReg, dest=target, opSize=polyWordOpSize})] | 0w3 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~2, index=MemIndex2 aReg, dest=target, opSize=polyWordOpSize})] | 0w4 => [BlockSimple(LoadEffectiveAddress{base=NONE, offset= ~3, index=MemIndex4 aReg, dest=target, opSize=polyWordOpSize})] | 0w5 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~4, index=MemIndex4 aReg, dest=target, opSize=polyWordOpSize})] | 0w8 => [BlockSimple(LoadEffectiveAddress{base=NONE, offset= ~7, index=MemIndex8 aReg, dest=target, opSize=polyWordOpSize})] | 0w9 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~8, index=MemIndex8 aReg, dest=target, opSize=polyWordOpSize})] | _ => let val tReg = newUReg() val tagCorrection = Word.toLargeInt multiplier - 1 fun getPower2 n = let fun p2 (n, l) = if n = 0w1 then SOME l else if Word.andb(n, 0w1) = 0w1 then NONE else p2(Word.>>(n, 0w1), l+0w1) in if n = 0w0 then NONE else p2(n,0w0) end val multiply = case getPower2 multiplier of SOME power => (* Shift it including the tag. *) BlockSimple(ShiftOperation{ shift=SHL, resultReg=tReg, operand=aReg, shiftAmount=IntegerConstant(Word.toLargeInt power), ccRef=newCCRef(), opSize=polyWordOpSize }) | NONE => (* Multiply including the tag. 
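With t = 2*a+1 the product t*m is 2*a*m + m, so the SUB of tagCorrection = m-1 that follows leaves 2*a*m + 1, the correctly tagged result; the same correction works after the shift in the power-of-two case because t*2^k = 2*a*2^k + 2^k.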
*) BlockSimple(Multiplication{resultReg=tReg, operand1=aReg, operand2=IntegerConstant(Word.toLargeInt multiplier), ccRef=newCCRef(), opSize=polyWordOpSize}) in [multiply, BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=tReg, operand2=IntegerConstant tagCorrection, ccRef=newCCRef(), opSize=polyWordOpSize})] end in (revApp(argCode @ doMultiply, tailCode), RegisterArgument target, false) end and codeToICodeAllocate({numWords as BICConstnt(length, _), flags as BICConstnt(flagValue, _), initial}, context, _, destination) = (* Constant length and flags is used for ref. We could handle other cases. *) if isShort length andalso isShort flagValue andalso toShort length = 0w1 then let val target = asTarget destination (* Force a different register. *) val vecLength = Word.toInt(toShort length) val flagByte = Word8.fromLargeWord(Word.toLargeWord(toShort flagValue)) val memAddr = newPReg() and valueReg = newPReg() fun initialise n = BlockSimple(StoreArgument{ source=RegisterArgument valueReg, offset=n*Word.toInt wordSize, base=memAddr, index=memIndexOrObject, kind=movePolyWord, isMutable=false}) val code = codeToICodeTarget(initial, context, false, valueReg) @ [BlockSimple(AllocateMemoryOperation{size=vecLength, flags=flagByte, dest=memAddr, saveRegs=[]})] @ List.tabulate(vecLength, initialise) @ [BlockSimple InitialisationComplete, BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord})] in (code, RegisterArgument target, false) end else (* If it's longer use the full run-time form. *) allocateMemoryVariable(numWords, flags, initial, context, destination) | codeToICodeAllocate({numWords, flags, initial}, context, _, destination) = allocateMemoryVariable(numWords, flags, initial, context, destination) and codeToICodeLoad({kind=LoadStoreMLWord _, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, false, context) in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument {source=MemoryLocation memLoc, dest=target, kind=movePolyWord})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreMLByte _, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, true, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveByte}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC8, address}, context, _, destination) = let (* Load a byte from C memory. This is almost exactly the same as LoadStoreMLByte except that the base address is a LargeWord.word value. *) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w1, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveByte}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC16, address}, context, _, destination) = let (* Load a 16-bit value from C memory. 
*) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w2, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move16Bit}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC32, address}, context, _, destination) = let (* Load a 32-bit value from C memory. If this is 64-bit mode we can tag it but if this is 32-bit mode we need to box it. *) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w4, context) val untaggedResReg = newUReg() val boxTagCode = if targetArch = Native64Bit then BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize64 (* It becomes 33 bits *)}) else BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untaggedResReg, dest=target, saveRegs=[]}) in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move32Bit}), boxTagCode], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC64, address}, context, _, destination) = let (* Load a 64-bit value from C memory. This is only allowed in 64-bit mode. The result is a boxed value. *) val _ = targetArch <> Native32Bit orelse raise InternalError "codeToICodeNonRev: BICLoadOperation LoadStoreC64 in 32-bit" val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w8, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move64Bit}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreCFloat, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w4, context) val untaggedResReg = newUReg() val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double (* We need to convert the float into a double. 
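With SSE2 the widening is done as part of the load (SSE2UFloatToDouble); on the X87 path a MoveFloat load is enough because the FPU extends values to its internal precision, and in both cases the result is then boxed as a double.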
*) val loadArg = case fpMode of FPModeX87 => BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveFloat}) | FPModeSSE2 => BlockSimple(SSE2FPUnary { source=MemoryLocation memLoc, resultReg=untaggedResReg, opc=SSE2UFloatToDouble}) in (codeBaseIndex @ codeUntag @ [loadArg, BlockSimple(BoxValue{boxKind=boxFloat, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreCDouble, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w8, context) val untaggedResReg = newUReg() val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveDouble}), BlockSimple(BoxValue{boxKind=boxFloat, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreUntaggedUnsigned, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, false, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=movePolyWord}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=polyWordOpSize})], RegisterArgument target, false) end and codeToICodeStore({kind=LoadStoreMLWord _, address, value}, context, _, destination) = let val (sourceCode, source, _) = codeToICode(value, context, false, Allowed allowInMemMove) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, false, context) val code = codeBaseIndex @ sourceCode @ codeUntag @ [BlockSimple(StoreArgument {source=source, base=base, offset=offset, index=index, kind=movePolyWord, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreMLByte _, address, value}, context, _, destination) = let val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, true, context) (* We have to untag the value to store. *) val (valueCode, untagValue, valueArg) = codeAsUntaggedByte(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=valueArg, base=base, offset=offset, index=index, kind=MoveByte, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC8, address, value}, context, _, destination) = let (* Store a byte to C memory. Almost exactly the same as LoadStoreMLByte. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w1, context) val (valueCode, untagValue, valueArg) = codeAsUntaggedByte(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=valueArg, base=base, offset=offset, index=index, kind=MoveByte, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC16, address, value}, context, _, destination) = let (* Store a 16-bit value to C memory. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w2, context) (* We don't currently implement 16-bit constant moves so this must always be in a reg. 
*) val (valueCode, untagValue, valueArg) = codeAsUntaggedToReg(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueArg, base=base, offset=offset, index=index, kind=Move16Bit, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC32, address, value}, context, _, destination) = (* Store a 32-bit value. If this is 64-bit mode we untag it but if this is 32-bit mode we unbox it. *) let val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w4, context) val code = if targetArch = Native64Bit then let (* We don't currently implement 32-bit constant moves so this must always be in a reg. *) val (valueCode, untagValue, valueArg) = codeAsUntaggedToReg(value, false, context) in codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueArg, base=base, offset=offset, index=index, kind=Move32Bit, isMutable=true})] end else let val (valueCode, valueReg) = codeToPReg(value, context) val valueReg1 = newUReg() in codeBaseIndex @ valueCode @ BlockSimple(LoadArgument{source=wordAt valueReg, dest=valueReg1, kind=Move32Bit}) :: codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueReg1, base=base, offset=offset, index=index, kind=Move32Bit, isMutable=true})] end in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC64, address, value}, context, _, destination) = let (* Store a 64-bit value. *) val _ = targetArch <> Native32Bit orelse raise InternalError "codeToICodeNonRev: BICStoreOperation LoadStoreC64 in 32-bit" val (valueCode, valueReg) = codeToPReg(value, context) val valueReg1 = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w8, context) val code = codeBaseIndex @ valueCode @ codeUntag @ [BlockSimple(LoadArgument{source=wordAt valueReg, dest=valueReg1, kind=Move64Bit}), BlockSimple(StoreArgument {source=RegisterArgument valueReg1, base=base, offset=offset, index=index, kind=Move64Bit, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreCFloat, address, value}, context, _, destination) = let val floatReg = newUReg() and float2Reg = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w4, context) val (valueCode, valueReg) = codeToPReg(value, context) (* If we're using an SSE2 reg we have to convert it from double to single precision. 
*) val (storeReg, cvtCode) = case fpMode of FPModeSSE2 => (float2Reg, [BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=float2Reg, source=RegisterArgument floatReg})]) | FPModeX87 => (floatReg, []) val code = codeBaseIndex @ valueCode @ codeUntag @ BlockSimple(LoadArgument{source=wordAt valueReg, dest=floatReg, kind=MoveDouble}) :: cvtCode @ [BlockSimple(StoreArgument {source=RegisterArgument storeReg, base=base, offset=offset, index=index, kind=MoveFloat, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreCDouble, address, value}, context, _, destination) = let val floatReg = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w8, context) val (valueCode, valueReg) = codeToPReg(value, context) val code = codeBaseIndex @ valueCode @ codeUntag @ [BlockSimple(LoadArgument{source=wordAt valueReg, dest=floatReg, kind=MoveDouble}), BlockSimple(StoreArgument {source=RegisterArgument floatReg, base=base, offset=offset, index=index, kind=MoveDouble, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreUntaggedUnsigned, address, value}, context, _, destination) = let (* We have to untag the value to store. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, false, context) (* See if it's a constant. This is frequently used to set the last word of a string to zero. *) (* We have to be a bit more careful on the X86. We use moves to store constants that can include addresses. To avoid problems we only use a move if the value is zero or odd and so looks like a tagged value. *) val storeAble = case value of BICConstnt(value, _) => if not(isShort value) then NONE else let val ival = Word.toLargeIntX(toShort value) in if targetArch = Native64Bit then if is32bit ival then SOME ival else NONE else if ival = 0 orelse ival mod 2 = 1 then SOME ival else NONE end | _ => NONE val (storeVal, valCode) = case storeAble of SOME value => (IntegerConstant value (* Leave untagged *), []) | NONE => let val valueReg = newPReg() and valueReg1 = newUReg() in (RegisterArgument valueReg1, codeToICodeTarget(value, context, false, valueReg) @ [BlockSimple(UntagValue{dest=valueReg1, source=valueReg, isSigned=false, cache=NONE, opSize=polyWordOpSize})]) end val code = codeBaseIndex @ valCode @ codeUntag @ [BlockSimple(StoreArgument {source=storeVal, base=base, offset=offset, index=index, kind=movePolyWord, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end and codeToICodeBlock({kind=BlockOpCompareByte, sourceLeft, destRight, length}, context, _, destination) = let (* This is effectively a big-endian comparison since we compare the bytes until we find an inequality. 
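The result is a tagged ~1, 0 or 1 according to whether the left operand is less than, equal to or greater than the right, the individual bytes being compared as unsigned values (JB/JA).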
*) val target = asTarget destination val mergeResult = newMergeReg() val vec1Reg = newUReg() and vec2Reg = newUReg() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddress(sourceLeft, true, context) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddress(destRight, true, context) val ccRef = newCCRef() val labLess = newLabel() and labGreater = newLabel() and exitLab = newLabel() val labNotLess = newLabel() and labNotGreater = newLabel() val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToReg(length, false (* unsigned *), context) val code = leftCode @ rightCode @ lengthCode @ leftUntag @ [BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg})] @ rightUntag @ [BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg})] @ lengthUntag @ [BlockSimple(CompareByteVectors{ vec1Addr=vec1Reg, vec2Addr=vec2Reg, length=lengthArg, ccRef=ccRef }), (* N.B. These are unsigned comparisons. *) BlockFlow(Conditional{ ccRef=ccRef, condition=JB, trueJump=labLess, falseJump=labNotLess }), BlockLabel labNotLess, BlockFlow(Conditional{ ccRef=ccRef, condition=JA, trueJump=labGreater, falseJump=labNotGreater }), BlockLabel labNotGreater, BlockSimple(LoadArgument{ source=IntegerConstant(tag 0), dest=mergeResult, kind=movePolyWord }), BlockFlow(Unconditional exitLab), BlockLabel labLess, BlockSimple(LoadArgument{ source=IntegerConstant(tag ~1), dest=mergeResult, kind=movePolyWord }), BlockFlow(Unconditional exitLab), BlockLabel labGreater, BlockSimple(LoadArgument{ source=IntegerConstant(tag 1), dest=mergeResult, kind=movePolyWord }), BlockLabel exitLab, BlockSimple(LoadArgument{ source=RegisterArgument mergeResult, dest=target, kind=movePolyWord })] in (code, RegisterArgument target, false) end | codeToICodeBlock({kind=BlockOpMove {isByteMove}, sourceLeft, destRight, length}, context, _, destination) = let (* Moves of 4 or 8 bytes can be done as word moves provided the alignment is correct. Although this will work for strings it is really to handle moves between SysWord and volatileRef in Foreign.Memory. Moves of 1, 2 or 3 bytes or words are converted into a sequence of byte or word moves. 
*) val isWordMove = case (isByteMove, length) of (true, BICConstnt(l, _)) => if not (isShort l) orelse (toShort l <> 0w4 andalso toShort l <> nativeWordSize) then NONE else let val leng = toShort l val moveKind = if toShort l = nativeWordSize then moveNativeWord else Move32Bit val isLeftAligned = case sourceLeft of {index=NONE, offset, ...} => offset mod leng = 0w0 | _ => false val isRightAligned = case destRight of {index=NONE, offset, ...} => offset mod leng = 0w0 | _ => false in if isLeftAligned andalso isRightAligned then SOME moveKind else NONE end | _ => NONE in case isWordMove of SOME moveKind => let val (leftCode, leftUntag, leftMem) = codeAddress(sourceLeft, isByteMove, context) val (rightCode, rightUntag, {base, offset, index, ...}) = codeAddress(destRight, isByteMove, context) val untaggedResReg = newUReg() val code = leftCode @ rightCode @ leftUntag @ rightUntag @ [BlockSimple(LoadArgument { source=MemoryLocation leftMem, dest=untaggedResReg, kind=moveKind}), BlockSimple(StoreArgument {source=RegisterArgument untaggedResReg, base=base, offset=offset, index=index, kind=moveKind, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | _ => let val vec1Reg = newUReg() and vec2Reg = newUReg() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddress(sourceLeft, isByteMove, context) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddress(destRight, isByteMove, context) val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToReg(length, false (* unsigned *), context) val code = leftCode @ rightCode @ lengthCode @ leftUntag @ [BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg})] @ rightUntag @ [BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg})] @ lengthUntag @ [BlockSimple(BlockMove{ srcAddr=vec1Reg, destAddr=vec2Reg, length=lengthArg, isByteMove=isByteMove })] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end end | codeToICodeBlock({kind=BlockOpEqualByte, ...}, _, _, _) = (* TODO: Move the code from codeToICodeRev. However, that is already reversed. *) raise InternalError "codeToICodeBlock - BlockOpEqualByte" (* Already done *) and codeConditionRev(condition, context, jumpOn, jumpLabel, tailCode) = (* General case. Load the value into a register and compare it with 1 (true) *) let val ccRef = newCCRef() val (testCode, testReg) = codeToPRegRev(condition, context, tailCode) val noJumpLabel = newLabel() in BlockLabel noJumpLabel :: BlockFlow(Conditional{ccRef=ccRef, condition=if jumpOn then JE else JNE, trueJump=jumpLabel, falseJump=noJumpLabel}) :: BlockSimple(CompareLiteral{arg1=RegisterArgument testReg, arg2=tag 1, opSize=OpSize32, ccRef=ccRef}) :: testCode end (* The fixed precision functions are also used for arbitrary precision but instead of raising Overflow we need to jump to the code that handles the long format. *) and codeFixedPrecisionArith(BuiltIns.ArithAdd, arg1, BICConstnt(value, _), context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. This should always be a tagged value if the type is correct. However it's possible for it not to be if we have an arbitrary precision value. There will be a run-time check that the value is short and so this code will never be executed. It will generally be edited out by the higher level be we can't rely on that. 
Because it's never executed we can just put in zero. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPReg(arg1, context) in arg1Code @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithAdd, BICConstnt(value, _), arg2, context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg2Code @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg2, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithAdd, arg1, arg2, context, target, onOverflow) = let val aReg3 = newPReg() and ccRef = newCCRef() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg1Code @ arg2Code @ (* Subtract the tag bit from the second argument, do the addition and check for overflow. *) (* TODO: We should really do the detagging in the transform phase. It can make a better choice of the argument if one of the arguments is already untagged or if we have a constant argument. *) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=RegisterArgument aReg2, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end (* Subtraction. We can handle the special case of the second argument being a constant but not the first. *) | codeFixedPrecisionArith(BuiltIns.ArithSub, arg1, BICConstnt(value, _), context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPReg(arg1, context) in arg1Code @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithSub, arg1, arg2, context, target, onOverflow) = let val aReg3 = newPReg() val ccRef = newCCRef() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg1Code @ arg2Code @ (* Do the subtraction, test for overflow and afterwards add in the tag bit. *) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=RegisterArgument aReg2, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, arg1, BICConstnt(value, _), context, target, onOverflow) = let val aReg = newPReg() and argUntagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* Is it better to untag the constant or the register argument? 
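Here the register argument is untagged by subtracting one, giving 2*a, and multiplied by the untagged constant c; adding one to the product 2*a*c then restores the tag, with the overflow check taken from the multiplication itself.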
*) val constVal = if isShort value then Word.toLargeIntX(toShort value) else 0 in codeToICodeTarget(arg1, context, false, aReg) @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=argUntagged, operand1=aReg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=argUntagged, operand2=IntegerConstant constVal, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, BICConstnt(value, _), arg2, context, target, onOverflow) = let val aReg = newPReg() and argUntagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* Is it better to untag the constant or the register argument? *) val constVal = if isShort value then Word.toLargeIntX(toShort value) else 0 in codeToICodeTarget(arg2, context, false, aReg) @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=argUntagged, operand1=aReg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=argUntagged, operand2=IntegerConstant constVal, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, arg1, arg2, context, target, onOverflow) = let val aReg1 = newPReg() and aReg2 = newPReg() and arg1Untagged = newUReg() and arg2Untagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* This is almost the same as the word operation except we use a signed shift and check for overflow. *) in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift one argument and subtract the tag from the other. It's possible this could be reordered if we have a value that is already untagged. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true (* Signed shift here. *), cache=NONE, opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=arg2Untagged, operand1=aReg2, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=arg1Untagged, operand2=RegisterArgument arg2Untagged, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithQuot, arg1, arg2, context, target, _) = let val aReg1 = newPReg() and aReg2 = newPReg() val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift both of the arguments to remove the tags. We don't test for zero here - that's done explicitly. 
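Unlike addition and subtraction there is no short cut here: both operands are untagged with a signed shift, the signed division produces an untagged quotient and remainder, and the required result is re-tagged with TagValue.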
*) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = true, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=quotient, dest=target, isSigned=true, opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithRem, arg1, arg2, context, target, _) = let (* Identical to Quot except that the result is the remainder. *) val aReg1 = newPReg() and aReg2 = newPReg() val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift both of the arguments to remove the tags. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = true, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=remainder, dest=target, isSigned=true, opSize=polyWordOpSize})] end | codeFixedPrecisionArith(_, _, _, _, _, _) = raise InternalError "codeToICode: FixedPrecisionArith - unimplemented operation" (* Generate code for floating point arguments where one of the arguments must be in a register. If the first argument is in a register use that, if the second is in a register and it's commutative use that otherwise load the first argument into a register. *) and codeFPBinaryArgsRev(arg1, arg2, precision, commutative, context, tailCode) = let val (arg1Code, arg1Value) = codeFPArgument(arg1, precision, context, tailCode) val (arg2Code, arg2Value) = codeFPArgument(arg2, precision, context, arg1Code) in case (arg1Value, arg2Value, commutative) of (RegisterArgument fpReg, _, _) => (arg2Code, fpReg, arg2Value) | (_, RegisterArgument fpReg, Commutative) => (arg2Code, fpReg, arg1Value) | (arg1Val, _, _) => let val fpReg = newUReg() val moveOp = case precision of BuiltIns.PrecDouble => MoveDouble | BuiltIns.PrecSingle => MoveFloat in (BlockSimple(LoadArgument{source=arg1Val, dest=fpReg, kind=moveOp}) :: arg2Code, fpReg, arg2Value) end end (* Generate code to evaluate a floating point argument. The aim of this code is to avoid the overhead of untagging a short-precision floating point value in memory. *) and codeFPArgument(BICConstnt(value, _), _, _, tailCode) = let val argVal = (* Single precision constants in 64-bit mode are represented by the value shifted left 32 bits. A word is shifted left one bit so the result is 0w31. *) if isShort value then IntegerConstant(Word.toLargeInt(Word.>>(toShort value, 0w31))) else AddressConstant value in (tailCode, argVal) end | codeFPArgument(arg, precision, context, tailCode) = ( case (precision, wordSize) of (BuiltIns.PrecSingle, 0w8) => (* If this is a single precision value and the word size is 8 the values are tagged. If it is memory we can load the value directly from the high-order word. 
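On this little-endian architecture the float occupies the top 32 bits of the tagged 64-bit word, so adding 4 to the offset addresses it directly; if the value is in a register it has to be untagged with UntagFloat instead.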
*) let val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (code, result, _) = codeToICodeRev(arg, context, false, Allowed memOrReg, tailCode) in case result of RegisterArgument argReg => let val fpReg = newUReg() in (BlockSimple(UntagFloat{source=RegisterArgument argReg, dest=fpReg, cache=NONE}) :: code, RegisterArgument fpReg) end | MemoryLocation{offset, base, index, ...} => (code, MemoryLocation{offset=offset+4, base=base, index=index, cache=NONE}) | _ => raise InternalError "codeFPArgument" end | _ => (* Otherwise the value is boxed. *) let val (argCode, argReg) = codeToPRegRev(arg, context, tailCode) in (argCode, wordAt argReg) end ) (* Code an address. The index is optional. *) and codeAddressRev({base, index=SOME index, offset}, true (* byte move *), context, tailCode) = let (* Byte address with index. The index needs to be untagged. *) val indexReg1 = newUReg() val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) val (codeIndex, indexReg) = codeToPRegRev(index, context, codeBase) val untagCode = [BlockSimple(UntagValue{dest=indexReg1, source=indexReg, isSigned=false, cache=NONE, opSize=polyWordOpSize})] val (codeLoadAddr, realBase) = if targetArch = ObjectId32Bit then let val addrReg = newUReg() in ([BlockSimple(LoadEffectiveAddress{ base=SOME baseReg, offset=0, index=ObjectIndex, dest=addrReg, opSize=nativeWordOpSize})], addrReg) end else ([], baseReg) val memResult = {base=realBase, offset=Word.toInt offset, index=MemIndex1 indexReg1, cache=NONE} in (codeLoadAddr @ codeIndex, untagCode, memResult) end | codeAddressRev({base, index=SOME index, offset}, false (* word move *), context, tailCode) = let (* Word address with index. We can avoid untagging the index by adjusting the multiplier and offset *) val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) val (codeIndex, indexReg) = codeToPRegRev(index, context, codeBase) val (codeLoadAddr, realBase) = if targetArch = ObjectId32Bit then let val addrReg = newUReg() in ([BlockSimple(LoadEffectiveAddress{ base=SOME baseReg, offset=0, index=ObjectIndex, dest=addrReg, opSize=nativeWordOpSize})], addrReg) end else ([], baseReg) val iOffset = Word.toInt offset handle Overflow => 0 (* See below: special case may not happen. *) val memResult = if wordSize = 0w8 then {base=realBase, offset=iOffset-4, index=MemIndex4 indexReg, cache=NONE} else {base=realBase, offset=iOffset-2, index=MemIndex2 indexReg, cache=NONE} in (codeLoadAddr @ codeIndex, [], memResult) end | codeAddressRev({base, index=NONE, offset}, _, context, tailCode) = let val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) (* A negative value for "offset" will produce an overflow at compile time. It should never be reached at run-time because of bounds checking. See Test192. *) val iOffset = Word.toInt offset handle Overflow => 0 val memResult = {offset=iOffset, base=baseReg, index=memIndexOrObject, cache=NONE} in (codeBase, [], memResult) end and codeAddress(addr, isByte, context) = let val (code, untag, res) = codeAddressRev(addr, isByte, context, []) in (List.rev code, untag, res) end (* C-memory operations are slightly different. The base address is a LargeWord.word value. The index is a byte index so may have to be untagged. *) and codeCAddress({base, index=SOME index, offset}, 0w1, context) = let (* Byte address with index. The index needs to be untagged. 
*) val untaggedBaseReg = newUReg() and indexReg1 = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) and (codeIndex, indexReg) = codeToPReg(index, context) val untagCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord}), BlockSimple(UntagValue{dest=indexReg1, source=indexReg, isSigned=false, cache=NONE, opSize=polyWordOpSize})] val memResult = {base=untaggedBaseReg, offset=Word.toInt offset, index=MemIndex1 indexReg1, cache=NONE} in (codeBase @ codeIndex, untagCode, memResult) end | codeCAddress({base, index=SOME index, offset}, size, context) = let (* Non-byte address with index. By using an appropriate multiplier we can avoid having to untag the index. *) val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) and (codeIndex, indexReg) = codeToPReg(index, context) val untagCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] val memResult = case size of 0w2 => {base=untaggedBaseReg, offset=Word.toInt offset-1, index=MemIndex1 indexReg, cache=NONE} | 0w4 => {base=untaggedBaseReg, offset=Word.toInt offset-2, index=MemIndex2 indexReg, cache=NONE} | 0w8 => {base=untaggedBaseReg, offset=Word.toInt offset-4, index=MemIndex4 indexReg, cache=NONE} | _ => raise InternalError "codeCAddress: unknown size" in (codeBase @ codeIndex, untagCode, memResult) end | codeCAddress({base, index=NONE, offset}, _, context) = let val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) val untagCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] val memResult = {offset=Word.toInt offset, base=untaggedBaseReg, index=NoMemIndex, cache=NONE} in (codeBase, untagCode, memResult) end (* Return an untagged value. If we have a constant just return it. Otherwise return the code to evaluate the argument, the code to untag it and the reference to the untagged register. *) and codeAsUntaggedToRegRev(BICConstnt(value, _), isSigned, _, tailCode) = let (* Should always be short except for unreachable code. *) val untagReg = newUReg() val cval = if isShort value then toShort value else 0w0 val cArg = IntegerConstant(if isSigned then Word.toLargeIntX cval else Word.toLargeInt cval) (* Don't tag *) val untag = [BlockSimple(LoadArgument{source=cArg, dest=untagReg, kind=movePolyWord})] in (tailCode, untag, untagReg) (* Don't tag. *) end | codeAsUntaggedToRegRev(arg, isSigned, context, tailCode) = let val untagReg = newUReg() val (code, srcReg) = codeToPRegRev(arg, context, tailCode) val untag = [BlockSimple(UntagValue{source=srcReg, dest=untagReg, isSigned=isSigned, cache=NONE, opSize=polyWordOpSize})] in (code, untag, untagReg) end and codeAsUntaggedToReg(arg, isSigned, context) = let val (code, untag, untagReg) = codeAsUntaggedToRegRev(arg, isSigned, context, []) in (List.rev code, untag, untagReg) end (* Return the argument as an untagged value. We separate evaluating the argument from untagging because we may have to evaluate other arguments and that could involve a function call and we can't save the value to the stack after we've untagged it. Currently this is only used for byte values but we may have to be careful if we use it for word values on the X86. Moving an untagged value into a register might look like loading a constant address. 
*) and codeAsUntaggedByte(BICConstnt(value, _), isSigned, _) = let val cval = if isShort value then toShort value else 0w0 val cArg = IntegerConstant(if isSigned then Word.toLargeIntX cval else Word.toLargeInt cval) (* Don't tag *) in ([], [], cArg) end | codeAsUntaggedByte(arg, isSigned, context) = let val untagReg = newUReg() val (code, argReg) = codeToPReg(arg, context) val untag = [BlockSimple(UntagValue{source=argReg, dest=untagReg, isSigned=isSigned, cache=NONE, opSize=OpSize32})] in (code, untag, RegisterArgument untagReg) end (* Allocate memory. This is used both for true variable length cells and also for longer constant length cells. *) and allocateMemoryVariable(numWords, flags, initial, context, destination) = let val target = asTarget destination (* With the exception of flagReg all these registers are modified by the code. So, we have to copy the size value into a new register. *) val sizeReg = newPReg() and initReg = newPReg() val sizeReg2 = newPReg() val untagSizeReg = newUReg() and initAddrReg = newPReg() and allocReg = newPReg() val sizeCode = codeToICodeTarget(numWords, context, false, sizeReg) and (flagsCode, flagUntag, flagArg) = codeAsUntaggedByte(flags, false, context) (* We're better off deferring the initialiser if possible. If the value is a constant we don't have to save it. *) val (initCode, initResult, _) = codeToICode(initial, context, false, Allowed allowDefer) in (sizeCode @ flagsCode @ initCode @ [(* We need to copy the size here because AllocateMemoryVariable modifies the size in order to store the length word. This is unfortunate especially as we're going to untag it anyway. *) BlockSimple(LoadArgument{source=RegisterArgument sizeReg, dest=sizeReg2, kind=movePolyWord}), BlockSimple(AllocateMemoryVariable{size=sizeReg, dest=allocReg, saveRegs=[]})] @ flagUntag @ [BlockSimple(StoreArgument{ source=flagArg, base=allocReg, offset= ~1, index=memIndexOrObject, kind=MoveByte, isMutable=false}), (* We need to copy the address here because InitialiseMem modifies all its arguments. *) BlockSimple( if targetArch = ObjectId32Bit then LoadEffectiveAddress{ base=SOME allocReg, offset=0, index=ObjectIndex, dest=initAddrReg, opSize=nativeWordOpSize} else LoadArgument{source=RegisterArgument allocReg, dest=initAddrReg, kind=movePolyWord}), BlockSimple(UntagValue{source=sizeReg2, dest=untagSizeReg, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(LoadArgument{source=initResult, dest=initReg, kind=movePolyWord}), BlockSimple(InitialiseMem{size=untagSizeReg, init=initReg, addr=initAddrReg}), BlockSimple InitialisationComplete, BlockSimple(LoadArgument{source=RegisterArgument allocReg, dest=target, kind=movePolyWord})], RegisterArgument target, false) end (*Turn the codetree structure into icode. *) val bodyContext = {loopArgs=NONE, stackPtr=0, currHandler=NONE, overflowBlock=ref NONE} val (bodyCode, _, bodyExited) = codeToICodeRev(body, bodyContext, true, SpecificPReg resultTarget, beginInstructions) val icode = if bodyExited then bodyCode else returnInstruction(bodyContext, resultTarget, bodyCode) (* Turn the icode list into basic blocks. The input list is in reverse so as part of this we reverse the list. *) local val resArray = Array.array(!labelCounter, BasicBlock{ block=[], flow=ExitCode }) fun createEntry (blockNo, block, flow) = Array.update(resArray, blockNo, BasicBlock{ block=block, flow=flow}) fun splitCode([], _, _) = (* End of code. We should have had a BeginFunction. 
*) raise InternalError "splitCode - no begin" | splitCode(BlockBegin args :: _, sinceLabel, flow) = (* Final instruction. Create the initial block and exit. *) createEntry(0, BeginFunction args ::sinceLabel, flow) | splitCode(BlockSimple instr :: rest, sinceLabel, flow) = splitCode(rest, instr :: sinceLabel, flow) | splitCode(BlockLabel label :: rest, sinceLabel, flow) = (* Label - finish this block and start another. *) ( createEntry(label, sinceLabel, flow); (* Default to a jump to this label. That is used if we have assumed a drop-through. *) splitCode(rest, [], Unconditional label) ) | splitCode(BlockExit instr :: rest, _, _) = splitCode(rest, [instr], ExitCode) | splitCode(BlockFlow flow :: rest, _, _) = splitCode(rest, [], flow) | splitCode(BlockRaiseAndHandle(instr, handler) :: rest, _, _) = splitCode(rest, [instr], UnconditionalHandle handler) | splitCode(BlockOptionalHandle{call, handler, label} :: rest, sinceLabel, flow) = let (* A function call within a handler. This could go to the handler but if there is no exception will go to the next instruction. Also includes JumpLoop since the stack check could result in an Interrupt exception. *) in createEntry(label, sinceLabel, flow); splitCode(rest, [call], ConditionalHandle{handler=handler, continue=label}) end in val () = splitCode(icode, [], ExitCode) val resultVector = Array.vector resArray end open ICODETRANSFORM val pregProperties = Vector.fromList(List.rev(! pregPropList)) in codeICodeFunctionToX86{blocks = resultVector, functionName = name, pregProps = pregProperties, ccCount= ! ccRefCounter, debugSwitches = debugSwitches, resultClosure = resultClosure} end fun gencodeLambda(lambda, debugSwitches, closure) = let open DEBUG Universal (*val debugSwitches = [tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), tagInject assemblyCodeTag true] @ debugSwitches*) in codeFunctionToX86(lambda, debugSwitches, closure) end structure Foreign = X86FOREIGN structure Sharing = struct type backendIC = backendIC and bicLoadForm = bicLoadForm and argumentType = argumentType and closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeToX86Code.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeToX86Code.ML index ec321ae5..e0121162 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeToX86Code.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeToX86Code.ML @@ -1,2118 +1,2125 @@ (* Copyright David C. J. Matthews 2016-19 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86ICodeToX86Code( structure X86CODE: X86CODESIG structure X86OPTIMISE: sig type operation type code type operations = operation list type closureRef val generateCode: {code: code, ops: operations, labelCount: int, resultClosure: closureRef } -> unit structure Sharing: sig type operation = operation type code = code type closureRef = closureRef end end structure DEBUG: DEBUGSIG structure ICODE: ICodeSig structure IDENTIFY: X86IDENTIFYREFSSIG structure INTSET: INTSETSIG structure PRETTY: PRETTYSIG structure STRONGLY: sig val stronglyConnectedComponents: {nodeAddress: 'a -> int, arcs: 'a -> int list } -> 'a list -> 'a list list end sharing X86CODE.Sharing = ICODE.Sharing = X86OPTIMISE.Sharing = IDENTIFY.Sharing = INTSET ): X86ICODEGENERATESIG = struct open IDENTIFY open ICODE open X86CODE open Address exception InternalError = Misc.InternalError fun asGenReg(GenReg r) = r | asGenReg _ = raise InternalError "asGenReg" and asFPReg(FPReg r) = r | asFPReg _ = raise InternalError "asFPReg" and asXMMReg(XMMReg r) = r | asXMMReg _ = raise InternalError "asXMMReg" (* tag a short constant *) fun tag c = 2 * c + 1 local val regs = case targetArch of Native32Bit => [edi, esi, edx, ecx, ebx, eax] | Native64Bit => [r14, r13, r12, r11, r10, r9, r8, edi, esi, edx, ecx, ebx, eax] | ObjectId32Bit => [r14, r13, r12, r11, r10, r9, r8, edi, esi, edx, ecx, eax] in val generalRegisters = List.map GenReg regs end fun opSizeToMove OpSize32 = Move32 | opSizeToMove OpSize64 = Move64 fun icodeToX86Code{blocks, functionName, stackRequired, debugSwitches, allocatedRegisters, resultClosure, ...} = let fun argAsGenReg(RegisterArg(GenReg r)) = r | argAsGenReg _ = raise InternalError "argAsGenReg" fun sourceAsGenRegOrMem(RegisterArg(GenReg r)) = RegisterArg r | sourceAsGenRegOrMem(MemoryArg{offset, base=baseReg, index}) = MemoryArg{base=baseReg, offset=offset, index=index} | sourceAsGenRegOrMem(NonAddressConstArg v) = NonAddressConstArg v | sourceAsGenRegOrMem(AddressConstArg v) = AddressConstArg v | sourceAsGenRegOrMem _ = raise InternalError "sourceAsGenRegOrMem" and sourceAsXMMRegOrMem(RegisterArg(XMMReg r)) = RegisterArg r | sourceAsXMMRegOrMem(MemoryArg{offset, base=baseReg, index}) = MemoryArg{base=baseReg, offset=offset, index=index} | sourceAsXMMRegOrMem(NonAddressConstArg v) = NonAddressConstArg v | sourceAsXMMRegOrMem(AddressConstArg v) = AddressConstArg v | sourceAsXMMRegOrMem _ = raise InternalError "sourceAsGenRegOrMem" (* Moves and loads. *) fun llLoadArgument({ source, dest=GenReg destReg, kind=Move64Bit}, code) = Move { source=sourceAsGenRegOrMem source, destination=RegisterArg destReg, moveSize=Move64 } :: code | llLoadArgument({ source=MemoryArg mLoc, dest=GenReg destReg, kind=MoveByte}, code) = (* Load from memory. *) Move{moveSize=Move8, source=MemoryArg mLoc, destination=RegisterArg destReg} :: code | llLoadArgument({ source=MemoryArg mLoc, dest=GenReg destReg, kind=Move16Bit}, code) = (* Load from memory. *) Move{moveSize=Move16, source=MemoryArg mLoc, destination=RegisterArg destReg} :: code | llLoadArgument({ source, dest=GenReg destReg, kind=Move32Bit}, code) = (* Load from memory. *) Move { source=sourceAsGenRegOrMem source, destination=RegisterArg destReg, moveSize=Move32 } :: code (* Load a floating point value. 
*) | llLoadArgument({source=MemoryArg{offset, base=baseReg, index}, dest=FPReg fpReg, kind=MoveDouble}, code) = moveToOutputFP(fpReg, FPLoadFromMemory{ address={base=baseReg, offset=offset, index=index}, precision=DoublePrecision } :: code) | llLoadArgument({source=AddressConstArg addrConst, dest=FPReg fpReg, kind=MoveDouble}, code) = moveToOutputFP(fpReg, FPLoadFromConst{ constant= addrConst, precision=DoublePrecision } :: code) | llLoadArgument({source=RegisterArg(FPReg fpSrc), dest=FPReg fpDest, kind=MoveDouble}, code) = (* Moving from one FP reg to another. Even if we are moving from FP0 we still do a load because FPStoreToFPReg adds one to the register number to account for one value on the stack. *) moveToOutputFP(fpDest, FPLoadFromFPReg{source=fpSrc, lastRef=false} :: code) (* Load or move from an XMM reg. *) | llLoadArgument({source, dest=XMMReg xmmRegReg, kind=MoveDouble}, code) = XMMArith { opc= SSE2MoveDouble, source=sourceAsXMMRegOrMem source, output=xmmRegReg } :: code (* Load a floating point value. *) | llLoadArgument({source=MemoryArg{offset, base=baseReg, index}, dest=FPReg fpReg, kind=MoveFloat}, code) = moveToOutputFP(fpReg, FPLoadFromMemory{ address={ base=baseReg, offset=offset, index=index }, precision=SinglePrecision } :: code) | llLoadArgument({source=AddressConstArg addrConst, dest=FPReg fpReg, kind=MoveFloat}, code) = moveToOutputFP(fpReg, FPLoadFromConst{ constant= addrConst, precision=SinglePrecision } :: code) (* Load or move from an XMM reg. *) | llLoadArgument({source, dest=XMMReg xmmRegReg, kind=MoveFloat}, code) = XMMArith { opc= SSE2MoveFloat, source=sourceAsXMMRegOrMem source, output=xmmRegReg } :: code (* Any other combinations are not allowed. *) | llLoadArgument _ = raise InternalError "codeGenICode: LoadArgument" (* Unless the destination is FP0 we need to store and pop. 
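An X87 load always pushes its result onto the top of the floating-point register stack, which is fp0 here, so for any other destination we follow the load with a store-and-pop into the required register, leaving the stack depth unchanged.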
*) and moveToOutputFP(fpDest, code) = if fpDest = fp0 then code else FPStoreToFPReg{output=fpDest, andPop=true} :: code (* Store to memory *) fun llStoreArgument{ source=RegisterArg(GenReg sourceReg), base, offset, index, kind=Move64Bit} = Move{source=RegisterArg sourceReg, destination=MemoryArg {base=base, offset=offset, index=index}, moveSize=opSizeToMove OpSize64} | llStoreArgument{ source=RegisterArg(GenReg sourceReg), base, offset, index, kind=MoveByte} = Move{moveSize=Move8, source=RegisterArg sourceReg, destination=MemoryArg {base=base, offset=offset, index=index}} | llStoreArgument{ source=RegisterArg(GenReg sourceReg), base, offset, index, kind=Move16Bit} = Move{moveSize=Move16, source=RegisterArg sourceReg, destination=MemoryArg {base=base, offset=offset, index=index}} | llStoreArgument{ source=RegisterArg(GenReg sourceReg), base, offset, index, kind=Move32Bit} = Move{source=RegisterArg sourceReg, destination=MemoryArg {base=base, offset=offset, index=index}, moveSize=opSizeToMove OpSize32} (* Store a short constant to memory *) | llStoreArgument{ source=NonAddressConstArg srcValue, base, offset, index, kind=Move64Bit} = Move{source=NonAddressConstArg srcValue, destination=MemoryArg {base=base, offset=offset, index=index}, moveSize=Move64} | llStoreArgument{ source=NonAddressConstArg srcValue, base, offset, index, kind=Move32Bit} = Move{source=NonAddressConstArg srcValue, destination=MemoryArg {base=base, offset=offset, index=index}, moveSize=Move32} | llStoreArgument{ source=NonAddressConstArg srcValue, base, offset, index, kind=MoveByte} = Move{moveSize=Move8, source=NonAddressConstArg srcValue, destination=MemoryArg{base=base, offset=offset, index=index}} (* Store a long constant to memory *) | llStoreArgument{ source=AddressConstArg srcValue, base, offset, index, kind} = ( (* This Move must be of a polyWord size. *) case (kind, polyWordOpSize) of (Move64Bit, OpSize64) => () | (Move32Bit, OpSize32) => () | _ => raise InternalError "Move of AddressConstArg"; Move{moveSize=opSizeToMove polyWordOpSize, source=AddressConstArg srcValue, destination=MemoryArg {base=base, offset=offset, index=index}} ) (* Store a floating point value. *) | llStoreArgument{source=RegisterArg(FPReg fpReg), offset, base=baseReg, index, kind=MoveDouble} = let val _ = fpReg = fp0 orelse raise InternalError "llStoreArgument: Store FPReg <> fp0" in FPStoreToMemory{ address={ base=baseReg, offset=offset, index=index}, precision=DoublePrecision, andPop=true } end | llStoreArgument{source=RegisterArg(XMMReg xmmRegReg), offset, base=baseReg, index, kind=MoveDouble} = XMMStoreToMemory { toStore=xmmRegReg, address={base=baseReg, offset=offset, index=index}, precision=DoublePrecision } (* Store a floating point value. 
*) | llStoreArgument{source=RegisterArg(FPReg fpReg), offset, base=baseReg, index, kind=MoveFloat} = let val _ = fpReg = fp0 orelse raise InternalError "llStoreArgument: Store FPReg <> fp0" in FPStoreToMemory{address={ base=baseReg, offset=offset, index=index}, precision=SinglePrecision, andPop=true } end | llStoreArgument{source=RegisterArg(XMMReg xmmRegReg), offset, base=baseReg, index, kind=MoveFloat} = XMMStoreToMemory { toStore=xmmRegReg, address={base=baseReg, offset=offset, index=index}, precision=SinglePrecision } | llStoreArgument _ = raise InternalError "llStoreArgument: StoreArgument" val numBlocks = Vector.length blocks fun getAllocatedReg r = Vector.sub(allocatedRegisters, r) val getAllocatedGenReg = asGenReg o getAllocatedReg and getAllocatedFPReg = asFPReg o getAllocatedReg and getAllocatedXMMReg = asXMMReg o getAllocatedReg fun codeExtIndex NoMemIndex = NoIndex | codeExtIndex(MemIndex1(PReg r)) = Index1(getAllocatedGenReg r) | codeExtIndex(MemIndex2(PReg r)) = Index2(getAllocatedGenReg r) | codeExtIndex(MemIndex4(PReg r)) = Index4(getAllocatedGenReg r) | codeExtIndex(MemIndex8(PReg r)) = Index8(getAllocatedGenReg r) | codeExtIndex ObjectIndex = raise InternalError "codeExtIndex: ObjectIndex" local fun codeExtArgument getReg (RegisterArgument(PReg r)) = RegisterArg(getReg r) | codeExtArgument _ (AddressConstant m) = AddressConstArg m | codeExtArgument _ (IntegerConstant i) = NonAddressConstArg i | codeExtArgument _ (MemoryLocation{base=PReg bReg, offset, index=ObjectIndex, cache=NONE}) = MemoryArg{base=ebx, index=Index4(getAllocatedGenReg bReg), offset=offset} | codeExtArgument _ (MemoryLocation{base=PReg bReg, offset, index, cache=NONE}) = MemoryArg{base=getAllocatedGenReg bReg, offset=offset, index=codeExtIndex index} | codeExtArgument getReg (MemoryLocation{cache=SOME(PReg r), ...}) = RegisterArg(getReg r) | codeExtArgument _ (StackLocation{wordOffset, cache=NONE, ...}) = MemoryArg{base=esp, offset=wordOffset*Word.toInt nativeWordSize, index=NoIndex} | codeExtArgument getReg (StackLocation{cache=SOME(PReg r), ...}) = RegisterArg(getReg r) | codeExtArgument _ (ContainerAddr _) = raise InternalError "codeExtArgument - ContainerAddr" in val codeExtArgument = codeExtArgument getAllocatedReg and codeExtArgumentAsGenReg = codeExtArgument getAllocatedGenReg and codeExtArgumentAsFPReg = codeExtArgument getAllocatedFPReg and codeExtArgumentAsXMMReg = codeExtArgument getAllocatedXMMReg end fun codeCallKind Recursive = NonAddressConstArg 0 (* Jump to the start *) | codeCallKind (ConstantCode v) = AddressConstArg v | codeCallKind FullCall = ( case targetArch of ObjectId32Bit => MemoryArg{base=ebx, index=Index4 edx, offset=0} | _ => MemoryArg{base=edx, index=NoIndex, offset=0} ) (* Move unless the registers are the same. *) fun moveIfNecessary({src, dst, kind}, code) = if src = dst then code else llLoadArgument({source=RegisterArg src, dest=dst, kind=kind}, code) fun opSizeToIMove OpSize64 = Move64Bit | opSizeToIMove OpSize32 = Move32Bit datatype llsource = StackSource of int | OtherSource of reg regOrMemoryArg fun sourceToX86Code(OtherSource r) = r | sourceToX86Code(StackSource wordOffset) = MemoryArg{base=esp, offset=wordOffset*Word.toInt nativeWordSize, index=NoIndex} local fun indexRegister NoIndex = NONE | indexRegister (Index1 r) = SOME r | indexRegister (Index2 r) = SOME r | indexRegister (Index4 r) = SOME r | indexRegister (Index8 r) = SOME r (* The registers are numbered from 0. Choose values that don't conflict with the stack addresses. 
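regNo below maps register n to ~1 - n, so registers get strictly negative node addresses while stack locations keep their non-negative word offsets.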
*) fun regNo r = ~1 - nReg r type node = {src: llsource, dst: destinations } fun nodeAddress({dst=RegDest r, ...}: node) = regNo r | nodeAddress({dst=StackDest a, ...}) = a fun arcs({src=StackSource wordOffset, ...}: node) = [wordOffset] | arcs{src=OtherSource(RegisterArg r), ...} = [regNo r] | arcs{src=OtherSource(MemoryArg{base, index, ...}), ...} = (case indexRegister index of NONE => [regNo(GenReg base)] | SOME r => [regNo(GenReg base), regNo(GenReg r)]) | arcs _ = [] in val stronglyConnected = STRONGLY.stronglyConnectedComponents { nodeAddress=nodeAddress, arcs=arcs } end (* This is a general function for moving values into registers or to the stack where it is possible that the source values might also be in use as destinations. The stack is used for destinations only for tail recursive calls. *) fun moveMultipleValues(moves, workReg: reg option, code) = let val _ = if List.exists(fn {dst=StackDest _, ...} => true | _ => false) moves andalso not(isSome workReg) then raise InternalError "no work reg" else () fun moveValues ([], code) = code (* We're done. *) | moveValues (arguments, code) = let (* stronglyConnectedComponents does two things. It detects loops where it's not possible to move items without breaking the loop but more importantly it orders the dependencies so that if there are no loops we can load the source and store it in the destination knowing that we won't overwrite anything we might later need. *) val ordered = stronglyConnected arguments fun isFPReg(GenReg _) = false | isFPReg(XMMReg _) = true | isFPReg(FPReg _) = true fun moveEachValue ([], code) = code | moveEachValue ([{dst=RegDest reg, src as OtherSource(RegisterArg r)}] :: rest, code) = (* Source and dest are both regs - only move if they're different. *) if r = reg then moveEachValue(rest, code) else moveEachValue(rest, llLoadArgument({source=sourceToX86Code src, dest=reg, kind=if isFPReg reg then MoveDouble else moveNativeWord}, code)) + | moveEachValue ([{dst=RegDest reg, src as StackSource _}] :: rest, code) = + (* If loading from the stack always use native word. The value could be a stack address. *) + moveEachValue(rest, llLoadArgument({source=sourceToX86Code src, dest=reg, kind=moveNativeWord}, code)) + | moveEachValue ([{dst=RegDest reg, src}] :: rest, code) = (* Load from store or a constant. Have to use movePolyWord if it's an address constant. *) moveEachValue(rest, llLoadArgument({source=sourceToX86Code src, dest=reg, kind=movePolyWord}, code)) | moveEachValue ([{dst=StackDest _, src=OtherSource(MemoryArg _ )}] :: _, _) = raise InternalError "moveEachValue - MemoryArgument" | moveEachValue ([{dst=StackDest addr, src as StackSource wordOffset}] :: rest, code) = (* Copy a stack location - needs a load and store unless the address is the same. *) if addr = wordOffset then moveEachValue(rest, code) else let val workReg = valOf workReg in moveEachValue(rest, llStoreArgument{source=RegisterArg workReg, base=esp, index=NoIndex, offset = addr*Word.toInt nativeWordSize, kind=moveNativeWord} :: llLoadArgument({source=sourceToX86Code src, dest=workReg, kind=moveNativeWord}, code)) end | moveEachValue ([{dst=StackDest addr, src}] :: rest, code) = (* Store from a register or a constant. *) moveEachValue(rest, llStoreArgument{ source=sourceToX86Code src, base=esp, index=NoIndex, offset = addr*Word.toInt nativeWordSize, kind=moveNativeWord} :: code) | moveEachValue((cycle as first :: _ :: _) :: rest, code) = (* We have a cycle. *) let (* We need to exchange some of the arguments. 
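For instance, a simple two-element cycle in which each of two registers must receive the other's value can be completed with a single XCHG.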
Doing an exchange here will set the destination with the correct source. However we have to process every subsequent entry with the swapped registers. That may well mean that one of those entries becomes trivial. Using XCHG means that we can move N registers in N-1 exchanges. We also need to rerun stronglyConnectedComponents on at least the rest of this cycle. It's easiest to flatten the rest and do everything. *) (* Try to find either a register-register move or a register-stack move. If not use the first. If there's a stack-register move there will also be a register-stack so we don't need to look for both. *) val {dst=selectDst, src=selectSrc} = case List.find(fn {src=OtherSource(RegisterArg _), dst=RegDest _} => true | _ => false) cycle of SOME found => found | _ => ( case List.find(fn {dst=RegDest _, ...} => true | _ => false) cycle of SOME found => found | NONE => first ) (* This includes this entry but after the swap we'll eliminate it. *) val flattened = List.foldl(fn (a, b) => a @ b) [] (cycle :: rest) val destAsSource = case selectDst of RegDest reg => OtherSource(RegisterArg reg) | StackDest s => StackSource s (* Source is not an equality type. We can't currently handle the situation where the source is a memory location. *) fun match(OtherSource(RegisterArg r1), OtherSource(RegisterArg r2)) = r1 = r2 | match(StackSource s1, StackSource s2) = s1 = s2 | match(OtherSource(MemoryArg _), _) = raise InternalError "moveEachValue: cycle" | match _ = false fun swapSources{src, dst} = if match(src, selectSrc) then {src=destAsSource, dst=dst} else if match(src, destAsSource) then {src=selectSrc, dst=dst} else {src=src, dst=dst} (* Try to use register to register exchange if we can. A register-to-memory exchange involves a bus lock and we'd like to avoid that. *) val exchangeCode = case (selectDst, selectSrc) of (RegDest(GenReg regA), OtherSource(RegisterArg(GenReg regB))) => XChng { reg=regA, arg=RegisterArg regB, opSize=nativeWordOpSize } :: code | (RegDest(XMMReg regA), OtherSource(RegisterArg(XMMReg regB))) => (* This is the only case where we can have a cycle with SSE2 regs. There are various ways of doing it but XORs are probably the easiest. *) XMMArith{opc=SSE2Xor, source=RegisterArg regA, output=regB} :: XMMArith{opc=SSE2Xor, source=RegisterArg regB, output=regA} :: XMMArith{opc=SSE2Xor, source=RegisterArg regA, output=regB} :: code | (RegDest _, OtherSource(RegisterArg _)) => raise InternalError "moveEachValue: invalid register combination" | (RegDest regA, src as StackSource addr) => let val workReg = valOf workReg in llStoreArgument{source=RegisterArg workReg, base=esp, index=NoIndex, offset = addr*Word.toInt nativeWordSize, kind=moveNativeWord} :: XChng { reg=asGenReg regA, arg=RegisterArg(asGenReg workReg), opSize=nativeWordOpSize } :: llLoadArgument({source=sourceToX86Code src, dest=workReg, kind=moveNativeWord}, code) end | (StackDest addr, OtherSource(RegisterArg regA)) => let (* This doesn't actually occur because we always find the case above. 
*) val workReg = valOf workReg in llStoreArgument{source=RegisterArg workReg, base=esp, index=NoIndex, offset = addr*Word.toInt nativeWordSize, kind=moveNativeWord} :: XChng { reg=asGenReg regA, arg=RegisterArg (asGenReg workReg), opSize=nativeWordOpSize } :: llLoadArgument({ source=MemoryArg{base=esp, offset=addr*Word.toInt nativeWordSize, index=NoIndex}, dest=workReg, kind=moveNativeWord}, code) end | (StackDest addr1, StackSource addr2) => let val workReg = valOf workReg (* This can still happen if we have argument registers that need to be loaded from stack locations and those argument registers happen to contain the values to be stored into those stack locations. e.g. ebx => S8; eax => S7; S8 => eax; S7 => eax. Eliminating the registers results in a cycle. It may be possible to avoid this by excluding the argument registers (eax; ebx; r8; r9; r10) from holding values in the area to be overwritten. *) in llStoreArgument{source=RegisterArg workReg, base=esp, index=NoIndex, offset = addr1*Word.toInt nativeWordSize, kind=moveNativeWord} :: XChng { reg=asGenReg workReg, arg=MemoryArg{base=esp, offset=addr2*Word.toInt nativeWordSize, index=NoIndex}, opSize=nativeWordOpSize } :: llLoadArgument({ source=MemoryArg{base=esp, offset=addr1*Word.toInt nativeWordSize, index=NoIndex}, dest=workReg, kind=moveNativeWord}, code) end | _ => raise InternalError "moveEachValue: cycle" in moveValues(List.map swapSources flattened, exchangeCode) end | moveEachValue(([]) :: _, _) = (* This should not happen - avoid warning. *) raise InternalError "moveEachValue - empty set" in moveEachValue(ordered, code) end in moveValues(moves, code) end (* Where we have multiple specific registers as either source or destination there is the potential that a destination register if currently in use as a source. *) fun moveMultipleRegisters(regPairList, code) = let val regPairsAsDests = List.map(fn {src, dst} => {src=OtherSource(RegisterArg src), dst=RegDest dst}) regPairList in moveMultipleValues(regPairsAsDests, NONE, code) end val outputLabelCount = ref 0 val blockToLabelMap = Array.array(numBlocks, ~1) fun makeLabel() = Label{labelNo = ! outputLabelCount} before outputLabelCount := !outputLabelCount + 1 fun getBlockLabel blockNo = case Array.sub(blockToLabelMap, blockNo) of ~1 => let val label as Label{labelNo} = makeLabel() val () = Array.update(blockToLabelMap, blockNo, labelNo) in label end | n => Label{labelNo=n} (* The profile object is a single mutable with the F_bytes bit set. *) local val v = RunCall.allocateByteMemory(0w1, Word.fromLargeWord(Word8.toLargeWord(Word8.orb(F_mutable, F_bytes)))) fun clear 0w0 = () | clear i = (assignByte(v, i-0w1, 0w0); clear (i-0w1)) val () = clear wordSize in val profileObject = toMachineWord v end (* Switch to indicate if we want to trace where live data has been allocated. *) val addAllocatingFunction = DEBUG.getParameter DEBUG.profileAllocationTag debugSwitches = 1 fun llAllocateMemoryOperation ({ size, flags, dest, saveRegs}, code) = let val toReg = asGenReg dest val preserve = saveRegs (* Allocate memory. N.B. Instructions are in reverse order. 
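As elsewhere in this code generator the list is built with the last instruction first, so the AllocStore that appears at the end of the lists constructed below is actually the first instruction executed.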
*) fun allocStore{size, flags, output, preserve} = if targetArch = Native64Bit andalso flags <> 0w0 then [Move{moveSize=Move8, source=NonAddressConstArg(Word8.toLargeInt flags), destination=MemoryArg {offset= ~1, base=output, index=NoIndex}}, Move{source=NonAddressConstArg(LargeInt.fromInt size), destination=MemoryArg {offset= ~ (Word.toInt wordSize), base=output, index=NoIndex}, moveSize=opSizeToMove polyWordOpSize}, AllocStore{size=size, output=output, saveRegs=preserve}] else let val lengthWord = IntInf.orb(IntInf.fromInt size, IntInf.<<(Word8.toLargeInt flags, 0w24)) in [Move{source=NonAddressConstArg lengthWord, destination=MemoryArg {offset= ~ (Word.toInt wordSize), base=output, index=NoIndex}, moveSize=opSizeToMove polyWordOpSize}, AllocStore{size=size, output=output, saveRegs=preserve}] end val allocCode = (* If we need to add the profile object *) if addAllocatingFunction then allocStore {size=size+1, flags=Word8.orb(flags, Address.F_profile), output=toReg, preserve=preserve} @ [Move{moveSize=opSizeToMove polyWordOpSize, source=AddressConstArg profileObject, destination=MemoryArg {base=toReg, offset=size*Word.toInt wordSize, index=NoIndex}}] else allocStore {size=size, flags=flags, output=toReg, preserve=preserve} (* Convert to an object index if necessary. *) val convertToObjId = if targetArch = ObjectId32Bit then [ ShiftConstant{ shiftType=SHR, output=toReg, shift=0w2, opSize=OpSize64 }, ArithToGenReg{ opc=SUB, output=toReg, source=RegisterArg ebx, opSize=nativeWordOpSize } ] else [] in convertToObjId @ allocCode @ code end (* Check the stack limit "register". This is used both at the start of a function for genuine stack checking but also in a loop to check for an interrupt. We need to save the registers even across an interrupt because it can be used if another thread wants a GC. *) fun testRegAndTrap(reg, entryPt, saveRegs) = let (* Normally we won't have a stack overflow so we will skip the check. *) val skipCheckLab = makeLabel() in (* Need it in reverse order. *) [ JumpLabel skipCheckLab, CallRTS{rtsEntry=entryPt, saveRegs=saveRegs}, ConditionalBranch{test=JNB, label=skipCheckLab}, ArithToGenReg{ opc=CMP, output=reg, source=MemoryArg{offset=memRegStackLimit, base=ebp, index=NoIndex}, opSize=nativeWordOpSize } ] end local val numRegisters = Vector.length allocatedRegisters val uses = Array.array(numRegisters, false) fun used(PReg r) = Array.update(uses, r, true) fun isUsed(PReg r) = Array.sub(uses, r) (* Set the registers used by the sources. This differs from getInstructionState in that we don't set the base register of a memory location to "used" if we can use the cache. *) fun argUses(RegisterArgument rarg) = used rarg | argUses(MemoryLocation { cache=SOME cr, ...}) = used cr | argUses(MemoryLocation { base, index, cache=NONE, ...}) = (used base; indexUses index) | argUses(StackLocation { cache=SOME rarg, ...}) = used rarg | argUses _ = () and indexUses NoMemIndex = () | indexUses(MemIndex1 arg) = used arg | indexUses(MemIndex2 arg) = used arg | indexUses(MemIndex4 arg) = used arg | indexUses(MemIndex8 arg) = used arg | indexUses ObjectIndex = () (* LoadArgument, TagValue, CopyToCache, UntagValue and BoxValue are eliminated if their destination is not used. In that case their source are not used either. 
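For example, a TagValue whose destination is never referenced generates no code at all, and because its source is then not marked as used the instruction that produced that source may in turn become dead.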
*) fun instructionUses(LoadArgument { source, dest, ...}) = if isUsed dest then argUses source else () | instructionUses(StoreArgument{ source, base, index, ...}) = (argUses source; used base; indexUses index) | instructionUses(LoadMemReg _) = () | instructionUses(BeginFunction _) = () | instructionUses(FunctionCall{regArgs, stackArgs, ...}) = (List.app(argUses o #1) regArgs; List.app argUses stackArgs) | instructionUses(TailRecursiveCall{regArgs, stackArgs, ...}) = (List.app(argUses o #1) regArgs; List.app(argUses o #src) stackArgs) | instructionUses(AllocateMemoryOperation _) = () | instructionUses(AllocateMemoryVariable{size, ...}) = used size | instructionUses(InitialiseMem{size, addr, init}) = (used size; used addr; used init) | instructionUses(InitialisationComplete) = () | instructionUses(BeginLoop) = () | instructionUses(JumpLoop{regArgs, stackArgs, ...}) = (List.app(argUses o #1) regArgs; List.app(argUses o #1) stackArgs) | instructionUses(RaiseExceptionPacket{packetReg}) = used packetReg | instructionUses(ReserveContainer _) = () | instructionUses(IndexedCaseOperation{testReg, ...}) = used testReg | instructionUses(LockMutable{addr}) = used addr | instructionUses(WordComparison{arg1, arg2, ...}) = (used arg1; argUses arg2) | instructionUses(CompareLiteral{arg1, ...}) = argUses arg1 | instructionUses(CompareByteMem{arg1={base, index, ...}, ...}) = (used base; indexUses index) | instructionUses(PushExceptionHandler _) = () | instructionUses(PopExceptionHandler _) = () | instructionUses(BeginHandler _) = () | instructionUses(ReturnResultFromFunction{resultReg, ...}) = used resultReg | instructionUses(ArithmeticFunction{operand1, operand2, ...}) = (used operand1; argUses operand2) | instructionUses(TestTagBit{arg, ...}) = argUses arg | instructionUses(PushValue {arg, ...}) = argUses arg | instructionUses(CopyToCache{source, dest, ...}) = if isUsed dest then used source else () | instructionUses(ResetStackPtr _) = () | instructionUses(StoreToStack {source, ...}) = argUses source | instructionUses(TagValue{source, dest, ...}) = if isUsed dest then used source else () | instructionUses(UntagValue{dest, cache=SOME cacheR, ...}) = if isUsed dest then used cacheR else () | instructionUses(UntagValue{source, dest, cache=NONE, ...}) = if isUsed dest then used source else () | instructionUses(LoadEffectiveAddress{base, index, ...}) = (case base of SOME bReg => used bReg | NONE => (); indexUses index) | instructionUses(ShiftOperation{operand, shiftAmount, ...}) = (used operand; argUses shiftAmount) | instructionUses(Multiplication{operand1, operand2, ...}) = (used operand1; argUses operand2) | instructionUses(Division{dividend, divisor, ...}) = (used dividend; argUses divisor) | instructionUses(AtomicExchangeAndAdd{base, source}) = (used base; used source) | instructionUses(BoxValue{source, dest, ...}) = if isUsed dest then used source else () | instructionUses(CompareByteVectors{vec1Addr, vec2Addr, length, ...}) = (used vec1Addr; used vec2Addr; used length) | instructionUses(BlockMove{srcAddr, destAddr, length, ...}) = (used srcAddr; used destAddr; used length) | instructionUses(X87Compare{arg1, arg2, ...}) = (used arg1; argUses arg2) | instructionUses(SSE2Compare{arg1, arg2, ...}) = (used arg1; argUses arg2) | instructionUses(X87FPGetCondition _) = () | instructionUses(X87FPArith{arg1, arg2, ...}) = (used arg1; argUses arg2) | instructionUses(X87FPUnaryOps{source, ...}) = used source | instructionUses(X87Float{source, ...}) = argUses source | instructionUses(SSE2Float{source, ...}) = argUses 
source | instructionUses(SSE2FPUnary{source, ...}) = argUses source | instructionUses(SSE2FPBinary{arg1, arg2, ...}) = (used arg1; argUses arg2) | instructionUses(TagFloat{source, dest, ...}) = if isUsed dest then used source else () | instructionUses(UntagFloat{dest, cache=SOME cacheR, ...}) = if isUsed dest then used cacheR else () | instructionUses(UntagFloat{source, dest, cache=NONE, ...}) = if isUsed dest then argUses source else () | instructionUses(GetSSE2ControlReg _) = () | instructionUses(SetSSE2ControlReg{source}) = used source | instructionUses(GetX87ControlReg _) = () | instructionUses(SetX87ControlReg{source}) = used source | instructionUses(X87RealToInt{source, ...}) = used source | instructionUses(SSE2RealToInt{source, ...}) = argUses source | instructionUses(SignExtend32To64{source, dest}) = if isUsed dest then argUses source else () | instructionUses(TouchArgument{source}) = used source (* Depth-first scan. *) val visited = Array.array(numBlocks, false) fun processBlocks blockNo = if Array.sub(visited, blockNo) then () (* Done or currently being done. *) else let val () = Array.update(visited, blockNo, true) val ExtendedBasicBlock { flow, block,...} = Vector.sub(blocks, blockNo) val () = (* Process the dependencies first. *) case flow of ExitCode => () | Unconditional m => processBlocks m | Conditional {trueJump, falseJump, ...} => (processBlocks trueJump; processBlocks falseJump) | IndexedBr cases => List.app processBlocks cases | SetHandler{ handler, continue } => (processBlocks handler; processBlocks continue) | UnconditionalHandle _ => () | ConditionalHandle { continue, ...} => processBlocks continue (* Now this block. *) in List.foldr(fn ({instr, ...}, ()) => instructionUses instr) () block end in val () = processBlocks 0 val isUsed = isUsed end (* Return the register part of a cached item. *) fun decache(StackLocation{cache=SOME r, ...}) = RegisterArgument r | decache(MemoryLocation{cache=SOME r, ...}) = RegisterArgument r | decache arg = arg (* Only get the registers that are actually used. *) val getSaveRegs = List.mapPartial(fn (reg as PReg r) => if isUsed reg then SOME(getAllocatedGenReg r) else NONE) fun codeExtended _ ({instr=LoadArgument{source, dest as PReg dreg, kind}, ...}, code) = if not (isUsed dest) then code else let val realDestReg = getAllocatedReg dreg in case source of RegisterArgument(PReg sreg) => (* Register to register move. Try to use the same register for the source as the destination to eliminate the instruction. *) (* If the source is the same as the destination we don't need to do anything. *) moveIfNecessary({src=getAllocatedReg sreg, dst=realDestReg, kind=kind}, code) | MemoryLocation{cache=SOME(PReg sreg), ...} => (* This is also a register to register move but because the original load is from memory it could be a byte or short precision value. *) let val moveKind = case kind of Move64Bit => Move64Bit | MoveByte => Move32Bit | Move16Bit => Move32Bit | Move32Bit => Move32Bit | MoveFloat => MoveFloat | MoveDouble => MoveDouble in moveIfNecessary({src=getAllocatedReg sreg, dst=realDestReg, kind=moveKind}, code) end (* TODO: Isn't this covered by codeExtArgument? It looks like it was added in the 32-in-64 changes. *) | StackLocation{cache=SOME(PReg sreg), ...} => moveIfNecessary({src=getAllocatedReg sreg, dst=realDestReg, kind=kind}, code) + | source as StackLocation _ => (* Always use native loads from the stack. 
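As with the corresponding case added to moveEachValue above, the value in the stack slot could itself be a stack address and so must be moved at the native word size rather than the poly-word size.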
*) + llLoadArgument({source=codeExtArgument source, dest=realDestReg, kind=moveNativeWord}, code) + | source => (* Loads of constants or from an address. *) llLoadArgument({source=codeExtArgument source, dest=realDestReg, kind=kind}, code) end | codeExtended _ ({instr=StoreArgument{ source, base=PReg bReg, offset, index, kind, ... }, ...}, code) = let val (baseReg, indexVal) = case index of ObjectIndex => (ebx, Index4(getAllocatedGenReg bReg)) | _ => (getAllocatedGenReg bReg, codeExtIndex index) in case (decache source, kind) of (RegisterArgument(PReg sReg), MoveByte) => if targetArch <> Native32Bit then llStoreArgument{ source=codeExtArgument source, base=baseReg, offset=offset, index=indexVal, kind=MoveByte} :: code else (* This is complicated on X86/32. We can't use edi or esi for the store registers. Instead we reserve ecx (see special case in "identify") and use that if we have to. *) let val realStoreReg = getAllocatedReg sReg val (moveCode, storeReg) = if realStoreReg = GenReg edi orelse realStoreReg = GenReg esi then (moveIfNecessary({src=realStoreReg, dst=GenReg ecx, kind=moveNativeWord}, code), GenReg ecx) else (code, realStoreReg) in llStoreArgument{ source=RegisterArg storeReg, base=baseReg, offset=offset, index=indexVal, kind=MoveByte} :: moveCode end | _ => llStoreArgument{ source=codeExtArgument source, base=baseReg, offset=offset, index=indexVal, kind=kind} :: code end | codeExtended _ ({instr=LoadMemReg { offset, dest=PReg pr}, ...}, code) = (* Load from the "memory registers" pointed at by ebp. *) (* Currently only used to load the thread Id which is a Poly word. *) llLoadArgument({source=MemoryArg{base=ebp, offset=offset, index=NoIndex}, dest=getAllocatedReg pr, kind=movePolyWord}, code) | codeExtended _ ({instr=BeginFunction{regArgs, ...}, ...}, code) = let val minStackCheck = 20 val saveRegs = List.mapPartial(fn (_, GenReg r) => SOME r | _ => NONE) regArgs val preludeCode = if stackRequired >= minStackCheck then let (* Compute the necessary amount in edi and compare that. *) val stackByteAdjust = ~ (Word.toInt nativeWordSize) * stackRequired val testEdiCode = testRegAndTrap (edi, StackOverflowCallEx, saveRegs) in (* N.B. In reverse order. *) testEdiCode @ [LoadAddress{output=edi, base=SOME esp, index=NoIndex, offset=stackByteAdjust, opSize=nativeWordOpSize}] end else testRegAndTrap (esp, StackOverflowCall, saveRegs) val usedRegs = List.filter (isUsed o #1) regArgs fun mkPair(PReg pr, rr) = {src=rr,dst=getAllocatedReg pr} val regPairs = List.map mkPair usedRegs in moveMultipleRegisters(regPairs, preludeCode @ code) end | codeExtended _ ({instr=TailRecursiveCall{callKind, regArgs=oRegArgs, stackArgs=oStackArgs, stackAdjust, currStackSize, workReg=PReg wReg}, ...}, code) = let val regArgs = List.map (fn (arg, reg) => (decache arg, reg)) oRegArgs and stackArgs = List.map(fn {src, stack } => {src=decache src, stack=stack}) oStackArgs val workReg = getAllocatedReg wReg (* We must leave stack entries as stack entries for the moment. *) fun codeArg(StackLocation{wordOffset, cache=NONE, ...}) = StackSource wordOffset | codeArg arg = OtherSource(codeExtArgument arg) val extStackArgs = map (fn {stack, src} => {dst=StackDest(stack+currStackSize), src=codeArg src}) stackArgs val extRegArgs = map (fn (a, r) => {src=codeArg a, dst=RegDest r}) regArgs (* Tail recursive calls are complicated because we generally have to overwrite the existing stack. That means storing the arguments in the right order to avoid overwriting a value that we are using for a different argument. 
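For example, if the outgoing call needs the value currently in stack slot 1 to end up in slot 2 and the value in slot 2 to end up in slot 1, storing either one first would destroy the other; the ordering, and any cycle-breaking through the work register, is handled by moveMultipleValues below.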
*) fun codeTailCall(arguments: {dst: destinations, src: llsource} list, stackAdjust, code) = if stackAdjust < 0 then let (* If the function we're calling takes more arguments on the stack than the current function we will have to extend the stack. Do that by pushing the argument whose offset is at -1. Then adjust all the offsets and repeat. *) val {src=argM1, ...} = valOf(List.find(fn {dst=StackDest ~1, ...} => true | _ => false) arguments) fun renumberArgs [] = [] | renumberArgs ({dst=StackDest ~1, ...} :: args) = renumberArgs args (* Remove the one we've done. *) | renumberArgs ({dst, src} :: args) = let val newDest = case dst of StackDest d => StackDest(d+1) | regDest => regDest val newSrc = case src of StackSource wordOffset => StackSource(wordOffset+1) | other => other in {dst=newDest, src=newSrc} :: renumberArgs args end in codeTailCall(renumberArgs arguments, stackAdjust+1, PushToStack(sourceAsGenRegOrMem(sourceToX86Code argM1)) :: code) end else let val loadArgs = moveMultipleValues(arguments, SOME workReg, code) in if stackAdjust = 0 then loadArgs else ResetStack{numWords=stackAdjust, preserveCC=false} :: loadArgs end in JumpAddress(codeCallKind callKind) :: codeTailCall(extStackArgs @ extRegArgs, stackAdjust+currStackSize, code) end | codeExtended _ ({instr=FunctionCall{callKind, regArgs=oRegArgs, stackArgs=oStackArgs, dest=PReg dReg, realDest, saveRegs}, ...}, code) = let val regArgs = List.map (fn (arg, reg) => (decache arg, reg)) oRegArgs and stackArgs = List.map decache oStackArgs val destReg = getAllocatedReg dReg fun pushStackArgs ([], _, code) = code | pushStackArgs (ContainerAddr {stackOffset, ...} ::args, argNum, code) = let val adjustedAddr = stackOffset+argNum (* If there is an offset relative to rsp we need to add this in. *) val addOffset = if adjustedAddr = 0 then [] else [ArithMemConst{opc=ADD, address={offset=0, base=esp, index=NoIndex}, source=LargeInt.fromInt(adjustedAddr*Word.toInt nativeWordSize), opSize=nativeWordOpSize}] in pushStackArgs(args, argNum+1, addOffset @ PushToStack(RegisterArg esp) :: code) end | pushStackArgs (StackLocation {wordOffset, container, field, ...} ::args, argNum, code) = let (* Have to adjust the offsets of stack arguments. *) val adjusted = StackLocation{wordOffset=wordOffset+argNum, container=container, field=field+argNum, cache=NONE} in pushStackArgs(args, argNum+1, PushToStack(codeExtArgumentAsGenReg adjusted) :: code) end | pushStackArgs (arg::args, argNum, code) = pushStackArgs(args, argNum+1, PushToStack(codeExtArgumentAsGenReg arg) :: code) val pushedArgs = pushStackArgs(stackArgs, 0, code (* Initial code *)) (* We have to adjust any stack offset to account for the arguments we've pushed. *) val numStackArgs = List.length stackArgs (* We don't currently allow the arguments to be memory locations and instead force them into registers. That may be simpler especially if we can get the values directly into the required register. *) fun getRegArgs(RegisterArgument(PReg pr), reg) = SOME{dst=reg, src=getAllocatedReg pr} | getRegArgs(StackLocation {cache=SOME(PReg pr), ...}, reg) = SOME{dst=reg, src=getAllocatedReg pr} | getRegArgs(MemoryLocation _, _) = raise InternalError "FunctionCall - MemoryLocation" | getRegArgs _ = NONE val loadRegArgs = moveMultipleRegisters(List.mapPartial getRegArgs regArgs, pushedArgs) (* These are all items we can load without requiring a source register. That includes loading from the stack. 
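Note that the stack offsets used here are adjusted by numStackArgs because the pushes generated above have already moved the stack pointer.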
*) fun getConstArgs((AddressConstant m, reg), code) = llLoadArgument({source=AddressConstArg m, dest=reg, kind=movePolyWord}, code) | getConstArgs((IntegerConstant i, reg), code) = llLoadArgument({source=NonAddressConstArg i, dest=reg, kind=movePolyWord}, code) | getConstArgs((StackLocation { cache=SOME _, ...}, _), code) = code | getConstArgs((StackLocation { wordOffset, ...}, reg), code) = llLoadArgument({source=MemoryArg{offset=(wordOffset+numStackArgs)*Word.toInt nativeWordSize, base=esp, index=NoIndex}, dest=reg, kind=moveNativeWord}, code) | getConstArgs((ContainerAddr {stackOffset, ...}, reg), code) = if stackOffset+numStackArgs = 0 then llLoadArgument({source=RegisterArg(GenReg esp), dest=reg, kind=moveNativeWord}, code) else LoadAddress{ output=asGenReg reg, offset=(stackOffset+numStackArgs)*Word.toInt nativeWordSize, base=SOME esp, index=NoIndex, opSize=nativeWordOpSize } :: code | getConstArgs((RegisterArgument _, _), code) = code | getConstArgs((MemoryLocation _, _), code) = code val loadConstArgs = List.foldl getConstArgs loadRegArgs regArgs (* Push the registers before the call and pop them afterwards. *) fun makeSaves([], code) = CallAddress(codeCallKind callKind) :: code | makeSaves(PReg reg::regs, code) = let val areg = getAllocatedGenReg reg val _ = areg = eax andalso raise InternalError "codeExtended: eax in save regs" val _ = if List.exists(fn (_, r) => r = GenReg areg) regArgs then raise InternalError "codeExtended: arg reg in save regs" else () in PopR areg :: makeSaves(regs, PushToStack(RegisterArg areg) :: code) end in moveIfNecessary({dst=destReg, src=realDest, kind=case realDest of GenReg _ => moveNativeWord | _ => MoveDouble}, makeSaves(saveRegs, loadConstArgs)) end | codeExtended _ ({instr=AllocateMemoryOperation{ size, flags, dest=PReg dReg, saveRegs}, ...}, code) = let val preserve = getSaveRegs saveRegs in llAllocateMemoryOperation({ size=size, flags=flags, dest=getAllocatedReg dReg, saveRegs=preserve}, code) end | codeExtended _ ({instr=AllocateMemoryVariable{size=PReg size, dest=PReg dest, saveRegs}, ...}, code) = let (* Simple case - no initialiser. *) val saveRegs = getSaveRegs saveRegs val sReg = getAllocatedGenReg size and dReg = getAllocatedGenReg dest val _ = sReg <> dReg orelse raise InternalError "codeGenICode-AllocateMemoryVariable" val allocCode = [ (* Store it as the length field. *) Move{source=RegisterArg sReg, moveSize=opSizeToMove polyWordOpSize, destination=MemoryArg {base=dReg, offset= ~ (Word.toInt wordSize), index=NoIndex}}, (* Untag the length *) ShiftConstant{ shiftType=SHR, output=sReg, shift=0w1, opSize=polyWordOpSize}, (* Allocate the memory *) AllocStoreVariable{ size=sReg, output=dReg, saveRegs=saveRegs} ] (* Convert to an object index if necessary. *) val convertToObjId = if targetArch = ObjectId32Bit then [ ShiftConstant{ shiftType=SHR, output=dReg, shift=0w2, opSize=OpSize64 }, ArithToGenReg{ opc=SUB, output=dReg, source=RegisterArg ebx, opSize=nativeWordOpSize } ] else [] in convertToObjId @ allocCode @ code end | codeExtended _ ({instr=InitialiseMem{size=PReg sReg, addr=PReg aReg, init=PReg iReg}, ...}, code) = (* We are going to use rep stosl/q to set the memory. That requires the length to be in ecx, the initialiser to be in eax and the destination to be edi. 
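Those registers are fixed by the instruction itself: rep stos repeatedly stores eax (or rax) at the address in edi, incrementing edi and decrementing the count in ecx until it reaches zero, hence the three moveIfNecessary calls that follow.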
*) RepeatOperation (if polyWordOpSize = OpSize64 then STOS64 else STOS32):: moveIfNecessary({src=getAllocatedReg iReg, dst=GenReg eax, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg aReg, dst=GenReg edi, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg sReg, dst=GenReg ecx, kind=moveNativeWord}, code))) | codeExtended _ ({instr=InitialisationComplete, ...}, code) = StoreInitialised :: code | codeExtended _ ({instr=BeginLoop, ...}, code) = code | codeExtended _ ({instr=JumpLoop{regArgs, stackArgs, checkInterrupt, workReg}, ...}, code) = let val workReg = Option.map (fn PReg r => getAllocatedReg r) workReg (* TODO: Make the sources and destinations "friends". *) (* We must leave stack entries as stack entries for the moment as with TailCall. *) fun codeArg(StackLocation{wordOffset, ...}) = StackSource wordOffset | codeArg arg = OtherSource(codeExtArgument arg) val extStackArgs = map (fn (src, stack, _) => {dst=StackDest stack, src=codeArg src}) stackArgs val extRegArgs = map (fn (a, PReg r) => {src=codeArg a, dst=RegDest(getAllocatedReg r)}) regArgs val checkCode = case checkInterrupt of NONE => [] | SOME saveRegs => testRegAndTrap (esp, StackOverflowCall, getSaveRegs saveRegs) in checkCode @ moveMultipleValues(extStackArgs @ extRegArgs, workReg, code) end | codeExtended _ ({instr=RaiseExceptionPacket{ packetReg=PReg preg }, ...}, code) = let (* The argument must be put into rax. *) val _ = getAllocatedGenReg preg = eax orelse raise InternalError "codeExtended: RaiseExceptionPacket" in (* We need a work register here. It can be any register other than eax since we don't preserve registers across calls. *) RaiseException { workReg=ecx } :: code end | codeExtended _ ({instr=ReserveContainer{size, ...}, ...}, code) = (* The memory must be cleared in case we have a GC. *) List.tabulate(size, fn _ => PushToStack(NonAddressConstArg(tag 0))) @ code | codeExtended {flow} ({instr=IndexedCaseOperation{testReg=PReg tReg, workReg=PReg wReg}, ...}, code) = let val testReg = getAllocatedReg tReg val workReg = getAllocatedReg wReg val _ = testReg <> workReg orelse raise InternalError "IndexedCaseOperation - same registers" val rReg = asGenReg testReg and wReg = asGenReg workReg val _ = rReg <> wReg orelse raise InternalError "IndexedCaseOperation - same registers" (* This should only be within a block with an IndexedBr flow type. *) val cases = case flow of IndexedBr cases => cases | _ => raise InternalError "codeGenICode: IndexedCaseOperation" val caseLabels = map getBlockLabel cases val startJumpTable = makeLabel() (* Compute the jump address. The index is a tagged integer so it is already multiplied by 2. We need to multiply by four to get the correct size. Subtract off the shifted tag. *) val jumpSize = ref JumpSize8 in JumpTable{cases=caseLabels, jumpSize=jumpSize} :: JumpLabel startJumpTable :: JumpAddress(RegisterArg wReg) :: IndexedJumpCalc{ addrReg=wReg, indexReg=rReg, jumpSize=jumpSize } :: LoadLabelAddress{label=startJumpTable, output=wReg} :: code end | codeExtended _ ({instr=LockMutable{addr=PReg pr}, ...}, code) = let val (bReg, index) = if targetArch = ObjectId32Bit then (ebx, Index4(asGenReg(getAllocatedReg pr))) else (asGenReg(getAllocatedReg pr), NoIndex) in (* Mask off the mutable bit. *) ArithByteMemConst{opc=AND, address={base=bReg, offset= ~1, index=index}, source=0wxff - F_mutable} :: code end | codeExtended _ ({instr=WordComparison{ arg1=PReg pr, arg2, opSize, ... 
}, ...}, code) = ArithToGenReg {opc=CMP, output=asGenReg(getAllocatedReg pr), source=codeExtArgumentAsGenReg arg2, opSize=opSize} :: code | codeExtended _ ({instr=CompareLiteral{ arg1, arg2, opSize, ... }, ...}, code) = ( case decache arg1 of (* N.B. We MUST decache since we're assuming that the base reg is not used. *) RegisterArgument(PReg pr) => ArithToGenReg {opc=CMP, output=asGenReg(getAllocatedReg pr), source=NonAddressConstArg arg2, opSize=opSize} :: code | MemoryLocation{base=PReg br, offset, index=ObjectIndex, ...} => ArithMemConst{ opc=CMP, address={offset=offset, base=ebx, index=Index4(asGenReg(getAllocatedReg br))}, source=arg2, opSize=opSize } :: code | MemoryLocation{base=PReg br, index, offset, ...} => ArithMemConst{ opc=CMP, address={offset=offset, base=asGenReg(getAllocatedReg br), index=codeExtIndex index}, source=arg2, opSize=opSize } :: code | StackLocation{wordOffset, ...} => ArithMemConst{ opc=CMP, address={offset=wordOffset*Word.toInt nativeWordSize, base=esp, index=NoIndex}, source=arg2, opSize=opSize } :: code | _ => raise InternalError "CompareLiteral" ) | codeExtended _ ({instr=CompareByteMem{ arg1={base=PReg br, offset, index}, arg2, ... }, ...}, code) = let val (bReg, index) = case index of ObjectIndex => (ebx, Index4(asGenReg(getAllocatedReg br))) | _ => (asGenReg(getAllocatedReg br), codeExtIndex index) in ArithByteMemConst{ opc=CMP, address={offset=offset, base=bReg, index=index}, source=arg2 } :: code end (* Set up an exception handler. *) | codeExtended {flow} ({instr=PushExceptionHandler{workReg=PReg hReg}, ...}, code) = let (* Set up an exception handler. *) val workReg=getAllocatedReg hReg (* Although we're pushing this to the stack we need to use LEA on the X86/64 and some arithmetic on the X86/32. We need a work reg for that. *) val handleReg = asGenReg workReg (* This should only be within a block with a SetHandler flow type. *) val handleLabel = case flow of SetHandler{ handler, ...} => handler | _ => raise InternalError "codeGenICode: PushExceptionHandler" val labelRef = getBlockLabel handleLabel (* Set up the handler by pushing the old handler to the stack, pushing the entry point and setting the handler address to the current stack pointer. *) in ( Move{source=RegisterArg esp, destination=MemoryArg {offset=memRegHandlerRegister, base=ebp, index=NoIndex}, moveSize=opSizeToMove nativeWordOpSize} :: PushToStack(RegisterArg handleReg) :: LoadLabelAddress{ label=labelRef, output=handleReg} :: PushToStack(MemoryArg{base=ebp, offset=memRegHandlerRegister, index=NoIndex}) :: code) end (* Pop an exception handler at the end of a handled section. Executed if no exception has been raised. This removes items from the stack. *) | codeExtended _ ({instr=PopExceptionHandler{workReg=PReg wReg, ...}, ...}, code) = let val workReg = getAllocatedReg wReg val wReg = asGenReg workReg in (* The stack pointer has been adjusted to just above the two words that were stored in PushExceptionHandler. *) ( Move{source=RegisterArg wReg, destination=MemoryArg {offset=memRegHandlerRegister, base=ebp, index=NoIndex}, moveSize=opSizeToMove nativeWordOpSize} :: PopR wReg :: ResetStack{numWords=1, preserveCC=false} :: code) end (* Start of a handler. Sets the address associated with PushExceptionHandler and provides a register for the packet.*) | codeExtended _ ({instr=BeginHandler{packetReg=PReg pReg, workReg=PReg wReg}, ...}, code) = let (* The exception packet is in rax. 
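RaiseExceptionPacket above requires its argument to be in rax, so that is where the packet will be found on entry to the handler; once the stack has been restored all we need to do is move it into the register allocated for it.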
*) val realPktReg = getAllocatedReg pReg val realWorkreg = getAllocatedGenReg wReg (* The code here is almost the same as PopExceptionHandler. The only real difference is that PopExceptionHandler needs to pass the result of executing the handled code which could be in any register. This code needs to transmit the exception packet and that is always in rax. *) val beginHandleCode = Move{source=RegisterArg realWorkreg, destination=MemoryArg {offset=memRegHandlerRegister, base=ebp, index=NoIndex}, moveSize=opSizeToMove nativeWordOpSize} :: PopR realWorkreg :: ResetStack{numWords=1, preserveCC=false} :: Move{ source=MemoryArg{base=ebp, offset=memRegHandlerRegister, index=NoIndex}, destination=RegisterArg esp, moveSize=opSizeToMove nativeWordOpSize } :: code in moveIfNecessary({src=GenReg eax, dst=realPktReg, kind=moveNativeWord }, beginHandleCode) end | codeExtended _ ({instr=ReturnResultFromFunction { resultReg=PReg resReg, realReg, numStackArgs }, ...}, code) = let val resultReg = getAllocatedReg resReg (* If for some reason it's not in the right register we have to move it there. *) in ReturnFromFunction numStackArgs :: moveIfNecessary({src=resultReg, dst=realReg, kind=moveNativeWord}, code) end | codeExtended _ ({instr=ArithmeticFunction{oper=SUB, resultReg=PReg resReg, operand1=PReg op1Reg, operand2, opSize, ...}, ...}, code) = (* Subtraction - this is special because it can only be done one way round. The first argument must be in a register. *) let val realDestReg = getAllocatedReg resReg val realOp1Reg = getAllocatedReg op1Reg in ArithToGenReg { opc=SUB, output=asGenReg realDestReg, source=codeExtArgumentAsGenReg operand2, opSize=opSize } :: moveIfNecessary({src=realOp1Reg, dst=realDestReg, kind=opSizeToIMove opSize}, code) end | codeExtended _ ({instr=ArithmeticFunction{oper, resultReg=PReg resReg, operand1=PReg op1Reg, operand2, opSize, ...}, ...}, code) = ( case decache operand2 of RegisterArgument(PReg op2Reg) => (* Arithmetic operation with both arguments as registers. These operations are all symmetric so we can try to put either argument into the result reg and then do the operation on the other arg. *) let val realDestReg = getAllocatedGenReg resReg val realOp1Reg = getAllocatedGenReg op1Reg and realOp2Reg = getAllocatedGenReg op2Reg val (operandReg, moveInstr) = if realOp1Reg = realDestReg then (realOp2Reg, code) else if realOp2Reg = realDestReg then (realOp1Reg, code) else (realOp2Reg, Move{source=RegisterArg realOp1Reg, destination=RegisterArg realDestReg, moveSize=opSizeToMove opSize} :: code) in ArithToGenReg { opc=oper, output=realDestReg, source=RegisterArg operandReg, opSize=opSize } :: moveInstr end | operand2 => (* Arithmetic operation with the first argument in a register and the second a constant or memory location. *) let val realDestReg = getAllocatedReg resReg val realOp1Reg = getAllocatedReg op1Reg val op2Arg = codeExtArgumentAsGenReg operand2 (* If we couldn't put it in the result register we have to copy it there. 
*) in ArithToGenReg { opc=oper, output=asGenReg realDestReg, source=op2Arg, opSize=opSize } :: moveIfNecessary({src=realOp1Reg, dst=realDestReg, kind=opSizeToIMove opSize}, code) end ) | codeExtended _ ({instr=TestTagBit{arg, ...}, ...}, code) = TestByteBits{arg=codeExtArgumentAsGenReg arg, bits=0w1} :: code | codeExtended _ ({instr=PushValue {arg, ...}, ...}, code) = PushToStack(codeExtArgumentAsGenReg arg) :: code | codeExtended _ ({instr=CopyToCache{source=PReg sreg, dest as PReg dreg, kind}, ...}, code) = if not (isUsed dest) then code else let val realDestReg = getAllocatedReg dreg (* Get the source register using the current destination as a preference. *) val realSrcReg = getAllocatedReg sreg in (* If the source is the same as the destination we don't need to do anything. *) moveIfNecessary({src=realSrcReg, dst=realDestReg, kind=kind}, code) end | codeExtended _ ({instr=ResetStackPtr {numWords, preserveCC}, ...}, code) = ( numWords >= 0 orelse raise InternalError "codeGenICode: ResetStackPtr - negative offset"; ResetStack{numWords=numWords, preserveCC=preserveCC} :: code ) | codeExtended _ ({instr=StoreToStack{ source, stackOffset, ... }, ...}, code) = llStoreArgument{ source=codeExtArgument source, base=esp, offset=stackOffset*Word.toInt nativeWordSize, index=NoIndex, kind=moveNativeWord} :: code | codeExtended _ ({instr=TagValue{source=PReg srcReg, dest as PReg dReg, opSize, ...}, ...}, code) = if not (isUsed dest) then code else let val regResult = asGenReg(getAllocatedReg dReg) val realSReg = asGenReg(getAllocatedReg srcReg) in (* N.B. Using LEA with a base register and an index multiplier of 1 is shorter than using no base register and a multiplier of two. *) (* TODO: If the value we're tagging is a byte or a 16-bit value we can use OpSize32 and possibly save the Rex byte. *) LoadAddress{ output=regResult, offset=1, base=SOME realSReg, index=Index1 realSReg, opSize=opSize } :: code end | codeExtended _ ({instr=UntagValue{dest as PReg dReg, cache=SOME(PReg cacheReg), opSize, ...}, ...}, code) = if not (isUsed dest) then code else moveIfNecessary({src=getAllocatedReg cacheReg, dst=getAllocatedReg dReg, kind=opSizeToIMove opSize}, code) | codeExtended _ ({instr=UntagValue{source=PReg sReg, dest as PReg dReg, isSigned, opSize, ...}, ...}, code) = if not (isUsed dest) then code else let val regResult = getAllocatedReg dReg val realSReg = getAllocatedReg sReg in (* For most cases we're going to be using a 32-bit word if this is 32-in-64. The exception is when converting a word to a signed large-word. 
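In either case untagging is just a right shift by one bit which discards the tag: SAR for signed values so that the sign is preserved, SHR for unsigned values so that the top bit becomes zero.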
*) ShiftConstant{ shiftType=if isSigned then SAR else SHR, output=asGenReg regResult, shift=0w1, opSize=opSize } :: moveIfNecessary({src=realSReg, dst=regResult, kind=opSizeToIMove opSize}, code) end | codeExtended _ ({instr=LoadEffectiveAddress{base, offset, index=ObjectIndex, dest=PReg dReg, opSize}, ...}, code) = let val destReg = asGenReg(getAllocatedReg dReg) val bReg = case base of SOME(PReg br) => asGenReg(getAllocatedReg br) | NONE => raise InternalError "LoadEffectiveAddress - ObjectIndex but no base" in LoadAddress{ output=destReg, offset=offset, base=SOME ebx, index=Index4 bReg, opSize=opSize } :: code end | codeExtended _ ({instr=LoadEffectiveAddress{base, offset, index, dest=PReg dReg, opSize}, ...}, code) = let val destReg = asGenReg(getAllocatedReg dReg) val bReg = case base of SOME(PReg br) => SOME(asGenReg(getAllocatedReg br)) | NONE => NONE val indexR = codeExtIndex index in LoadAddress{ output=destReg, offset=offset, base=bReg, index=indexR, opSize=opSize } :: code end | codeExtended _ ({instr=ShiftOperation{shift, resultReg=PReg resReg, operand=PReg operReg, shiftAmount=IntegerConstant i, opSize, ...}, ...}, code) = let val realDestReg = getAllocatedReg resReg val realOpReg = getAllocatedReg operReg in ShiftConstant{ shiftType=shift, output=asGenReg realDestReg, shift=Word8.fromLargeInt i, opSize=opSize } :: moveIfNecessary({src=realOpReg, dst=realDestReg, kind=opSizeToIMove opSize}, code) end | codeExtended _ ({instr=ShiftOperation{shift, resultReg=PReg resReg, operand=PReg operReg, shiftAmount=RegisterArgument(PReg shiftReg), opSize, ...}, ...}, code) = let val realDestReg = getAllocatedReg resReg val realShiftReg = getAllocatedReg shiftReg val realOpReg = getAllocatedReg operReg (* We want the shift in ecx. We may not have got it there but the register should be free. The shift is masked to 5 or 6 bits so we have to check for larger shift values at a higher level.*) in ShiftVariable{ shiftType=shift, output=asGenReg realDestReg, opSize=opSize } :: moveIfNecessary({src=realOpReg, dst=realDestReg, kind=opSizeToIMove opSize}, moveIfNecessary({src=realShiftReg, dst=GenReg ecx, kind=Move32Bit (* < 64*)}, code)) end | codeExtended _ ({instr=ShiftOperation _, ...}, _) = raise InternalError "codeExtended - ShiftOperation" | codeExtended _ ({instr= Multiplication{resultReg=PReg resReg, operand1=PReg op1Reg, operand2, opSize, ...}, ...}, code) = ( case decache operand2 of RegisterArgument(PReg op2Reg) => let (* Treat exactly the same as ArithmeticFunction. *) val realDestReg = getAllocatedGenReg resReg val realOp1Reg = getAllocatedGenReg op1Reg and realOp2Reg = getAllocatedGenReg op2Reg val (operandReg, moveInstr) = if realOp1Reg = realDestReg then (realOp2Reg, code) else if realOp2Reg = realDestReg then (realOp1Reg, code) else (realOp2Reg, Move{source=RegisterArg realOp1Reg, destination=RegisterArg realDestReg, moveSize=opSizeToMove opSize} :: code) in MultiplyR { source=RegisterArg operandReg, output=realDestReg, opSize=opSize } :: moveInstr end | operand2 => (* Multiply operation with the first argument in a register and the second a constant or memory location. 
*) let val realDestReg = getAllocatedReg resReg val realOp1Reg = getAllocatedReg op1Reg val op2Arg = codeExtArgumentAsGenReg operand2 in MultiplyR { output=asGenReg realDestReg, source=op2Arg, opSize=opSize } :: moveIfNecessary({src=realOp1Reg, dst=realDestReg, kind=opSizeToIMove opSize}, code) end ) | codeExtended _ ({instr=Division{isSigned, dividend=PReg regDivid, divisor, quotient=PReg regQuot, remainder=PReg regRem, opSize}, ...}, code) = let (* TODO: This currently only supports the dividend in a register. LargeWord division will generally load the argument from a box so we could support a memory argument for that case. Word and integer values will always have to be detagged. *) (* Division is specific as to the registers. The dividend must be eax, quotient is eax and the remainder is edx. *) val realDiviReg = getAllocatedReg regDivid val realQuotReg = getAllocatedReg regQuot val realRemReg = getAllocatedReg regRem val divisorArg = codeExtArgument divisor val divisorReg = argAsGenReg divisorArg val _ = divisorReg <> eax andalso divisorReg <> edx orelse raise InternalError "codeGenICode: Division" (* rdx needs to be set to the high order part of the dividend. For signed division that means sign-extending rdx, for unsigned division we clear it. We only need a 32-bit clear since the top 32-bits are cleared anyway. *) val setRDX = if isSigned then SignExtendForDivide opSize else ArithToGenReg{ opc=XOR, output=edx, source=RegisterArg edx, opSize=OpSize32 } in (* We may need to move one or more of the registers although normally that won't be necessary. Almost certainly only either the remainder or the quotient will actually be used. *) moveMultipleRegisters([{src=GenReg eax, dst=realQuotReg}, {src=GenReg edx, dst=realRemReg}], DivideAccR {arg=divisorReg, isSigned=isSigned, opSize=opSize} :: setRDX :: moveIfNecessary({src=realDiviReg, dst=GenReg eax, kind=opSizeToIMove opSize}, code)) end | codeExtended _ ({instr=AtomicExchangeAndAdd{base=PReg bReg, source=PReg sReg}, ...}, code) = let val baseReg = asGenReg (getAllocatedReg bReg) and outReg = asGenReg (getAllocatedReg sReg) val address = if targetArch = ObjectId32Bit then {base=ebx, index=Index4 baseReg, offset=0} else {base=baseReg, index=NoIndex, offset=0} in AtomicXAdd{address=address, output=outReg, opSize=polyWordOpSize} :: code end | codeExtended _ ({instr=BoxValue{boxKind, source=PReg sReg, dest as PReg dReg, saveRegs}, ...}, code) = if not (isUsed dest) then code else let val preserve = getSaveRegs saveRegs val (srcReg, boxSize, moveKind) = case boxKind of BoxLargeWord => (getAllocatedReg sReg, Word.toInt(nativeWordSize div wordSize), moveNativeWord) | BoxX87Double => (getAllocatedReg sReg, Word.toInt(0w8 div wordSize), MoveDouble) | BoxX87Float => (getAllocatedReg sReg, Word.toInt(0w4 div wordSize), MoveFloat) | BoxSSE2Double => (getAllocatedReg sReg, Word.toInt(0w8 div wordSize), MoveDouble) | BoxSSE2Float => (getAllocatedReg sReg, Word.toInt(0w4 div wordSize), MoveFloat) val dstReg = getAllocatedReg dReg val (bReg, index) = if targetArch = ObjectId32Bit then (ebx, Index4(asGenReg dstReg)) else (asGenReg dstReg, NoIndex) in StoreInitialised :: llStoreArgument{ source=RegisterArg srcReg, offset=0, base=bReg, index=index, kind=moveKind} :: llAllocateMemoryOperation({ size=boxSize, flags=0wx1, dest=dstReg, saveRegs=preserve}, code) end | codeExtended _ ({instr=CompareByteVectors{vec1Addr=PReg v1Reg, vec2Addr=PReg v2Reg, length=PReg lReg, ...}, ...}, code) = (* There's a complication here. 
CompareByteVectors generates REPE CMPSB to compare the vectors but the condition code is only set if CMPSB is executed at least once. If the value in RCX/ECX is zero it will never be executed and the condition code will be unchanged. We want the result to be "equal" in that case so we need to ensure that is the case. It's quite possible that the condition code has just been set by shifting RCX/ECX to remove the tag in which case it will have set "equal" if the value was zero. We use CMP R/ECX,R/ECX which is two bytes in 32-bit. If we knew the length was non-zero (e.g. a constant) we could avoid this. *) RepeatOperation CMPS8 :: ArithToGenReg {opc=CMP, output=ecx, source=RegisterArg ecx, opSize=OpSize32} :: moveIfNecessary({src=getAllocatedReg v1Reg, dst=GenReg esi, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg v2Reg, dst=GenReg edi, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg lReg, dst=GenReg ecx, kind=moveNativeWord}, code))) | codeExtended _ ({instr=BlockMove{srcAddr=PReg sReg, destAddr=PReg dReg, length=PReg lReg, isByteMove}, ...}, code) = (* We may need to move these into the appropriate registers. They have been reserved but it's still possible the values could be in something else. *) RepeatOperation(if isByteMove then MOVS8 else if polyWordOpSize = OpSize64 then MOVS64 else MOVS32) :: moveIfNecessary({src=getAllocatedReg sReg, dst=GenReg esi, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg dReg, dst=GenReg edi, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg lReg, dst=GenReg ecx, kind=moveNativeWord}, code))) | codeExtended _ ({instr=X87Compare{arg1=PReg argReg, arg2, isDouble, ...}, ...}, code) = let val fpReg = getAllocatedFPReg argReg val _ = fpReg = fp0 orelse raise InternalError "codeGenICode: CompareFloatingPt not fp0" (* This currently pops the value. 
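FCOMP compares ST0 with the source operand and then pops the x87 stack; the outcome is left in the FPU status word rather than the CPU flags, so it is read back separately - see X87FPGetCondition below, which transfers the status word through eax.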
*) val precision = if isDouble then DoublePrecision else SinglePrecision in case codeExtArgumentAsFPReg arg2 of RegisterArg fpReg2 => FPArithR{opc=FCOMP, source=fpReg2} :: code | MemoryArg{offset, base=baseReg, index=NoIndex} => FPArithMemory{opc=FCOMP, base=baseReg, offset=offset, precision=precision} :: code | AddressConstArg const => FPArithConst{opc=FCOMP, source = const, precision=precision} :: code | _ => raise InternalError "codeGenICode: CompareFloatingPt: TODO" end | codeExtended _ ({instr=SSE2Compare{arg1=PReg argReg, arg2, isDouble, ...}, ...}, code) = let val xmmReg = getAllocatedXMMReg argReg val arg2Code = codeExtArgumentAsXMMReg arg2 in XMMArith { opc= if isDouble then SSE2CompDouble else SSE2CompSingle, output=xmmReg, source=arg2Code} :: code end | codeExtended _ ({instr=X87FPGetCondition{dest=PReg dReg, ...}, ...}, code) = moveIfNecessary({src=GenReg eax, dst=getAllocatedReg dReg, kind=Move32Bit}, FPStatusToEAX :: code) | codeExtended _ ({instr=X87FPArith{opc, resultReg=PReg resReg, arg1=PReg op1Reg, arg2, isDouble}, ...}, code) = let val realDestReg = getAllocatedFPReg resReg val realOp1Reg = getAllocatedFPReg op1Reg val _ = realDestReg = fp0 orelse raise InternalError "codeGenICode: FloatingPointArith not fp0" val _ = realOp1Reg = fp0 orelse raise InternalError "codeGenICode: FloatingPointArith not fp0" val op2Arg = codeExtArgumentAsFPReg arg2 val precision = if isDouble then DoublePrecision else SinglePrecision in case op2Arg of MemoryArg{offset, base=baseReg, index=NoIndex} => FPArithMemory{opc=opc, base=baseReg, offset=offset, precision=precision} :: code | AddressConstArg const => FPArithConst{opc=opc, source = const, precision=precision} :: code | _ => raise InternalError "codeGenICode: X87FPArith: TODO" end | codeExtended _ ({instr=X87FPUnaryOps{fpOp, dest=PReg resReg, source=PReg op1Reg}, ...}, code) = let val realDestReg = getAllocatedFPReg resReg val realOp1Reg = getAllocatedFPReg op1Reg val _ = realDestReg = fp0 orelse raise InternalError "codeGenICode: X87FPUnaryOps not fp0" val _ = realOp1Reg = fp0 orelse raise InternalError "codeGenICode: X87FPUnaryOps not fp0" in FPUnary fpOp :: code end | codeExtended _ ({instr=X87Float{dest=PReg resReg, source}, ...}, code) = let val intSource = codeExtArgumentAsGenReg source val fpReg = getAllocatedFPReg resReg val _ = fpReg = fp0 orelse raise InternalError "codeGenICode: FloatFixedInt not fp0" in (* This is complicated. The integer value has to be in memory not in a register so we have to push it to the stack and then make sure it is popped afterwards. Because it is untagged it is unsafe to leave it. 
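Reading the emitted sequence in execution order: push the integer, load it into the FPU from the top of the stack - in effect an FILD - and then drop the stack word again so that the untagged value is not left on the stack.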
*) ResetStack{numWords=1, preserveCC=false} :: FPLoadInt{ base=esp, offset=0, opSize=polyWordOpSize } :: PushToStack intSource :: code end | codeExtended _ ({instr=SSE2Float{dest=PReg resReg, source}, ...}, code) = let val xmmResReg = getAllocatedXMMReg resReg val srcReg = case codeExtArgumentAsGenReg source of RegisterArg srcReg => srcReg | _ => raise InternalError "FloatFixedInt: not reg" in XMMConvertFromInt{ output=xmmResReg, source=srcReg, opSize=polyWordOpSize} :: code end | codeExtended _ ({instr=SSE2FPUnary{opc, resultReg=PReg resReg, source}, ...}, code) = let val realDestReg = getAllocatedXMMReg resReg val opArg = codeExtArgumentAsXMMReg source val sse2Op = case opc of SSE2UDoubleToFloat => SSE2DoubleToFloat | SSE2UFloatToDouble => SSE2FloatToDouble in XMMArith{ opc=sse2Op, output=realDestReg, source=opArg} :: code end | codeExtended _ ({instr=SSE2FPBinary{opc, resultReg=PReg resReg, arg1=PReg op1Reg, arg2}, ...}, code) = let val realDestReg = getAllocatedXMMReg resReg val realOp1Reg = getAllocatedXMMReg op1Reg val op2Arg = codeExtArgumentAsXMMReg arg2 (* xorpd and andpd require 128-bit arguments with 128-bit alignment. *) val _ = case (opc, op2Arg) of (SSE2BXor, RegisterArg _) => () | (SSE2BXor, _) => raise InternalError "codeGenICode - SSE2Xor not in register" | (SSE2BAnd, RegisterArg _) => () | (SSE2BAnd, _) => raise InternalError "codeGenICode - SSE2And not in register" | _ => () val doMove = if realDestReg = realOp1Reg then code else XMMArith { opc=SSE2MoveDouble, source=RegisterArg realOp1Reg, output=realDestReg } :: code val sse2Op = case opc of SSE2BAddDouble => SSE2AddDouble | SSE2BSubDouble => SSE2SubDouble | SSE2BMulDouble => SSE2MulDouble | SSE2BDivDouble => SSE2DivDouble | SSE2BAddSingle => SSE2AddSingle | SSE2BSubSingle => SSE2SubSingle | SSE2BMulSingle => SSE2MulSingle | SSE2BDivSingle => SSE2DivSingle | SSE2BXor => SSE2Xor | SSE2BAnd => SSE2And in XMMArith{ opc=sse2Op, output=realDestReg, source=op2Arg} :: doMove end | codeExtended _ ({instr=TagFloat{source=PReg srcReg, dest as PReg dReg, ...}, ...}, code) = if not (isUsed dest) then code else let val _ = targetArch = Native64Bit orelse raise InternalError "TagFloat: not 64-bit" (* Copy the value from an XMM reg into a general reg and tag it. *) val regResult = asGenReg(getAllocatedReg dReg) val realSReg = getAllocatedXMMReg srcReg in ArithToGenReg { opc=ADD, output=regResult, source=NonAddressConstArg 1, opSize=polyWordOpSize } :: ShiftConstant{ shiftType=SHL, output=regResult, shift=0w32, opSize=OpSize64} :: MoveXMMRegToGenReg { source = realSReg, output = regResult } :: code end | codeExtended _ ({instr=UntagFloat{dest as PReg dReg, cache=SOME(PReg cacheReg), ...}, ...}, code) = if not (isUsed dest) then code else moveIfNecessary({src=getAllocatedReg cacheReg, dst=getAllocatedReg dReg, kind=MoveFloat}, code) | codeExtended _ ({instr=UntagFloat{source, dest as PReg dReg, ...}, ...}, code) = if not (isUsed dest) then code else let val regResult = getAllocatedXMMReg dReg in case codeExtArgumentAsGenReg source of RegisterArg realSReg => XMMShiftRight{ output=regResult, shift=0w4 (* Bytes - not bits *) } :: MoveGenRegToXMMReg {source=realSReg, output=regResult} :: code | MemoryArg{base, offset, index} => (* If the value is in memory we can just load the high order word. *) XMMArith { opc=SSE2MoveFloat, source=MemoryArg{base=base, offset=offset+4, index=index}, output=regResult } :: code | NonAddressConstArg ic => (* Shift down and then load from the non-constant area. 
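TagFloat above puts the 32-bit float in the upper half of the 64-bit word, so arithmetically shifting the constant right by 32 leaves the raw float bits in the low word, which can then be loaded as an untagged constant.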
*) XMMArith { opc=SSE2MoveFloat, source=NonAddressConstArg(IntInf.~>>(ic, 0w32)), output=regResult } :: code | _ => raise InternalError "UntagFloat - not register or memory" end | codeExtended _ ({instr=GetSSE2ControlReg{dest=PReg dReg}, ...}, code) = let (* This has to work through memory. Reserve one word on the stack, get the MXCSR register into it and pop it to the register. *) val regResult = getAllocatedGenReg dReg in PopR regResult :: XMMStoreCSR{base=esp, offset=0, index=NoIndex } :: PushToStack(NonAddressConstArg 0) :: code end | codeExtended _ ({instr=SetSSE2ControlReg{source=PReg sReg}, ...}, code) = let (* This has to work through memory. Push the register to the stack, store the value into the control register and remove it from the stack. *) val sourceReg = getAllocatedGenReg sReg in ResetStack{ numWords=1, preserveCC=false } :: XMMLoadCSR{base=esp, offset=0, index=NoIndex } :: PushToStack(RegisterArg sourceReg) :: code end | codeExtended _ ({instr=GetX87ControlReg{dest=PReg dReg}, ...}, code) = let (* This has to work through memory. Reserve one word on the stack, get the X87 control register into it and pop it to the register. *) val regResult = getAllocatedGenReg dReg in PopR regResult :: FPStoreCtrlWord{base=esp, offset=0, index=NoIndex } :: PushToStack(NonAddressConstArg 0) :: code end | codeExtended _ ({instr=SetX87ControlReg{source=PReg sReg}, ...}, code) = let (* This has to work through memory. Push the register to the stack, store the value into the control register and remove it from the stack. *) val sourceReg = getAllocatedGenReg sReg in ResetStack{ numWords=1, preserveCC=false } :: FPLoadCtrlWord{base=esp, offset=0, index=NoIndex } :: PushToStack(RegisterArg sourceReg) :: code end | codeExtended _ ({instr=X87RealToInt{source=PReg sReg, dest=PReg dReg}, ...}, code) = let (* This has to work through memory. Reserve one word on the stack, convert the value into it and pop it to the register. *) val regResult = getAllocatedGenReg dReg val fpReg = getAllocatedFPReg sReg val _ = fpReg = fp0 orelse raise InternalError "codeGenICode: CompareFloatingPt not fp0" (* This currently pops the value. *) in PopR regResult :: FPStoreInt{base=esp, offset=0, index=NoIndex } :: PushToStack(NonAddressConstArg 0) :: code end | codeExtended _ ({instr=SSE2RealToInt{source, dest=PReg dReg, isDouble, isTruncate}, ...}, code) = let (* The source is either an XMM register or memory. *) val regResult = getAllocatedGenReg dReg val opArg = codeExtArgumentAsXMMReg source in XMMStoreInt { source=opArg, precision=if isDouble then DoublePrecision else SinglePrecision, output = regResult, isTruncate=isTruncate } :: code end | codeExtended _ ({instr=SignExtend32To64{source, dest=PReg dReg}, ...}, code) = let val regResult = getAllocatedGenReg dReg val opArg = codeExtArgumentAsGenReg source in Move{moveSize=Move32X, source=opArg, destination=RegisterArg regResult } :: code end | codeExtended _ ({instr=TouchArgument _, ...}, code) = code (* Don't need to do anything. *) val newCode = codeCreate (functionName, profileObject, debugSwitches) local (* processed - set to true when a block has been processed. *) val processed = Array.array(numBlocks, false) fun haveProcessed n = Array.sub(processed, n) (* Find the blocks that reference this one. This isn't essential but allows us to try to generate blocks in the order of the control flow. This in turn may allow us to use short branches rather than long ones. 
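A conditional branch with an 8-bit displacement occupies two bytes whereas the 32-bit-displacement form takes six, so laying a block out straight after one of its predecessors lets more branches use the short form.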
*) val labelRefs = Array.array(numBlocks, []) datatype flowCode = FlowCodeSimple of int | FlowCodeCMove of {code: operation list, trueJump: int, falseJump: int} (* Process this recursively to set the references. If we have unreachable blocks, perhaps because they've been merged, we don't want to include them in the reference counting. This shouldn't happen now that IdentifyReferences removes unreferenced blocks. *) fun setReferences fromLabel toLabel = case Array.sub(labelRefs, toLabel) of [] => (* Not yet visited at all. *) let val ExtendedBasicBlock{ flow, ...} = Vector.sub(blocks, toLabel) val refs = case flow of ExitCode => [] | Unconditional lab => [lab] | Conditional{trueJump, falseJump, ... } => [trueJump, falseJump] | IndexedBr labs => labs | SetHandler { handler, continue } => [handler, continue] | UnconditionalHandle _ => [] | ConditionalHandle { continue, ...} => [continue] val () = if fromLabel >= 0 then Array.update(labelRefs, toLabel, [fromLabel]) else () in List.app (setReferences toLabel) refs end | refs => (* We've visited this at least once. Just add us to the list. *) Array.update(labelRefs, toLabel, fromLabel :: refs) val _ = setReferences 0 0 (* Process the blocks. We keep the "stack" explicit rather than using recursion because this allows us to select both arms of a conditional branch sooner. *) fun genCode(toDo, lastFlow, code) = case List.filter (not o haveProcessed) toDo of [] => let (* There's nothing left to do. We may need to add a final branch to the end. *) val finalBranch = case lastFlow of ExitCode => [] | IndexedBr _ => [] | Unconditional dest => [UncondBranch(getBlockLabel dest)] | Conditional { condition, trueJump, falseJump, ...} => [ UncondBranch(getBlockLabel falseJump), ConditionalBranch{test=condition, label=getBlockLabel trueJump} ] | SetHandler { continue, ...} => [UncondBranch(getBlockLabel continue)] | UnconditionalHandle _ => [] | ConditionalHandle { continue, ...} => [UncondBranch(getBlockLabel continue)] in finalBranch @ code (* Done. *) end | stillToDo as head :: _ => let local (* Check the references. If all the sources that lead up to this have already been processed we won't have any backward jumps. *) fun available dest = List.all haveProcessed (Array.sub(labelRefs, dest)) val continuation = case lastFlow of ExitCode => NONE | IndexedBr _ => NONE (* We could put the last branch in here. *) | Unconditional dest => if not (haveProcessed dest) andalso available dest then SOME(FlowCodeSimple dest) else NONE | Conditional {trueJump, falseJump, condition, ...} => let (* Can we replace this with a SETCC or CMOV? If both arms simply set a register to a value and either return or jump to the same location we can use a SETCC or a CMOV. *) val ExtendedBasicBlock { flow=tFlow, block=tBlock, ...} = Vector.sub(blocks, trueJump) and ExtendedBasicBlock { flow=fFlow, block=fBlock, ...} = Vector.sub(blocks, falseJump) fun cmoveOrSetcc{condition, output, tSource=IntegerConstant trueValue, fSource=IntegerConstant falseValue, kind, code} = let (* Could use SETCC. Only if we can use LEA for multiplication. The result must be tagged so we will always have a multiplier. *) val (multiplier, fValue, testCondition) = if trueValue >= falseValue then (trueValue-falseValue, falseValue, condition) else (falseValue-trueValue, trueValue, invertTest condition) val destReg = asGenReg output in if not (targetArch = Native32Bit andalso (destReg=esi orelse destReg=edi)) (* We can't use Setcc with esi or edi on native 32-bit.
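In 32-bit mode SETcc can only address the byte registers of eax, ebx, ecx and edx; esi and edi have no byte form without the REX prefix, which exists only in 64-bit mode. As an example of the transformation, producing a tagged 1 or tagged 0 - the words 3 and 1 - gives multiplier=2 and fValue=1: the destination is zeroed before the existing comparison, SETcc then sets its low byte to 0 or 1, and LEA dest,[dest+dest+1] produces 3 if the condition holds and 1 otherwise.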
*) andalso (multiplier = 2 orelse multiplier = 4 orelse multiplier = 8) (* We're using LEA so can only be multiplying by 2, 4 or 8. *) andalso is32bit fValue (* and we're going to put this in the offset *) then let val effectiveOpSize = (* We can generally use 32-bit LEA except if the result is negative. *) if kind = Move32Bit orelse fValue >= 0 andalso fValue+multiplier <= 0x7fffffff then OpSize32 else OpSize64 val (index, base) = case multiplier of 2 => (Index1 destReg, SOME destReg) | 4 => (Index4 destReg, NONE) | 8 => (Index8 destReg, NONE) | _ => (NoIndex, NONE) (* Try to put the instruction to zero the register before any compare. We can do it provided the register we're going to zero isn't used in the comparison. *) fun checkArg(RegisterArg r) = r <> destReg | checkArg(MemoryArg mem) = checkMem mem | checkArg _ = true and checkMem{base, index=NoIndex, ...} = base <> destReg | checkMem{base, index=Index1 index, ...} = base <> destReg andalso index <> destReg | checkMem{base, index=Index2 index, ...} = base <> destReg andalso index <> destReg | checkMem{base, index=Index4 index, ...} = base <> destReg andalso index <> destReg | checkMem{base, index=Index8 index, ...} = base <> destReg andalso index <> destReg val zeroReg = ArithToGenReg { opc=XOR, output=destReg, source=RegisterArg destReg, opSize=OpSize32 } fun addXOR [] = NONE | addXOR ((instr as ResetStack _) :: tl) = (* If we can add the XOR before the ResetStack do so. *) Option.map(fn code => instr :: code) (addXOR tl) | addXOR ((instr as ArithToGenReg{output, source, ...}) :: tl) = if output <> destReg andalso checkArg source then SOME(instr :: zeroReg :: tl) else NONE | addXOR ((instr as ArithMemConst{address, ...}) :: tl) = if checkMem address then SOME(instr :: zeroReg :: tl) else NONE | addXOR ((instr as ArithByteMemConst{address, ...}) :: tl) = if checkMem address then SOME(instr :: zeroReg :: tl) else NONE | addXOR ((instr as XMMArith{source=MemoryArg mem, ...}) :: tl) = if checkMem mem then SOME(instr :: zeroReg :: tl) else NONE | addXOR ((instr as XMMArith _) :: tl) = SOME(instr :: zeroReg :: tl) | addXOR ((instr as TestByteBits{arg, ...}) :: tl) = if checkArg arg then SOME(instr :: zeroReg :: tl) else NONE | addXOR ((instr as RepeatOperation CMPS8) :: tl) = (* This uses edi, esi and ecx implicitly *) if destReg <> esi andalso destReg <> edi andalso destReg <> ecx then SOME(instr :: zeroReg :: tl) else NONE (* This seems to be just a conditional jump as a result of testing the condition code twice in Real.== *) | addXOR _ = NONE (* If we can't put the XOR before the instruction we need to either zero it using a move, which won't affect the CC, or use MOVZB to extend the byte value to 32/64 bits. *) val loadAddr = LoadAddress{output=destReg, offset=Int.fromLarge fValue, base=base, index=index, opSize=effectiveOpSize} and setCond = SetCondition{output=destReg, test=testCondition} val code = case addXOR code of SOME withXOR => loadAddr :: setCond :: withXOR | NONE => loadAddr :: (* We've already checked that we're not using esi/edi on native 32-bit. *) Move{destination=RegisterArg destReg, source=RegisterArg destReg, moveSize=Move8} :: setCond :: code in SOME code end else NONE end (* If either value is a memory location it isn't safe to load it. The base address may not be valid if the condition does not hold.
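One of the two values also has to be loaded unconditionally before the conditional move, and CMOVcc reads a memory source operand even when its condition is false, so the address would be dereferenced on both paths either way.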
*) | cmoveOrSetcc{tSource=MemoryLocation _, ...} = NONE | cmoveOrSetcc{fSource=MemoryLocation _, ...} = NONE | cmoveOrSetcc{condition, output, tSource, fSource, kind, code} = if targetArch = Native32Bit then NONE (* CMov doesn't work for constants. *) else let val output = asGenReg output val codeTrue = codeExtArgumentAsGenReg tSource and codeFalse = codeExtArgumentAsGenReg fSource val opSize = case kind of Move32Bit => OpSize32 | Move64Bit => OpSize64 | _ => raise InternalError "move size" (* One argument has to be loaded into a register first and the other is conditionally moved. *) val loadFalseCmoveTrue = if (case codeFalse of RegisterArg regFalse => regFalse = output | _ => false) then true (* The false value is already in the right register. *) else if (case codeTrue of RegisterArg regTrue => regTrue = output | _ => false) then false (* The true value is in the right register - have to reverse. *) else if (case codeTrue of NonAddressConstArg _ => true | _ => false) then false (* The true value is a short constant. If we use a CMOV we will have to put that in the non-constant area and use a PC-relative reference. Try to avoid it. *) else true fun cmov{codeLoad, codeMove, condition} = let val load = case codeLoad of RegisterArg regLoad => moveIfNecessary({src=GenReg regLoad, dst=GenReg output, kind=opSizeToIMove opSize}, code) | codeLoad => Move{source=codeLoad, destination=RegisterArg output, moveSize=opSizeToMove opSize} :: code in CondMove{test=condition, output=output, source=codeMove, opSize=opSize} :: load end in if loadFalseCmoveTrue then SOME(cmov{codeLoad=codeFalse, codeMove=codeTrue, condition=condition}) else SOME(cmov{codeLoad=codeTrue, codeMove=codeFalse, condition=invertTest condition}) end val isPossSetCCOrCmov = if not (haveProcessed trueJump) andalso available trueJump andalso not (haveProcessed falseJump) andalso available falseJump then case (tFlow, fFlow, tBlock, fBlock) of (ExitCode, ExitCode, [{instr=LoadArgument{dest=PReg tReg, source=tSource, kind=kindT}, ...}, {instr=ReturnResultFromFunction{resultReg=PReg resReg, realReg, numStackArgs, ...}, ...}], [{instr=LoadArgument{dest=PReg fReg, source=fSource, kind=kindF}, ...}, {instr=ReturnResultFromFunction _, ...}]) => (* The real registers for the two sides should both be rax.
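Both arms end with a ReturnResultFromFunction, so each LoadArgument is feeding the same fixed result register; the check below gives up if the two allocated registers differ.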
*) let val realTReg = getAllocatedReg tReg and realFReg = getAllocatedReg fReg in if realTReg = realFReg andalso kindT = kindF andalso (kindT = Move32Bit orelse kindT = Move64Bit) then ( case cmoveOrSetcc{condition=condition, output=realTReg, tSource=tSource, fSource=fSource, kind=kindT, code=code} of SOME code => let val resultReg = getAllocatedReg resReg val code = ReturnFromFunction numStackArgs :: moveIfNecessary({src=resultReg, dst=realReg, kind=moveNativeWord}, code) in SOME{code=code, trueJump=trueJump, falseJump=falseJump} end | NONE => NONE ) else NONE end | (Unconditional tDest, Unconditional fDest, [{instr=LoadArgument{dest=PReg tReg, source=tSource, kind=kindT}, ...}], [{instr=LoadArgument{dest=PReg fReg, source=fSource, kind=kindF}, ...}]) => let val realTReg = getAllocatedReg tReg and realFReg = getAllocatedReg fReg in if tDest = fDest andalso realTReg = realFReg andalso kindT = kindF andalso (kindT = Move32Bit orelse kindT = Move64Bit) then ( case cmoveOrSetcc{condition=condition, output=realTReg, tSource=tSource, fSource=fSource, kind=kindT, code=code} of SOME code => SOME{code=code, trueJump=trueJump, falseJump=falseJump} | NONE => NONE ) else NONE end | _ => NONE else NONE in case isPossSetCCOrCmov of NONE => (* We can usually choose either destination and in nearly all cases it won't matter. The default branch is not to take forward jumps so if there is reason to believe that one branch is more likely we should follow that branch now and leave the other. If we have JO/JNO we assume that overflow is unusual. If one branch raises an exception we assume that that is unusual. *) let val (first, second) = case (condition, Vector.sub(blocks, falseJump)) of (JNO, _) => (trueJump, falseJump) | (_, ExtendedBasicBlock{ flow=ExitCode, block, ...}) => if List.exists(fn{instr=RaiseExceptionPacket _, ...} => true | _ => false) block then (trueJump, falseJump) else (falseJump, trueJump) | _ => (falseJump, trueJump) in if not (haveProcessed first) andalso available first then SOME(FlowCodeSimple first) else if not (haveProcessed second) andalso available second then SOME(FlowCodeSimple second) else NONE end | SOME args => SOME(FlowCodeCMove args) end | SetHandler { continue, ... } => (* We want the continuation if possible. We'll need a branch round the handler so that won't help. *) if not (haveProcessed continue) andalso available continue then SOME(FlowCodeSimple continue) else NONE | UnconditionalHandle _ => NONE | ConditionalHandle _ => NONE in (* First choice - continue the existing block. Second choice - the first item whose sources have all been processed. Third choice - something from the list. *) val picked = case continuation of SOME c => c | NONE => case List.find available stillToDo of SOME c => FlowCodeSimple c | NONE => FlowCodeSimple head end in case picked of FlowCodeSimple picked => let val () = Array.update(processed, picked, true) (* Code to terminate the previous block. *) val startCode = case lastFlow of ExitCode => [] | IndexedBr _ => [] | UnconditionalHandle _ => [] | Unconditional dest => if dest = picked then [] else [UncondBranch(getBlockLabel dest)] | ConditionalHandle { continue, ...} => if continue = picked then [] else [UncondBranch(getBlockLabel continue)] | SetHandler { continue, ... } => if continue = picked then [] else [UncondBranch(getBlockLabel continue)] | Conditional { condition, trueJump, falseJump, ...} => if picked = falseJump (* Usual case. 
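The false block will be generated immediately after this one, so we fall through to it and only need the conditional branch to the true block.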
*) then [ConditionalBranch{test=condition, label=getBlockLabel trueJump}] else if picked = trueJump then (* We have a jump to the true condition. Invert the jump. This is more than an optimisation. Because this immediately precedes the true block we're not going to generate a label. *) [ConditionalBranch{test=invertTest condition, label=getBlockLabel falseJump}] else [ UncondBranch(getBlockLabel falseJump), ConditionalBranch{test=condition, label=getBlockLabel trueJump} ] (* Code-generate the body with the code we've done so far at the end. Add a label at the start if necessary. *) local (* If the previous block dropped through to this and this was the only reference then we don't need a label. *) fun onlyJumpingHere (lab: int) = if lab <> picked then false else case Array.sub(labelRefs, picked) of [singleton] => singleton = lab | _ => false val noLabel = case lastFlow of ExitCode => picked = 0 (* Unless this was the first block. *) | Unconditional dest => onlyJumpingHere dest | Conditional { trueJump, falseJump, ...} => onlyJumpingHere trueJump orelse onlyJumpingHere falseJump | IndexedBr _ => false | SetHandler _ => false | UnconditionalHandle _ => false | ConditionalHandle { continue, ...} => onlyJumpingHere continue in val startLabel = if noLabel then [] else [JumpLabel(getBlockLabel picked)] end val ExtendedBasicBlock { flow, block, ...} = Vector.sub(blocks, picked) local fun genCodeBlock(instr, code) = codeExtended {flow=flow} (instr, code) in val bodyCode = List.foldl genCodeBlock (startLabel @ startCode @ code) block end val addSet = case flow of ExitCode => [] | IndexedBr cases => cases | Unconditional dest => [dest] | Conditional {trueJump, falseJump, ...} => [falseJump, trueJump] | SetHandler { handler, continue } => [handler, continue] | UnconditionalHandle _ => [] | ConditionalHandle { continue, ...} => [continue] in genCode(addSet @ stillToDo, flow, bodyCode) end | FlowCodeCMove{code, trueJump, falseJump} => let (* We've generated a conditional move and possibly a return. If the trueJump and falseJump are only ever referenced from this block they're done, otherwise we still need to do them. *) val _ = case Array.sub(labelRefs, trueJump) of [_] => Array.update(processed, trueJump, true) | _ => () val _ = case Array.sub(labelRefs, falseJump) of [_] => Array.update(processed, falseJump, true) | _ => () val ExtendedBasicBlock { flow, ...} = Vector.sub(blocks, trueJump) val addSet = case flow of ExitCode => [] | Unconditional dest => [dest] | _ => raise InternalError "FlowCodeCMove" in genCode(addSet @ stillToDo, flow, code) end end in val ops = genCode([0], ExitCode, []) end in X86OPTIMISE.generateCode{code=newCode, ops=List.rev ops, labelCount= !outputLabelCount, resultClosure=resultClosure} end val nGenRegs = List.length generalRegisters structure Sharing = struct type intSet = intSet and extendedBasicBlock = extendedBasicBlock and regProperty = regProperty and reg = reg and closureRef = closureRef end end;