diff --git a/basis/Real32.sml b/basis/Real32.sml index 9b8307a5..8913a55d 100644 --- a/basis/Real32.sml +++ b/basis/Real32.sml @@ -1,318 +1,328 @@ (* Title: Real32 structure. Author: David Matthews - Copyright David Matthews 2018 + Copyright David Matthews 2018, 2021 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* This structure implements 32-bit real values, at least on X86. On other platforms it is whatever "float" is. N.B. This uses the X87 floating point instructions on X86/32. The precision on the X87 is set to 64-bits which is correct for the Real.real operations but involves an extra stage of rounding for Real32.real. That means that the results may not be strictly accurate. *) structure Real32: REAL where type real = Real32.real = struct open Real32 (* Inherit the type and the built-in functions. *) open IEEEReal - fun fromLarge IEEEReal.TO_NEAREST = fromRealRound - | fromLarge IEEEReal.TO_ZERO = fromRealTrunc - | fromLarge IEEEReal.TO_POSINF = fromRealCeil - | fromLarge IEEEReal.TO_NEGINF = fromRealFloor + (* On both the X86 and ARM there is only a single conversion from + double to float using the current rounding mode. If we want + a specific rounding mode we need to set the rounding. *) + fun fromLarge mode value = + let + val current = getRoundingMode() + val () = setRoundingMode mode + val result = fromReal value + val () = setRoundingMode current + in + result + end + + val fromRealRound = fromLarge TO_NEAREST (* Defined to use the current rounding mode. *) val fromInt = fromReal o Real.fromInt (* TODO *) and fromLargeInt = fromReal o Real.fromLargeInt val zero = fromInt 0 and one = fromInt 1 and four = fromInt 4 local (* The General call is now only used to get constants. *) val doFloatFloat : int*unit->real = RunCall.rtsCallFull2 "PolyRealGeneral" and doFloatInt : int*unit->int = RunCall.rtsCallFull2 "PolyRealGeneral" fun callFloat n x = doFloatFloat(n, x) and callFloatToInt n x = doFloatInt(n, x) in val radix : int = callFloatToInt 30 () val precision : int = callFloatToInt 31 () val maxFinite : real = callFloat 32 () val minNormalPos : real = callFloat 33 () val minPos : real = callFloat 34() end val posInf : real = one/zero; val negInf : real = ~one/zero; infix 4 == != ?=; val op != : real * real -> bool = not o op == local in (* isNan can be defined in terms of unordered. *) fun isNan x = unordered(x, x) (* NAN values do not match and infinities when multiplied by 0 produce NAN. *) fun isFinite x = x * zero == zero val copySign : (real * real) -> real = rtsCallFastFF_F "PolyRealFCopySign" (* Get the sign bit by copying the sign onto a finite value and then testing. This works for non-finite values and zeros. *) fun signBit r = copySign(one, r) < zero (* If we assume that all functions produce normalised results where possible, the only subnormal values will be those smaller than minNormalPos. 
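For example, under that assumption class minPos returns SUBNORMAL while class minNormalPos and class one return NORMAL, and isNormal minPos is false; these are illustrative consequences of the comparisons below rather than additional cases.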
*) fun isNormal x = isFinite x andalso abs x >= minNormalPos fun class x = if isFinite x then if x == zero then ZERO else if abs x >= minNormalPos then NORMAL else SUBNORMAL else if isNan x then NAN else (* not finite and not Nan *) INF fun sign x = if isNan x then raise General.Domain else if x == zero then 0 else if x < zero then ~1 else 1 end fun sameSign (x, y) = signBit x = signBit y (* Returns the minimum. In the case where one is a NaN it returns the other. In that case the comparison will be false. *) fun min (a: real, b: real): real = if a < b orelse isNan b then a else b (* Similarly for max. *) fun max (a: real, b: real): real = if a > b orelse isNan b then a else b fun checkFloat x = if isFinite x then x else if isNan x then raise General.Div else raise General.Overflow (* On certain platforms e.g. mips, toLarge does not preserve the sign on nans. We deal with the non-finite cases here. *) (* Use the Real versions for the moment. *) fun toManExp r = if not (isFinite r) orelse r == zero (* Nan, infinities and +/-0 all return r in the mantissa. We include 0 to preserve its sign. *) then {man=r, exp=0} else let val {man, exp} = Real.toManExp(toLarge r) in {man = fromRealRound man, exp = exp } end and fromManExp {man, exp} = if not (isFinite man) orelse man == zero (* Nan, infinities and +/-0 in the mantissa all return their argument. *) then man else fromRealRound(Real.fromManExp{man=toLarge man, exp=exp}) fun compare (r1, r2) = if r1 == r2 then General.EQUAL else if r1 < r2 then General.LESS else if r1 > r2 then General.GREATER else raise Unordered fun compareReal (r1, r2) = if r1 == r2 then EQUAL else if r1 < r2 then LESS else if r1 > r2 then GREATER else UNORDERED fun op ?= (x, y) = unordered(x, y) orelse x == y (* Although these may be built in in some architectures it's probably not worth treating them specially at the moment. *) fun *+ (x: real, y: real, z: real): real = x*y+z and *- (x: real, y: real, z: real): real = x*y-z val realFloor = rtsCallFastF_F "PolyRealFFloor" and realCeil = rtsCallFastF_F "PolyRealFCeil" and realTrunc = rtsCallFastF_F "PolyRealFTrunc" and realRound = rtsCallFastF_F "PolyRealFRound" val rem = rtsCallFastFF_F "PolyRealFRem" (* Split a real into whole and fractional parts. The fractional part must have the same sign as the number even if it is zero. *) fun split r = let val whole = realTrunc r val frac = r - whole in { whole = whole, frac = if not (isFinite r) then if isNan r then r else (* Infinity *) if r < zero then ~zero else zero else if frac == zero then if signBit r then ~zero else zero else frac } end (* Get the fractional part of a real. *) fun realMod r = #frac(split r) val nextAfter = rtsCallFastFF_F "PolyRealFNextAfter" fun toLargeInt mode r = Real.toLargeInt mode (toLarge r) local (* These are defined to raise Domain rather than Overflow on Nans. *) fun checkNan x = if isNan x then raise Domain else x (* If int is fixed we use the hardware conversions otherwise we convert it to real and use the real to arbitrary conversions. 
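Either way the intended results agree: for example floor(fromInt 3 / fromInt 2) = 1, ceil(fromInt 3 / fromInt 2) = 2, trunc(~(fromInt 3) / fromInt 2) = ~1 and round(fromInt 3 / fromInt 2) = 2, since round takes ties to even.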
*) in val floor = if Bootstrap.intIsArbitraryPrecision then LargeInt.toInt o toLargeInt IEEEReal.TO_NEGINF else FixedInt.toInt o floorFix o checkNan and ceil = if Bootstrap.intIsArbitraryPrecision then LargeInt.toInt o toLargeInt IEEEReal.TO_POSINF else FixedInt.toInt o ceilFix o checkNan and trunc = if Bootstrap.intIsArbitraryPrecision then LargeInt.toInt o toLargeInt IEEEReal.TO_ZERO else FixedInt.toInt o truncFix o checkNan and round = if Bootstrap.intIsArbitraryPrecision then LargeInt.toInt o toLargeInt IEEEReal.TO_NEAREST else FixedInt.toInt o roundFix o checkNan fun toInt IEEEReal.TO_NEGINF = floor | toInt IEEEReal.TO_POSINF = ceil | toInt IEEEReal.TO_ZERO = trunc | toInt IEEEReal.TO_NEAREST = round end (* The order of evaluation here is important. See Test175. *) fun fmt fm = let val doFmt = Real.fmt fm in fn r => doFmt (toLarge r) end val toString = Real.toString o toLarge (* Scan input source for a valid number. The format is the same as for double precision. Convert it using the current rounding mode. *) fun scan getc src = case Real.scan getc src of NONE => NONE | SOME (r, a) => SOME(fromReal r, a) val fromString = StringCvt.scanString scan (* toDecimal: It's particularly important to handle the nan case here because toLarge loses the sign bit on some architectures. *) fun toDecimal r = let val sign = signBit r val kind = class r in case kind of ZERO => { class = ZERO, sign = sign, digits=[], exp = 0 } | INF => { class = INF, sign = sign, digits=[], exp = 0 } | NAN => { class = NAN, sign = sign, digits=[], exp = 0 } | _ => (* NORMAL/SUBNORMAL *) Real.toDecimal(toLarge r) end (* Convert from decimal. This is defined to use TO_NEAREST. We need to handle NaNs specially because fromRealRound loses the sign on a NaN. *) local val posNan = abs(zero / zero) val negNan = ~posNan in fun fromDecimal { class = INF, sign=true, ...} = SOME negInf | fromDecimal { class = INF, sign=false, ...} = SOME posInf | fromDecimal { class = NAN, sign=true, ... } = SOME negNan | fromDecimal { class = NAN, sign=false, ... } = SOME posNan | fromDecimal arg = Option.map fromRealRound (Real.fromDecimal arg) end structure Math = struct type real = real val sqrt = rtsCallFastF_F "PolyRealFSqrt" and sin = rtsCallFastF_F "PolyRealFSin" and cos = rtsCallFastF_F "PolyRealFCos" and atan = rtsCallFastF_F "PolyRealFArctan" and exp = rtsCallFastF_F "PolyRealFExp" and ln = rtsCallFastF_F "PolyRealFLog" and tan = rtsCallFastF_F "PolyRealFTan" and asin = rtsCallFastF_F "PolyRealFArcSin" and acos = rtsCallFastF_F "PolyRealFArcCos" and log10 = rtsCallFastF_F "PolyRealFLog10" and sinh = rtsCallFastF_F "PolyRealFSinh" and cosh = rtsCallFastF_F "PolyRealFCosh" and tanh = rtsCallFastF_F "PolyRealFTanh" val atan2 = rtsCallFastFF_F "PolyRealFAtan2" val pow = rtsCallFastFF_F "PolyRealFPow" (* Derived values. *) val e = exp one val pi = four * atan one end (* Converter for literal constants. Copied from Real. *) local fun convReal (s: string) : real = let (* Set the rounding mode to TO_NEAREST whatever the current rounding mode. Otherwise the result of compiling a piece of code with a literal constant could depend on what the rounding mode was set to. We should always support TO_NEAREST. 
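For instance the literal 0.1 has no exact 32-bit representation; under TO_ZERO it would convert to the next float below the TO_NEAREST result, so without pinning the mode the same source could compile to different constants.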
*) val oldRounding = IEEEReal.getRoundingMode() val () = IEEEReal.setRoundingMode IEEEReal.TO_NEAREST val scanResult = StringCvt.scanString scan s val () = IEEEReal.setRoundingMode oldRounding in case scanResult of NONE => raise RunCall.Conversion "Invalid real constant" | SOME res => res end in (* Install this as a conversion function for real literals. *) val (): unit = RunCall.addOverload convReal "convReal" end end; val () = RunCall.addOverload Real32.>= ">=" and () = RunCall.addOverload Real32.<= "<=" and () = RunCall.addOverload Real32.> ">" and () = RunCall.addOverload Real32.< "<" and () = RunCall.addOverload Real32.+ "+" and () = RunCall.addOverload Real32.- "-" and () = RunCall.addOverload Real32.* "*" and () = RunCall.addOverload Real32.~ "~" and () = RunCall.addOverload Real32.abs "abs" and () = RunCall.addOverload Real32./ "/"; (* Install print function. *) local fun print_real _ _ (r: Real32.real) = PolyML.PrettyString(Real32.fmt (StringCvt.GEN(SOME 10)) r) in val () = PolyML.addPrettyPrinter print_real; end; diff --git a/mlsource/MLCompiler/BUILTINS.sml b/mlsource/MLCompiler/BUILTINS.sml index e3bd1f82..af533417 100644 --- a/mlsource/MLCompiler/BUILTINS.sml +++ b/mlsource/MLCompiler/BUILTINS.sml @@ -1,115 +1,116 @@ (* Signature for built-in functions Copyright David C. J. Matthews 2016, 2018-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) signature BUILTINS = sig datatype testConditions = TestEqual (* No TestNotEqual because that is always generated with "not" *) | TestLess | TestLessEqual | TestGreater | TestGreaterEqual | TestUnordered (* Reals only. *) datatype arithmeticOperations = ArithAdd | ArithSub | ArithMult | ArithQuot | ArithRem | ArithDiv | ArithMod datatype logicalOperations = LogicalAnd | LogicalOr | LogicalXor datatype shiftOperations = ShiftLeft | ShiftRightLogical (* Logical shift - zero added bits. *) | ShiftRightArithmetic (* Arithmetic shift - add the sign bit. *) datatype unaryOps = NotBoolean (* true => false; false => true - XOR *) | IsTaggedValue (* Test the tag bit. *) | MemoryCellLength (* Return the length of a memory cell (heap object) *) | MemoryCellFlags (* Return the flags byte of a memory cell (heap object) *) | ClearMutableFlag (* Remove the mutable flag from the flags byte *) | AtomicReset (* Set a value to (tagged) zero atomically. *) | LongWordToTagged (* Convert a LargeWord.word to a Word.word or FixedInt.int. *) | SignedToLongWord (* Convert a tagged value to a LargeWord with sign extension. *) | UnsignedToLongWord (* Convert a tagged value to a LargeWord without sign extension. *) | RealAbs of precision (* Set the sign bit of a real to positive. *) | RealNeg of precision (* Invert the sign bit of a real. *) | RealFixedInt of precision (* Convert an integer value into a real value. *) | FloatToDouble (* Convert a single precision floating point value to double precision. 
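This direction is always exact, every 32-bit float being representable as a 64-bit double, so no rounding is involved; the reverse conversion below is where the rounding mode matters.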
*) - | DoubleToFloat of IEEEReal.rounding_mode option (* Convert a double precision floating point value to single precision. *) + | DoubleToFloat (* Convert a double precision floating point value to single precision + using current rounding mode. *) | RealToInt of precision * IEEEReal.rounding_mode (* Convert a double or float to a fixed precision int. *) | TouchAddress (* Ensures that the cell is reachable. *) | AllocCStack (* Allocate space on the C stack. *) and precision = PrecSingle | PrecDouble (* Single or double precision floating pt. *) and binaryOps = (* Compare two words and return the result. This is used for both word values (isSigned=false) and fixed precision integer (isSigned=true). Values must be tagged and not pointers. *) WordComparison of { test: testConditions, isSigned: bool } (* Fixed precision int operations. These may raise Overflow. *) | FixedPrecisionArith of arithmeticOperations (* Arithmetic operations on word values. These do not raise Overflow. *) | WordArith of arithmeticOperations (* Load a word at a specific offset in a heap object. If this is immutable and the arguments are constants it can be folded at compile time since the result will never change. *) | WordLogical of logicalOperations (* Logical operations on words. *) | WordShift of shiftOperations (* Shift operations on words. *) (* Allocate a heap cell for byte data. The first argument is the number of words (not bytes) needed. The second argument is the "flags" byte which must include F_bytes and F_mutable. The new cell is not initialised. *) | AllocateByteMemory (* Operations on LargeWords. These are 32/64 bit values that are "boxed". *) | LargeWordComparison of testConditions | LargeWordArith of arithmeticOperations | LargeWordLogical of logicalOperations | LargeWordShift of shiftOperations | RealComparison of testConditions * precision | RealArith of arithmeticOperations * precision (* Equality of values which could be pointers or tagged values. At the lowest level this is the same as WordComparison but if we try to use an indexed case there must be a check that the values are tagged. *) | PointerEq | FreeCStack (* Free space on the C stack. *) | AtomicExchangeAdd (* Add a value to a ref atomically and return the old value. *) and nullaryOps = (* Get the current thread id *) GetCurrentThreadId (* Check whether the last RTS call set the exception status and raise it if it had. *) | CheckRTSException | CPUPause (* Pause a CPU while waiting for a spinlock. *) val unaryRepr: unaryOps -> string and binaryRepr: binaryOps -> string and testRepr: testConditions -> string and arithRepr: arithmeticOperations -> string and nullaryRepr: nullaryOps -> string end; diff --git a/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64ASSEMBLY.sml b/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64ASSEMBLY.sml index 49047444..96490d36 100644 --- a/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64ASSEMBLY.sml +++ b/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64ASSEMBLY.sml @@ -1,1757 +1,1810 @@ (* Copyright (c) 2021 David C. J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public Licence version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public Licence for more details. 
You should have received a copy of the GNU Lesser General Public Licence along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor ARM64ASSEMBLY ( structure Debug: DEBUG and Pretty: PRETTYSIG and CodeArray: CODEARRAYSIG ) : Arm64Assembly = struct open CodeArray Address exception InternalError = Misc.InternalError infix 5 << <<+ <<- >> >>+ >>- ~>> ~>>+ ~>>- (* Shift operators *) infix 3 andb orb xorb andbL orbL xorbL andb8 orb8 xorb8 val op << = Word.<< and op >> = Word.>> and op ~>> = Word.~>> and op andb = Word.andb and op orb = Word.orb val wordToWord8 = Word8.fromLargeWord o Word.toLargeWord and word8ToWord = Word.fromLargeWord o Word8.toLargeWord (* XReg is used for fixed point registers since X0 and W0 are the same register. *) datatype xReg = XReg of Word8.word | XZero | XSP (* VReg is used for the floating point registers since V0, D0 and S0 are the same register. *) and vReg = VReg of Word8.word (* A Label is a ref that is later set to the location. Several labels can be linked together so that they are only set at a single point. Only forward jumps are linked so when we come to finally set the label we will have the full list. *) type labels = Word.word ref list ref (* Condition codes. The encoding is standard. *) datatype condition = CCode of Word8.word val condEqual = CCode 0wx0 (* Z=1 *) and condNotEqual = CCode 0wx1 (* Z=0 *) and condCarrySet = CCode 0wx2 (* C=1 *) and condCarryClear = CCode 0wx3 (* C=0 *) and condNegative = CCode 0wx4 (* N=1 *) and condPositive = CCode 0wx5 (* N=0 includes zero *) and condOverflow = CCode 0wx6 (* V=1 *) and condNoOverflow = CCode 0wx7 (* V=0 *) and condUnsignedHigher = CCode 0wx8 (* C=1 && Z=0 *) and condUnsignedLowOrEq = CCode 0wx9 (* ! (C=1 && Z=0) *) and condSignedGreaterEq = CCode 0wxa (* N=V *) and condSignedLess = CCode 0wxb (* N<>V *) and condSignedGreater = CCode 0wxc (* Z==0 && N=V *) and condSignedLessEq = CCode 0wxd (* !(Z==0 && N=V) *) and condAlways = CCode 0wxe (* Any *) and condAlwaysNV = CCode 0wxf (* Any - alternative encoding. *) (* N.B. On subtraction and comparison the ARM uses an inverted carry flag for borrow. The C flag is set if there is NO borrow. This is the reverse of the X86. *) (* Offsets in the assembly code interface pointed at by X26. These are in units of 64-bits NOT bytes. *) val heapOverflowCallOffset = 1 and stackOverflowCallOffset = 2 and stackOverflowXCallOffset = 3 and exceptionHandlerOffset = 5 and stackLimitOffset = 6 and exceptionPacketOffset = 7 and threadIdOffset = 8 and heapLimitPtrOffset = 42 and heapAllocPtrOffset = 43 and mlStackPtrOffset = 44 (* 31 in the register field can either mean the zero register or the hardware stack pointer. Which meaning depends on the instruction. *) fun xRegOrXZ(XReg w) = w | xRegOrXZ XZero = 0w31 | xRegOrXZ XSP = raise InternalError "XSP not valid here" and xRegOrXSP(XReg w) = w | xRegOrXSP XZero = raise InternalError "XZero not valid here" | xRegOrXSP XSP = 0w31 (* There are cases where it isn't clear.
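For example, in the add/subtract immediate instructions below register 31 as Rn or Rd means SP, whereas in the shifted-register forms of the same operations it means XZR; each encoder declares its interpretation by taking xRegOrXZ or xRegOrXSP.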
*) and xRegOnly (XReg w) = w | xRegOnly XZero = raise InternalError "XZero not valid here" | xRegOnly XSP = raise InternalError "XSP not valid here" val X0 = XReg 0w0 and X1 = XReg 0w1 and X2 = XReg 0w2 and X3 = XReg 0w3 and X4 = XReg 0w4 and X5 = XReg 0w5 and X6 = XReg 0w6 and X7 = XReg 0w7 and X8 = XReg 0w8 and X9 = XReg 0w9 and X10 = XReg 0w10 and X11 = XReg 0w11 and X12 = XReg 0w12 and X13 = XReg 0w13 and X14 = XReg 0w14 and X15 = XReg 0w15 and X16 = XReg 0w16 and X17 = XReg 0w17 and X18 = XReg 0w18 and X19 = XReg 0w19 and X20 = XReg 0w20 and X21 = XReg 0w21 and X22 = XReg 0w22 and X23 = XReg 0w23 and X24 = XReg 0w24 and X25 = XReg 0w25 and X26 = XReg 0w26 and X27 = XReg 0w27 and X28 = XReg 0w28 and X29 = XReg 0w29 and X30 = XReg 0w30 val X_MLHeapLimit = X25 (* ML Heap limit pointer *) and X_MLAssemblyInt = X26 (* ML assembly interface pointer. *) and X_MLHeapAllocPtr = X27 (* ML Heap allocation pointer. *) and X_MLStackPtr = X28 (* ML Stack pointer. *) and X_LinkReg = X30 (* Link reg - return address *) fun vReg(VReg v) = v (* Only the first eight registers are currently used by ML. *) val V0 = VReg 0w0 and V1 = VReg 0w1 and V2 = VReg 0w2 and V3 = VReg 0w3 and V4 = VReg 0w4 and V5 = VReg 0w5 and V6 = VReg 0w6 and V7 = VReg 0w7 (* Some data instructions include a possible shift. *) datatype shiftType = ShiftLSL of word | ShiftLSR of word | ShiftASR of word | ShiftNone local fun checkImm6 w = if w > 0w63 then raise InternalError "shift > 63" else w in fun shiftEncode(ShiftLSL w) = (0w0, checkImm6 w) | shiftEncode(ShiftLSR w) = (0w1, checkImm6 w) | shiftEncode(ShiftASR w) = (0w2, checkImm6 w) | shiftEncode ShiftNone = (0w0, 0w0) end (* Other instructions include an extension i.e. a sign- or zero-extended value from one of the argument registers. When an extension is encoded there can also be a left shift which applies after the extension. I don't understand what difference, if any, there is between UXTX and SXTX. There's no ExtNone because we need to use either UXTW or UXTX depending on the length. *) datatype 'a extend = ExtUXTB of 'a (* Unsigned extend byte *) | ExtUXTH of 'a (* Unsigned extend halfword *) | ExtUXTW of 'a (* Unsigned extend word *) | ExtUXTX of 'a (* Left shift *) | ExtSXTB of 'a (* Sign extend byte *) | ExtSXTH of 'a (* Sign extend halfword *) | ExtSXTW of 'a (* Sign extend word *) | ExtSXTX of 'a (* Left shift *) (* Load/store instructions have only a single bit for the shift. For byte operations this is one bit shift; for others it scales by the size of the operand if set. *) datatype scale = ScaleOrShift | NoScale local (* Although there are three bits it seems that the shift is limited to 0 to 4.
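So the largest scaling of an index is by 16, e.g. add x0,x1,x2,uxtx #4; values above 4 in the field are reserved encodings and are rejected here.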
*) fun checkImm3 w = if w > 0w4 then raise InternalError "extend shift > 4" else w in fun extendArithEncode(ExtUXTB w) = (0w0, checkImm3 w) | extendArithEncode(ExtUXTH w) = (0w1, checkImm3 w) | extendArithEncode(ExtUXTW w) = (0w2, checkImm3 w) | extendArithEncode(ExtUXTX w) = (0w3, checkImm3 w) | extendArithEncode(ExtSXTB w) = (0w4, checkImm3 w) | extendArithEncode(ExtSXTH w) = (0w5, checkImm3 w) | extendArithEncode(ExtSXTW w) = (0w6, checkImm3 w) | extendArithEncode(ExtSXTX w) = (0w7, checkImm3 w) fun extendLSEncode(ExtUXTB v) = (0w0, v) | extendLSEncode(ExtUXTH v) = (0w1, v) | extendLSEncode(ExtUXTW v) = (0w2, v) | extendLSEncode(ExtUXTX v) = (0w3, v) | extendLSEncode(ExtSXTB v) = (0w4, v) | extendLSEncode(ExtSXTH v) = (0w5, v) | extendLSEncode(ExtSXTW v) = (0w6, v) | extendLSEncode(ExtSXTX v) = (0w7, v) end datatype wordSize = WordSize32 | WordSize64 (* Bit patterns on the ARM64 are encoded using a complicated scheme and only certain values can be encoded. An element can be 2, 4, 8, 16, 32 or 64 bits and must be a sequence of at least one zero bit followed by at least one one bit. This sequence can then be rotated within the element. Finally the element is replicated within the register up to 32 or 64 bits. All this information is encoded in 13 bits. N.B. Bit patterns of all zeros or all ones cannot be encoded. *) (* Encode the value if it is possible. *) fun encodeBitPattern(value, sf (* size flag *)) = (* Can't encode 0 or all ones. *) if value = 0w0 orelse value = Word64.notb 0w0 then NONE (* If this is 32-bits we can't encode all ones in the low-order 32-bits or any value that won't fit in 32-bits. *) else if sf = WordSize32 andalso value >= 0wxffffffff then NONE else let val regSize = case sf of WordSize32 => 0w32 | WordSize64 => 0w64 (* Get the element size. Look for the repeat of the pattern. *) fun getElemSize size = let val ns = size div 0w2 val mask = Word64.<<(0w1, ns) - 0w1 in if Word64.andb(value, mask) <> Word64.andb(Word64.>>(value, ns), mask) then size else if ns <= 0w2 then ns else getElemSize ns end val elemSize = getElemSize regSize fun log2 0w1 = 0w0 | log2 n = 0w1 + log2(Word.>>(n, 0w1)) val elemBits = log2 elemSize (* Find the rotation that puts as many of the zero bits in the element at the top. *) val elemMask = Word64.>>(Word64.notb 0w0, 0w64-elemSize) fun ror elt = Word64.orb((Word64.<<(Word64.andb(elt, 0w1), elemSize-0w1), Word64.>>(elt, 0w1))) and rol elt = Word64.orb(Word64.andb(elemMask, Word64.<<(elt, 0w1)), Word64.>>(elt, elemSize-0w1)) fun findRotation(v, n) = if ror v < v then findRotation(ror v, (n-0w1) mod elemSize) else if rol v < v then findRotation(rol v, n+0w1) else (v, n) val (rotated, rotation) = findRotation(Word64.andb(value, elemMask), 0w0) (* Count out the low order ones. If the result is zero then we've got a valid sequence of zeros followed by ones but if we discover a zero bit and the result isn't zero then we can't encode this. *) fun countLowOrderOnes(v, n) = if v = 0w0 then SOME n else if Word64.andb(v, 0w1) = 0w1 then countLowOrderOnes(Word64.>>(v, 0w1), n+0w1) else NONE in case countLowOrderOnes(rotated, 0w0) of NONE => NONE | SOME lowOrderOnes => let (* Encode the element size. *) val elemSizeEnc = 0wx7f - (Word.<<(0w1, elemBits+0w1) - 0w1) val n = if Word.andb(elemSizeEnc, 0wx40) = 0w0 then 0w1 else 0w0 val imms = Word.andb(Word.orb(elemSizeEnc, lowOrderOnes-0w1), 0wx3f) in SOME{n=n, imms=imms, immr=rotation} end end; (* Decode a pattern for printing.
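As examples of the encoding above: 0wx3f, a single run of six ones, is encodable, as is 0wx0f0f0f0f0f0f0f0f, an 8-bit element of four ones replicated across the register, whereas 0wx5 is not encodable because its set bits do not form one contiguous run under rotation. Decoding simply reverses those steps.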
*) fun decodeBitPattern{sf, n, immr, imms} = let (* Find the highest bit set in N:NOT(imms) *) fun highestBitSet 0w0 = 0 | highestBitSet n = 1+highestBitSet(Word.>>(n, 0w1)) val len = highestBitSet(Word.orb(Word.<<(n, 0w6), Word.xorb(imms, 0wx3f))) - 1 val _ = if len < 0 then raise InternalError "decodeBitPattern: invalid" else () val size = Word.<<(0w1, Word.fromInt len) val r = Word.andb(immr, size-0w1) and s = Word.andb(imms, size-0w1) val _ = if s = size-0w1 then raise InternalError "decodeBitPattern: invalid" else () val pattern = Word64.<<(0w1, s+0w1) - 0w1 (* Rotate right: shift right and put the low-order bit into the high-order bit. *) fun ror elt = Word64.orb((Word64.<<(Word64.andb(elt, 0w1), size-0w1), Word64.>>(elt, 0w1))) fun rotateBits(value, 0w0) = value | rotateBits(value, n) = rotateBits(ror value, n-0w1) val rotated = rotateBits(pattern, r) val regSize = if sf = 0w0 then 0w32 else 0w64 (* Replicate the rotated pattern to fill the register. *) fun replicate(pattern, size) = if size >= regSize then pattern else replicate(Word64.orb(pattern, Word64.<<(pattern, size)), size * 0w2) in replicate(rotated, size) end val isEncodableBitPattern = isSome o encodeBitPattern datatype instr = SimpleInstr of word | LoadAddressLiteral of {reg: xReg, value: machineWord} | LoadNonAddressLiteral of {reg: xReg, value: Word64.word} | Label of labels | Branch of { label: labels, jumpCondition: condition } | LoadLabelAddress of { label: labels, reg: xReg } | TestBitBranch of { label: labels, bitNo: Word8.word, brNonZero: bool, reg: xReg } | CompareBranch of { label: labels, brNonZero: bool, size: wordSize, reg: xReg } val nopCode = 0wxD503201F (* Add/subtract an optionally shifted 12-bit immediate (i.e. constant) to/from a register. The constant is zero-extended. The versions that do not set the flags can use XSP as the destination; the versions that set the flags can use XZero as the destination i.e. they discard the result and act as a comparison. *) local fun addSubRegImmediate(sf, oper, s, xdOp) ({regN, regD, immed, shifted}) = let val () = if immed >= 0wx1000 then raise InternalError "addSubRegImmediate: immed > 12 bits" else () in SimpleInstr( 0wx11000000 orb (sf << 0w31) orb (oper << 0w30) orb (s << 0w29) orb (if shifted then 0wx400000 else 0w0) orb (immed << 0w10) orb (word8ToWord(xRegOrXSP regN) << 0w5) orb word8ToWord(xdOp regD)) end in val addImmediate = addSubRegImmediate(0w1, 0w0, 0w0, xRegOrXSP) and addSImmediate = addSubRegImmediate(0w1, 0w0, 0w1, xRegOrXZ) and subImmediate = addSubRegImmediate(0w1, 0w1, 0w0, xRegOrXSP) and subSImmediate = addSubRegImmediate(0w1, 0w1, 0w1, xRegOrXZ) end (* Add/subtract a shifted register, optionally setting the flags. *) local (* X31 is XZ here, unlike the extended version. *) fun addSubtractShiftedReg (sf, oper, s) ({regM, regN, regD, shift}) = let val (shift, imm6) = shiftEncode shift in SimpleInstr(0wx0b000000 orb (sf << 0w31) orb (oper << 0w30) orb (s << 0w29) orb (shift << 0w22) orb (word8ToWord(xRegOnly regM) << 0w16) orb (imm6 << 0w10) orb (word8ToWord(xRegOrXZ regN) << 0w5) orb word8ToWord(xRegOrXZ regD)) end in val addShiftedReg = addSubtractShiftedReg(0w1, 0w0, 0w0) and addSShiftedReg = addSubtractShiftedReg(0w1, 0w0, 0w1) and subShiftedReg = addSubtractShiftedReg(0w1, 0w1, 0w0) and subSShiftedReg = addSubtractShiftedReg(0w1, 0w1, 0w1) end (* Add/subtract an extended register, optionally setting the flags. *) local (* SP can be used as Xn and also for Xd for the non-flags versions.
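That is the main reason for using the extended-register form: for example add sp,sp,x1,uxtx adjusts the stack pointer directly, something the shifted-register form cannot encode. The flag-setting versions take xRegOrXZ for Xd instead since they mostly serve as comparisons.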
*) fun addSubtractExtendedReg (sf, oper, s, opt, xD) ({regM, regN, regD, extend}) = let val (option, imm3) = extendArithEncode extend in SimpleInstr(0wx0b200000 orb (sf << 0w31) orb (oper << 0w30) orb (s << 0w29) orb (opt << 0w22) orb (word8ToWord(xRegOnly regM) << 0w16) orb (option << 0w13) orb (imm3 << 0w10) orb (word8ToWord(xRegOrXSP regN) << 0w5) orb word8ToWord(xD regD)) end in val addExtendedReg = addSubtractExtendedReg(0w1, 0w0, 0w0, 0w0, xRegOrXSP) and addSExtendedReg = addSubtractExtendedReg(0w1, 0w0, 0w1, 0w0, xRegOrXZ) and subExtendedReg = addSubtractExtendedReg(0w1, 0w1, 0w0, 0w0, xRegOrXSP) and subSExtendedReg = addSubtractExtendedReg(0w1, 0w1, 0w1, 0w0, xRegOrXZ) end (* Logical operations on a shifted register. *) local fun logicalShiftedReg (sf, oper, n) ({regM, regN, regD, shift}) = let val (shift, imm6) = shiftEncode shift in SimpleInstr(0wx0a000000 orb (sf << 0w31) orb (oper << 0w29) orb (shift << 0w22) orb (n << 0w21) orb (word8ToWord(xRegOrXZ regM) << 0w16) orb (imm6 << 0w10) orb (word8ToWord(xRegOrXZ regN) << 0w5) orb word8ToWord(xRegOrXZ regD)) end in val andShiftedReg = logicalShiftedReg(0w1, 0w0, 0w0) and orrShiftedReg = logicalShiftedReg(0w1, 0w1, 0w0) and eorShiftedReg = logicalShiftedReg(0w1, 0w2, 0w0) and andsShiftedReg = logicalShiftedReg(0w1, 0w3, 0w0) (* There are also versions that operate with an inverted version of the argument. *) end (* Two-source operations. *) local fun twoSourceInstr (sf, s, opcode) ({regM, regN, regD}) = SimpleInstr(0wx1ac00000 orb (sf << 0w31) orb (s << 0w29) orb (word8ToWord(xRegOnly regM) << 0w16) orb (opcode << 0w10) orb (word8ToWord(xRegOnly regN) << 0w5) orb word8ToWord(xRegOnly regD)) in (* Signed and unsigned division. *) val unsignedDivide = twoSourceInstr(0w1, 0w0, 0wx2) and signedDivide = twoSourceInstr(0w1, 0w0, 0wx3) (* Logical shift left Rd = Rn << (Rm mod 0w64) *) and logicalShiftLeftVariable = twoSourceInstr(0w1, 0w0, 0wx8) (* Logical shift right Rd = Rn >> (Rm mod 0w64) *) and logicalShiftRightVariable = twoSourceInstr(0w1, 0w0, 0wx9) (* Arithmetic shift right Rd = Rn ~>> (Rm mod 0w64) *) and arithmeticShiftRightVariable = twoSourceInstr(0w1, 0w0, 0wxa) end (* Three source operations. These are all variations of multiply. *) local fun threeSourceInstr (sf, op54, op31, o0) ({regM, regA, regN, regD}) = SimpleInstr(0wx1b000000 orb (sf << 0w31) orb (op54 << 0w29) orb (op31 << 0w21) orb (word8ToWord(xRegOnly regM) << 0w16) orb (o0 << 0w15) orb (word8ToWord(xRegOrXZ regA) << 0w10) orb (word8ToWord(xRegOnly regN) << 0w5) orb word8ToWord(xRegOnly regD)) in (* regD = regA + regN * regM *) val multiplyAndAdd = threeSourceInstr(0w1, 0w0, 0w0, 0w0) (* regD = regA - regN * regM *) and multiplyAndSub = threeSourceInstr(0w1, 0w0, 0w0, 0w1) (* Return the high-order part of a signed multiplication. *) fun signedMultiplyHigh({regM, regN, regD}) = threeSourceInstr(0w1, 0w0, 0w2, 0w0) { regM=regM, regN=regN, regD=regD, regA=XZero} end (* Loads: There are two versions of this on the ARM. There is a version that takes a signed 9-bit byte offset and a version that takes an unsigned 12-bit word offset. 
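For a 64-bit access the scaled version therefore reaches byte offsets 0 to 32760 in steps of 8, e.g. unitOffset=1 is byte offset 8, while the unscaled version covers ~256 to 255 bytes at any alignment; anything further needs a separate address computation.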
*) local fun loadStoreRegScaled (size, v, opc, xD) ({regT, regN, unitOffset}) = let val _ = (unitOffset >= 0 andalso unitOffset < 0x1000) orelse raise InternalError "loadStoreRegScaled: value out of range" in SimpleInstr(0wx39000000 orb (size << 0w30) orb (opc << 0w22) orb (v << 0w26) orb (Word.fromInt unitOffset << 0w10) orb (word8ToWord(xRegOrXSP regN) << 0w5) orb word8ToWord(xD regT)) end in val loadRegScaled = loadStoreRegScaled(0w3, 0w0, 0w1, xRegOrXZ) and storeRegScaled = loadStoreRegScaled(0w3, 0w0, 0w0, xRegOrXZ) (* (Unsigned) byte operations. There are also signed versions. *) and loadRegScaledByte = loadStoreRegScaled (0w0, 0w0, 0w1, xRegOrXZ) and storeRegScaledByte = loadStoreRegScaled (0w0, 0w0, 0w0, xRegOrXZ) and loadRegScaledDouble = loadStoreRegScaled(0w3, 0w1, 0w1, vReg) and storeRegScaledDouble = loadStoreRegScaled(0w3, 0w1, 0w0, vReg) and loadRegScaledFloat = loadStoreRegScaled(0w2, 0w1, 0w1, vReg) and storeRegScaledFloat = loadStoreRegScaled(0w2, 0w1, 0w0, vReg) end local (* Loads and stores with a signed byte offset. This includes simple unscaled addresses, pre-indexing and post-indexing. *) fun loadStoreByteAddress op4 (size, v, opc) ({regT, regN, byteOffset}) = let val _ = (byteOffset >= ~256 andalso byteOffset < 256) orelse raise InternalError "loadStoreUnscaled: value out of range" val imm9 = Word.fromInt byteOffset andb 0wx1ff in SimpleInstr(0wx38000000 orb (size << 0w30) orb (opc << 0w22) orb (v << 0w26) orb (imm9 << 0w12) orb (op4 << 0w10) orb (word8ToWord(xRegOrXSP regN) << 0w5) orb word8ToWord(xRegOrXZ regT)) end val loadStoreUnscaled = loadStoreByteAddress 0w0 and loadStorePostIndex = loadStoreByteAddress 0w1 and loadStorePreIndex = loadStoreByteAddress 0w3 in val loadRegUnscaled = loadStoreUnscaled (0w3, 0w0, 0w1) and storeRegUnscaled = loadStoreUnscaled (0w3, 0w0, 0w0) (* (Unsigned) byte operations. There are also signed versions. *) and loadRegUnscaledByte = loadStoreUnscaled (0w0, 0w0, 0w1) and storeRegUnscaledByte = loadStoreUnscaled (0w0, 0w0, 0w0) val loadRegPostIndex = loadStorePostIndex (0w3, 0w0, 0w1) and storeRegPostIndex = loadStorePostIndex (0w3, 0w0, 0w0) and loadRegPostIndexByte = loadStorePostIndex (0w0, 0w0, 0w1) and storeRegPostIndexByte = loadStorePostIndex (0w0, 0w0, 0w0) val loadRegPreIndex = loadStorePreIndex (0w3, 0w0, 0w1) and storeRegPreIndex = loadStorePreIndex (0w3, 0w0, 0w0) and loadRegPreIndexByte = loadStorePreIndex (0w0, 0w0, 0w1) and storeRegPreIndexByte = loadStorePreIndex (0w0, 0w0, 0w0) end (* Load/store with a register offset i.e. an index register. *) local fun loadStoreRegRegisterOffset (size, v, opc) ({regT, regN, regM, option}) = let val (opt, s) = case extendLSEncode option of (opt, ScaleOrShift) => (opt, 0w1) | (opt, NoScale) => (opt, 0w0) in SimpleInstr(0wx38200800 orb (size << 0w30) orb (v << 0w26) orb (opc << 0w22) orb (word8ToWord(xRegOnly regM) << 0w16) orb (opt << 0w13) orb (s << 0w12) orb (word8ToWord(xRegOrXSP regN) << 0w5) orb word8ToWord(xRegOrXZ regT)) end in val loadRegIndexed = loadStoreRegRegisterOffset(0w3, 0w0, 0w1) and storeRegIndexed = loadStoreRegRegisterOffset(0w3, 0w0, 0w0) and loadRegIndexedByte = loadStoreRegRegisterOffset(0w0, 0w0, 0w1) and storeRegIndexedByte = loadStoreRegRegisterOffset(0w0, 0w0, 0w0) end (* Addresses must go in the constant area at the end of the code where they can be found by the GC. *) fun loadAddressConstant(xReg, valu) = LoadAddressLiteral{reg=xReg, value=valu} (* Non-address constants. These may or may not be tagged values. 
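To illustrate loadNonAddressConstant below: the value 0wx12345 has hw3=0wx2345 and hw2=0wx1 with the top 32 bits zero, so it is emitted as movz wN,#0x2345 followed by movk wN,#0x1,lsl #16 rather than going into the constant area; the register name wN is schematic.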
*) local fun loadConstantFromCArea(xReg, valu) = LoadNonAddressLiteral{reg=xReg, value=valu} (* Move an unsigned constant. *) fun genMoveShortConstToReg(xReg, constnt, shiftBits, is64, opc) = SimpleInstr((if is64 then 0wx80000000 else 0w0) orb (opc << 0w29) orb 0wx12800000 orb (shiftBits << 0w21) orb (constnt << 0w5) orb word8ToWord(xRegOnly xReg)) val opcMovZ = 0w2 (* Zero the rest of the register. *) and opcMovK = 0w3 (* Keep the rest of the register. *) and opcMovN = 0w0 (* Invert the value and the rest of the register. *) in fun loadNonAddressConstant(xReg, valu) = (* If this can be encoded using at most two instructions we do that; otherwise the constant is stored in the non-address constant area. *) let fun extW h = Word.fromLarge(LargeWord.>>(Word64.toLarge valu, h*0w16)) andb 0wxffff val hw0 = extW 0w3 and hw1 = extW 0w2 and hw2 = extW 0w1 and hw3 = extW 0w0 in if hw0 = 0w0 andalso hw1 = 0w0 then (* If the top 32-bits are zero we can use a 32-bit move. *) ( if hw2 = 0w0 then genMoveShortConstToReg(xReg, hw3, 0w0, false, opcMovZ) else if hw3 = 0w0 then genMoveShortConstToReg(xReg, hw2, 0w1, false, opcMovZ) else if hw2 = 0wxffff then genMoveShortConstToReg(xReg, Word.xorb(hw3, 0wxffff), 0w0, false, opcMovN) else if hw3 = 0wxffff then genMoveShortConstToReg(xReg, Word.xorb(hw2, 0wxffff), 0w1, false, opcMovN) else ( genMoveShortConstToReg(xReg, hw3, 0w0, false, opcMovZ); genMoveShortConstToReg(xReg, hw2, 0w1, false, opcMovK) ) ) (* TODO: For the moment just handle the simple case. *) else if hw0 = 0wxffff andalso hw1 = 0wxffff andalso hw2 = 0wxffff then genMoveShortConstToReg(xReg, Word.xorb(hw3, 0wxffff), 0w0, true, opcMovN) else loadConstantFromCArea(xReg, valu) end end (* Instructions involved in thread synchronisation. *) val yield = SimpleInstr 0wxD503203F (* Yield inside a spin-lock. *) and dmbIsh = SimpleInstr 0wxD5033BBF (* Memory barrier. *) (* Acquire exclusive access to a memory location and load its current value. *) fun loadAcquireExclusiveRegister{regN, regT} = SimpleInstr(0wxC85FFC00 orb (word8ToWord(xRegOrXSP regN) << 0w5) orb word8ToWord(xRegOnly regT)) (* Release exclusive access and test whether it succeeded. Sets regS to 0 if successful otherwise 1, in which case we have to repeat the operation. *) and storeReleaseExclusiveRegister{regN, regS, regT} = SimpleInstr(0wxC800FC00 orb (word8ToWord(xRegOnly regS) << 0w16) orb (word8ToWord(xRegOrXSP regN) << 0w5) orb word8ToWord(xRegOnly regT)) (* Jump to the address in the register and put the address of the next instruction into X30. *) fun branchAndLinkReg(dest) = SimpleInstr(0wxD63F0000 orb (word8ToWord(xRegOnly dest) << 0w5)) (* Jump to the address in the register. *) fun branchRegister(dest) = SimpleInstr(0wxD61F0000 orb (word8ToWord(xRegOnly dest) << 0w5)) (* Jump to the address in the register and hint this is a return. *) fun returnRegister(dest) = SimpleInstr(0wxD65F0000 orb (word8ToWord(xRegOnly dest) << 0w5)) (* Put a label into the code. *) val setLabel = Label (* Create a label. *) fun createLabel () = ref [ref 0w0] (* A conditional or unconditional branch. *) and conditionalBranch(cond, label) = Branch{label=label, jumpCondition=cond} (* Put the address of a label into a register - used for handlers and cases.
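This becomes a pc-relative adr instruction when the code is generated, so the label must lie within the adr range of the instruction; genCode below checks the offset when it fills it in.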
*) and loadLabelAddress(reg, label) = LoadLabelAddress{label=label, reg=reg} (* Test a bit in a register and branch if zero/nonzero *) and testBitBranchZero(reg, bit, label) = TestBitBranch{label=label, bitNo=bit, brNonZero=false, reg=reg} and testBitBranchNonZero(reg, bit, label) = TestBitBranch{label=label, bitNo=bit, brNonZero=true, reg=reg} (* Compare a register with zero and branch if zero/nonzero *) and compareBranchZero(reg, size, label) = CompareBranch{label=label, brNonZero=false, size=size, reg=reg} and compareBranchNonZero(reg, size, label) = CompareBranch{label=label, brNonZero=true, size=size, reg=reg} (* Set the destination register to the value of the first reg if the condition is true otherwise to a (possibly modified) version of the second argument. There are variants that set it unmodified, incremented, inverted and negated. *) local fun conditionalSelect (sf, opc, op2) {regD, regFalse, regTrue, cond=CCode cond} = SimpleInstr(0wx1A800000 orb (sf << 0w31) orb (opc << 0w30) orb (word8ToWord(xRegOrXZ regFalse) << 0w16) orb (word8ToWord cond << 0w12) orb (op2 << 0w10) orb (word8ToWord(xRegOrXZ regTrue) << 0w5) orb word8ToWord(xRegOrXZ regD)) in val conditionalSet = conditionalSelect(0w1, 0w0, 0w0) and conditionalSetIncrement = conditionalSelect(0w1, 0w0, 0w1) and conditionalSetInverted = conditionalSelect(0w1, 0w1, 0w0) and conditionalSetNegated = conditionalSelect(0w1, 0w1, 0w1) end (* This combines the effect of a left and right shift. There are various derived forms of this depending on the relative values of immr and imms. if imms >= immr copies imms-immr+1 bits from bit position immr to the lsb bits of the destination. if imms < immr copies imms+1 bits from the lsb bit to bit position regsize-immr. How the remaining bits are affected depends on the instruction. BitField instructions do not affect other bits. UnsignedBitField instructions zero other bits. SignedBitField instructions set the high-order bits to a copy of the highest bit copied and zero the low-order bits.
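Worked example: logicalShiftLeft below with wordSize=WordSize64 and shift=0w3 produces immr=0w61 and imms=0w60; since imms < immr this copies imms+1 = 61 bits from the lsb to bit position 64-61 = 3, i.e. a left shift by three with zeros in the low bits.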
*) local fun bitfield (sf, opc, n) {immr, imms, regN, regD} = SimpleInstr(0wx13000000 orb (sf << 0w31) orb (opc << 0w29) orb (n << 0w22) orb (immr << 0w16) orb (imms << 0w10) orb (word8ToWord(xRegOrXZ regN) << 0w5) orb word8ToWord(xRegOrXZ regD)) val signedBitfieldMove32 = bitfield(0w0, 0w0, 0w0) and bitfieldMove32 = bitfield(0w0, 0w1, 0w0) and unsignedBitfieldMove32 = bitfield(0w0, 0w2, 0w0) and signedBitfieldMove64 = bitfield(0w1, 0w0, 0w1) and bitfieldMove64 = bitfield(0w1, 0w1, 0w1) and unsignedBitfieldMove64 = bitfield(0w1, 0w2, 0w1) in fun logicalShiftLeft{wordSize=WordSize64, shift, regN, regD} = unsignedBitfieldMove64{immr=Word.~ shift mod 0w64, imms=0w64-0w1-shift, regN=regN, regD=regD} | logicalShiftLeft{wordSize=WordSize32, shift, regN, regD} = unsignedBitfieldMove32{immr=Word.~ shift mod 0w32, imms=0w32-0w1-shift, regN=regN, regD=regD} and logicalShiftRight{wordSize=WordSize64, shift, regN, regD} = unsignedBitfieldMove64{immr=shift, imms=0wx3f, regN=regN, regD=regD} | logicalShiftRight{wordSize=WordSize32, shift, regN, regD} = unsignedBitfieldMove32{immr=shift, imms=0wx1f, regN=regN, regD=regD} and unsignedBitfieldInsertinZeros{wordSize=WordSize64, lsb, width, regN, regD} = unsignedBitfieldMove64{immr=Word.~ lsb mod 0w64, imms=width-0w1, regN=regN, regD=regD} | unsignedBitfieldInsertinZeros{wordSize=WordSize32, lsb, width, regN, regD} = unsignedBitfieldMove32{immr=Word.~ lsb mod 0w32, imms=width-0w1, regN=regN, regD=regD} and arithmeticShiftRight{wordSize=WordSize64, shift, regN, regD} = signedBitfieldMove64{immr=shift, imms=0wx3f, regN=regN, regD=regD} | arithmeticShiftRight{wordSize=WordSize32, shift, regN, regD} = signedBitfieldMove32{immr=shift, imms=0wx1f, regN=regN, regD=regD} and bitfieldInsert{wordSize=WordSize64, lsb, width, regN, regD} = bitfieldMove64{immr=Word.~ lsb mod 0w64, imms=width-0w1, regN=regN, regD=regD} | bitfieldInsert{wordSize=WordSize32, lsb, width, regN, regD} = bitfieldMove32{immr=Word.~ lsb mod 0w32, imms=width-0w1, regN=regN, regD=regD} end local (* Logical immediates. AND, OR, XOR and ANDS. Assumes that the immediate value has already been checked as valid. The non-flags versions can use SP as the destination. *) fun logicalImmediate (opc, xD) {wordSize, bits, regN, regD} = let val s = case wordSize of WordSize32 => 0w0 | WordSize64 => 0w1 val {n, imms, immr} = case encodeBitPattern(bits, wordSize) of NONE => raise InternalError "testBitPattern: unable to encode bit pattern" | SOME res => res in SimpleInstr(0wx12000000 orb (opc << 0w29) orb (s << 0w31) orb (n << 0w22) orb (immr << 0w16) orb (imms << 0w10) orb (word8ToWord(xRegOrXZ regN) << 0w5) orb word8ToWord(xD regD)) end in val bitwiseAndImmediate = logicalImmediate (0w0, xRegOrXSP) and bitwiseOrImmediate = logicalImmediate (0w1, xRegOrXSP) and bitwiseXorImmediate = logicalImmediate (0w2, xRegOrXSP) and bitwiseAndSImmediate = logicalImmediate (0w3, xRegOrXZ) (* Test a bit pattern in a register. If the pattern is within the low-order 32-bits we use a 32-bit test. 
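For example, testBitPattern with bits=0w1 tests the tag bit: it emits the 32-bit ands form with XZero as the destination, which is the tst instruction, so only the condition flags are affected.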
*) fun testBitPattern(reg, bits) = let val w = if bits <= 0wxffffffff then WordSize32 else WordSize64 in bitwiseAndSImmediate({wordSize=w, bits=bits, regN=reg, regD=XZero}) end end local (* Floating point operations - 2 source *) fun floatingPoint2Source (pt, opc) {regM, regN, regD} = SimpleInstr(0wx1E200800 orb (pt << 0w22) orb (word8ToWord(vReg regM) << 0w16) orb (opc << 0w12) orb (word8ToWord(vReg regN) << 0w5) orb word8ToWord(vReg regD)) in val multiplyFloat = floatingPoint2Source(0w0, 0wx0) and divideFloat = floatingPoint2Source(0w0, 0wx1) and addFloat = floatingPoint2Source(0w0, 0wx2) and subtractFloat = floatingPoint2Source(0w0, 0wx3) and multiplyDouble = floatingPoint2Source(0w1, 0wx0) and divideDouble = floatingPoint2Source(0w1, 0wx1) and addDouble = floatingPoint2Source(0w1, 0wx2) and subtractDouble = floatingPoint2Source(0w1, 0wx3) end local - (* Move between a floating point and a general register without conversion. *) - fun fmoveGeneral (sf, ftype, mode, opcode, rN, rD) {regN, regD} = - SimpleInstr(0wx1E200000 orb (sf << 0w31) orb (ftype << 0w22) orb + (* Move between a floating point and a general register with or without conversion. *) + fun fmoveGeneral (sf, s, ptype, mode, opcode, rN, rD) {regN, regD} = + SimpleInstr(0wx1E200000 orb (sf << 0w31) orb (s << 0w29) orb (ptype << 0w22) orb (mode << 0w19) orb (opcode << 0w16) orb (word8ToWord(rN regN) << 0w5) orb word8ToWord(rD regD)) + open IEEEReal in - val moveGeneralToFloat = fmoveGeneral(0w0, 0w0, 0w0, 0w7, xRegOrXZ, vReg) - and moveFloatToGeneral = fmoveGeneral(0w0, 0w0, 0w0, 0w6, vReg, xRegOnly) - and moveGeneralToDouble = fmoveGeneral(0w1, 0w1, 0w0, 0w7, xRegOrXZ, vReg) - and moveDoubleToGeneral = fmoveGeneral(0w1, 0w1, 0w0, 0w6, vReg, xRegOnly) + (* Moves without conversion *) + val moveGeneralToFloat = fmoveGeneral(0w0, 0w0, 0w0, 0w0, 0w7, xRegOrXZ, vReg) + and moveFloatToGeneral = fmoveGeneral(0w0, 0w0, 0w0, 0w0, 0w6, vReg, xRegOnly) + and moveGeneralToDouble = fmoveGeneral(0w1, 0w0, 0w1, 0w0, 0w7, xRegOrXZ, vReg) + and moveDoubleToGeneral = fmoveGeneral(0w1, 0w0, 0w1, 0w0, 0w6, vReg, xRegOnly) + (* Moves with conversion - signed. 
The argument is a 64-bit value SCVTF *) + and convertIntToFloat = fmoveGeneral(0w1, 0w0, 0w0, 0w0, 0w2, xRegOrXZ, vReg) + and convertIntToDouble = fmoveGeneral(0w1, 0w0, 0w1, 0w0, 0w2, xRegOrXZ, vReg) + + fun convertFloatToInt TO_NEAREST = + fmoveGeneral(0w1, 0w0, 0w0, 0w0, 0w4, vReg, xRegOnly) (* fcvtas *) + | convertFloatToInt TO_NEGINF = + fmoveGeneral(0w1, 0w0, 0w0, 0w2, 0w0, vReg, xRegOnly) (* fcvtms *) + | convertFloatToInt TO_POSINF = + fmoveGeneral(0w1, 0w0, 0w0, 0w1, 0w0, vReg, xRegOnly) (* fcvtps *) + | convertFloatToInt TO_ZERO = + fmoveGeneral(0w1, 0w0, 0w0, 0w3, 0w0, vReg, xRegOnly) (* fcvtzs *) + + and convertDoubleToInt TO_NEAREST = + fmoveGeneral(0w1, 0w0, 0w1, 0w0, 0w4, vReg, xRegOnly) (* fcvtas *) + | convertDoubleToInt TO_NEGINF = + fmoveGeneral(0w1, 0w0, 0w1, 0w2, 0w0, vReg, xRegOnly) (* fcvtms *) + | convertDoubleToInt TO_POSINF = + fmoveGeneral(0w1, 0w0, 0w1, 0w1, 0w0, vReg, xRegOnly) (* fcvtps *) + | convertDoubleToInt TO_ZERO = + fmoveGeneral(0w1, 0w0, 0w1, 0w3, 0w0, vReg, xRegOnly) (* fcvtzs *) + end + + local + fun floatingPtQuietCompare(ftype, opc) {regM, regN} = + SimpleInstr(0wx1E202000 orb (ftype << 0w22) orb + (word8ToWord(vReg regM) << 0w16) orb (word8ToWord(vReg regN) << 0w5) orb + (opc << 0w3)) + in + val compareFloat = floatingPtQuietCompare(0w0, 0w0) + and compareDouble = floatingPtQuietCompare(0w1, 0w0) + (* It is also possible to compare a single register with zero using opc=1 *) + end + + local + (* Floating point single source. *) + fun floatingPtSingle (ptype, opc) {regN, regD} = + SimpleInstr(0wx1E204000 orb (ptype << 0w22) orb (opc << 0w15) orb + (word8ToWord(vReg regN) << 0w5) orb word8ToWord(vReg regD)) + in + val moveFloatToFloat = floatingPtSingle(0w0, 0wx0) + and absFloat = floatingPtSingle(0w0, 0wx1) + and negFloat = floatingPtSingle(0w0, 0wx2) + and convertFloatToDouble = floatingPtSingle(0w0, 0wx5) + and moveDoubleToDouble = floatingPtSingle(0w1, 0wx0) + and absDouble = floatingPtSingle(0w1, 0wx1) + and negDouble = floatingPtSingle(0w1, 0wx2) + and convertDoubleToFloat = floatingPtSingle(0w1, 0wx4) end (* This word is put in after a call to the RTS trap-handler. All the registers are saved and restored across a call to the trap-handler; the register mask contains those that may contain an address and so need to be scanned and possibly updated if there is a GC. *) fun registerMask(regs) = let fun addToMask(r, mask) = mask orb (0w1 << word8ToWord(xRegOnly r)) val maskWord = List.foldl addToMask 0w0 regs in SimpleInstr(0wx02000000 (* Reserved instr range. *) orb maskWord) end (* Size of each code word. All except labels are one word at the moment. *) fun codeSize (SimpleInstr _) = 1 (* Number of 32-bit words *) | codeSize (LoadAddressLiteral _) = 1 | codeSize (LoadNonAddressLiteral _) = 1 | codeSize (Label _) = 0 | codeSize (Branch _) = 1 | codeSize (LoadLabelAddress _) = 1 | codeSize (TestBitBranch _) = 1 | codeSize (CompareBranch _) = 1 (* Store a 32-bit value in the code *) fun writeInstr(value, wordAddr, seg) = let fun putBytes(value, a, seg, i) = if i = 0w4 then () else ( byteVecSet(seg, a+i, wordToWord8(value andb 0wxff)); putBytes(value >> 0w8, a, seg, i+0w1) ) in putBytes(value, wordAddr << 0w2, seg, 0w0) end (* Store a 64-bit constant in the code area. 
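The bytes are written low byte first at increasing addresses, i.e. little-endian: the constant 0wx0102 is laid out as 02 01 00 00 00 00 00 00.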
*) fun write64Bit(value, word64Addr, seg) = let fun putBytes(value, a, seg, i) = if i = 0w8 then () else ( byteVecSet(seg, a+i, Word8.fromLarge(Word64.toLarge value)); putBytes(Word64.>>(value, 0w8), a, seg, i+0w1) ) in putBytes(value, word64Addr << 0w3, seg, 0w0) end fun genCode(ops, addressConsts, nonAddressConsts) = let local (* First pass - set the labels. *) fun setLabels(Label(ref labs) :: ops, ic) = (List.app(fn d => d := ic) labs; setLabels(ops, ic)) | setLabels(oper :: ops, ic) = setLabels(ops, ic + Word.fromInt(codeSize oper)) | setLabels([], ic) = ic in val codeSize = setLabels(ops, 0w0) (* Number of 32-bit instructions *) end val wordsOfCode = (codeSize + 0w1) div 0w2 (* Round up to 64-bits *) val paddingWord = if Word.andb(codeSize, 0w1) = 0w1 then [SimpleInstr nopCode] else [] val numNonAddrConsts = Word.fromInt(List.length nonAddressConsts) and numAddrConsts = Word.fromInt(List.length addressConsts) val segSize = wordsOfCode + numAddrConsts + numNonAddrConsts + 0w4 (* 4 extra words *) val codeVec = byteVecMake segSize fun genCodeWords([], _ , _, _) = () | genCodeWords(SimpleInstr code :: tail, wordNo, aConstNum, nonAConstNum) = ( writeInstr(code, wordNo, codeVec); genCodeWords(tail, wordNo+0w1, aConstNum, nonAConstNum) ) | genCodeWords(LoadAddressLiteral{reg, ...} :: tail, wordNo, aConstNum, nonAConstNum) = let (* The offset is in 32-bit words. The first of the constants is at offset wordsOfCode+3 *) val offsetOfConstant = (wordsOfCode+numNonAddrConsts+0w3+aConstNum)*0w2 - wordNo val _ = offsetOfConstant < 0wx100000 orelse raise InternalError "Offset to constant is too large" val code = 0wx58000000 orb (offsetOfConstant << 0w5) orb word8ToWord(xRegOnly reg) in writeInstr(code, wordNo, codeVec); genCodeWords(tail, wordNo+0w1, aConstNum+0w1, nonAConstNum) end | genCodeWords(LoadNonAddressLiteral{reg, ...} :: tail, wordNo, aConstNum, nonAConstNum) = let (* The offset is in 32-bit words. *) val offsetOfConstant = (wordsOfCode+nonAConstNum)*0w2 - wordNo val _ = offsetOfConstant < 0wx100000 orelse raise InternalError "Offset to constant is too large" val code = 0wx58000000 orb (offsetOfConstant << 0w5) orb word8ToWord(xRegOnly reg) in writeInstr(code, wordNo, codeVec); genCodeWords(tail, wordNo+0w1, aConstNum, nonAConstNum+0w1) end | genCodeWords(Label _ :: tail, wordNo, aConstNum, nonAConstNum) = genCodeWords(tail, wordNo, aConstNum, nonAConstNum) (* No code. *) | genCodeWords(Branch{ label=ref labs, jumpCondition=CCode cond }:: tail, wordNo, aConstNum, nonAConstNum) = let val dest = !(hd labs) val offset = Word.toInt dest - Word.toInt wordNo in if cond = 0wxe orelse cond = 0wxf then (* We can use an unconditional branch. 
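The unconditional form has a 26-bit offset in instructions, a range of about +-128MB, whereas the conditional form below has only 19 bits, about +-1MB; hence the two separate range checks.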
*) ( (offset < Word.toInt(0w1 << 0w25) andalso offset >= ~ (Word.toInt(0w1 << 0w25))) orelse raise InternalError "genCodeWords: branch too far"; writeInstr(0wx14000000 orb (Word.fromInt offset andb 0wx03ffffff), wordNo, codeVec) ) else ( (offset < Word.toInt(0w1 << 0w18) andalso offset >= ~ (Word.toInt(0w1 << 0w18))) orelse raise InternalError "genCodeWords: branch too far"; writeInstr(0wx54000000 orb ((Word.fromInt offset andb 0wx07ffff) << 0w5) orb word8ToWord cond, wordNo, codeVec) ); genCodeWords(tail, wordNo+0w1, aConstNum, nonAConstNum) end | genCodeWords(LoadLabelAddress{label=ref labs, reg} :: tail, wordNo, aConstNum, nonAConstNum) = let val dest = !(hd labs) val offset = dest - wordNo val _ = offset < 0wx100000 orelse offset >= ~ 0wx100000 orelse raise InternalError "Offset to label address is too large" val code = 0wx10000000 orb ((offset andb 0wx7ffff) << 0w5) orb word8ToWord(xRegOnly reg) in writeInstr(code, wordNo, codeVec); genCodeWords(tail, wordNo+0w1, aConstNum, nonAConstNum) end | genCodeWords(TestBitBranch{label=ref labs, bitNo, brNonZero, reg} :: tail, wordNo, aConstNum, nonAConstNum) = let val dest = !(hd labs) val offset = dest - wordNo val _ = offset < 0wx2000 orelse offset >= ~ 0wx2000 orelse raise InternalError "TestBitBranch: Offset to label address is too large" val _ = bitNo <= 0w63 orelse raise InternalError "TestBitBranch: bit number > 63" val code = 0wx36000000 orb (if bitNo >= 0w32 then 0wx80000000 else 0w0) orb (if brNonZero then 0wx01000000 else 0w0) orb (word8ToWord(Word8.andb(bitNo, 0wx3f)) << 0w19) orb ((offset andb 0wx3fff) << 0w5) orb word8ToWord(xRegOnly reg) in writeInstr(code, wordNo, codeVec); genCodeWords(tail, wordNo+0w1, aConstNum, nonAConstNum) end | genCodeWords(CompareBranch{label=ref labs, brNonZero, size, reg} :: tail, wordNo, aConstNum, nonAConstNum) = let val dest = !(hd labs) val offset = dest - wordNo val _ = offset < 0wx40000 orelse offset >= ~ 0wx40000 orelse raise InternalError "CompareBranch: Offset to label address is too large" val code = 0wx34000000 orb (case size of WordSize64 => 0wx80000000 | WordSize32 => 0w0) orb (if brNonZero then 0wx01000000 else 0w0) orb ((offset andb 0wx7ffff) << 0w5) orb word8ToWord(xRegOnly reg) in writeInstr(code, wordNo, codeVec); genCodeWords(tail, wordNo+0w1, aConstNum, nonAConstNum) end in genCodeWords (ops @ paddingWord, 0w0, 0w0, 0w0); (* Copy in the non-address constants. *) List.foldl(fn (cVal, addr) => (write64Bit(cVal, addr, codeVec); addr+0w1)) wordsOfCode nonAddressConsts; (codeVec (* Return the completed code. *), wordsOfCode+numNonAddrConsts (* And the size in 64-bit words. *)) end (* Store a 64-bit value in the code *) fun set64(value, wordNo, seg) = let val addrs = wordNo * 0w8 fun putBytes(value, a, seg, i) = if i = 0w8 then () else ( byteVecSet(seg, a+i, Word8.fromInt(value mod 256)); putBytes(value div 256, a, seg, i+0w1) ) in putBytes(value, addrs, seg, 0w0) end (* Print the instructions in the code. 
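Each line of output is the byte address padded to six hex digits, the instruction word as eight hex digits and then the decoded instruction; for instance the word 0wxF9400042 decodes as ldr x2,[x2,#0].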
*) fun printCode (codeVec, functionName, wordsOfCode, printStream) = let val numInstructions = wordsOfCode * 0w2 (* wordsOfCode is the number of 64-bit words. *) fun printHex (v, n) = let val s = Word.fmt StringCvt.HEX v val pad = CharVector.tabulate(Int.max(0, n-size s), fn _ => #"0") in printStream pad; printStream s end fun printCondition 0wx0 = printStream "eq" | printCondition 0wx1 = printStream "ne" | printCondition 0wx2 = printStream "cs" | printCondition 0wx3 = printStream "cc" | printCondition 0wx4 = printStream "mi" | printCondition 0wx5 = printStream "pl" | printCondition 0wx6 = printStream "vs" | printCondition 0wx7 = printStream "vc" | printCondition 0wx8 = printStream "hi" | printCondition 0wx9 = printStream "ls" | printCondition 0wxa = printStream "ge" | printCondition 0wxb = printStream "lt" | printCondition 0wxc = printStream "gt" | printCondition 0wxd = printStream "le" | printCondition 0wxe = printStream "al" | printCondition _ = printStream "nv" (* Each instruction is 32 bits i.e. 4 bytes. *) fun printWordAt wordNo = let val byteNo = wordNo << 0w2 val () = printHex(byteNo, 6) (* Address *) val () = printStream "\t" val wordValue = word8ToWord (codeVecGet (codeVec, byteNo)) orb (word8ToWord (codeVecGet (codeVec, byteNo+0w1)) << 0w8) orb (word8ToWord (codeVecGet (codeVec, byteNo+0w2)) << 0w16) orb (word8ToWord (codeVecGet (codeVec, byteNo+0w3)) << 0w24) val () = printHex(wordValue, 8) (* Instr as hex *) val () = printStream "\t" in if (wordValue andb 0wxfffffc1f) = 0wxD61F0000 then let val rN = (wordValue andb 0wx3e0) >> 0w5 in printStream "br\tx"; printStream(Word.fmt StringCvt.DEC rN) end else if (wordValue andb 0wxfffffc1f) = 0wxD63F0000 then let val rN = (wordValue andb 0wx3e0) >> 0w5 in printStream "blr\tx"; printStream(Word.fmt StringCvt.DEC rN) end else if (wordValue andb 0wxfffffc1f) = 0wxD65F0000 then let val rN = (wordValue andb 0wx3e0) >> 0w5 in printStream "ret\tx"; printStream(Word.fmt StringCvt.DEC rN) end else if wordValue = 0wxD503201F then printStream "nop" else if wordValue = 0wxD503203F then printStream "yield" else if wordValue = 0wxD5033BBF then printStream "dmb\tish" else if (wordValue andb 0wx1f800000) = 0wx12800000 then (* Move of constants. Includes movn and movk. *) let val rD = wordValue andb 0wx1f val imm16 = Word.toInt((wordValue andb 0wx1fffe0) >> 0w5) val isXReg = (wordValue andb 0wx80000000) <> 0w0 val opc = (wordValue >> 0w29) andb 0w3 val shift = (wordValue >> 0w21) andb 0w3 in printStream (if opc = 0w3 then "movk\t" else "mov\t"); printStream (if isXReg then "x" else "w"); printStream(Word.fmt StringCvt.DEC rD); printStream ",#"; printStream(Int.toString(if opc = 0w0 then ~1 - imm16 else imm16)); if shift = 0w0 then () else (printStream ",lsl #"; printStream(Word.fmt StringCvt.HEX (shift*0w16))) end else if (wordValue andb 0wx3b000000) = 0wx39000000 then (* Load/Store with unsigned, scaled offset. *) let (* The offset is in units of the size of the operand.
*) val size = wordValue >> 0w30 and v = (wordValue >> 0w26) andb 0w1 and opc = (wordValue >> 0w22) andb 0w3 val rT = wordValue andb 0wx1f and rN = (wordValue andb 0wx3e0) >> 0w5 and imm12 = (wordValue andb 0wx3ffc00) >> 0w10 val (opcode, r, scale) = case (size, v, opc) of (0w0, 0w0, 0w0) => ("strb", "w", 0w1) | (0w0, 0w0, 0w1) => ("ldrb", "w", 0w1) | (0w3, 0w0, 0w0) => ("str", "x", 0w8) | (0w3, 0w0, 0w1) => ("ldr", "x", 0w8) | (0w2, 0w1, 0w0) => ("str", "s", 0w4) | (0w2, 0w1, 0w1) => ("ldr", "s", 0w4) | (0w3, 0w1, 0w0) => ("str", "d", 0w8) | (0w3, 0w1, 0w1) => ("ldr", "d", 0w8) | _ => ("??", "?", 0w1) in printStream opcode; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC rT); printStream ",[x"; printStream(Word.fmt StringCvt.DEC rN); printStream ",#"; printStream(Word.fmt StringCvt.DEC(imm12*scale)); printStream "]" end else if (wordValue andb 0wx3b200c00) = 0wx38000000 then (* Load/store unscaled immediate *) let val size = wordValue >> 0w30 and v = (wordValue >> 0w26) andb 0w1 and opc = (wordValue >> 0w22) andb 0w3 val rT = wordValue andb 0wx1f and rN = (wordValue andb 0wx3e0) >> 0w5 and imm9 = (wordValue andb 0wx1ff000) >> 0w12 val imm9Text = if imm9 > 0wxff then "-" ^ Word.fmt StringCvt.DEC (0wx200 - imm9) else Word.fmt StringCvt.DEC imm9 val (opcode, r) = case (size, v, opc) of (0w0, 0w0, 0w0) => ("sturb", "w") | (0w0, 0w0, 0w1) => ("ldurb", "w") | (0w3, 0w0, 0w0) => ("stur", "x") | (0w3, 0w0, 0w1) => ("ldur", "x") | _ => ("???", "?") in printStream opcode; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC rT); printStream ",[x"; printStream(Word.fmt StringCvt.DEC rN); printStream ",#"; printStream imm9Text; printStream "]" end else if (wordValue andb 0wx3b200c00) = 0wx38000400 then (* Load/store immediate post-indexed *) let val size = wordValue >> 0w30 and v = (wordValue >> 0w26) andb 0w1 and opc = (wordValue >> 0w22) andb 0w3 val rT = wordValue andb 0wx1f and rN = (wordValue andb 0wx3e0) >> 0w5 and imm9 = (wordValue andb 0wx1ff000) >> 0w12 val imm9Text = if imm9 > 0wxff then "-" ^ Word.fmt StringCvt.DEC (0wx200 - imm9) else Word.fmt StringCvt.DEC imm9 val (opcode, r) = case (size, v, opc) of (0w0, 0w0, 0w0) => ("strb", "w") | (0w0, 0w0, 0w1) => ("ldrb", "w") | (0w3, 0w0, 0w0) => ("str", "x") | (0w3, 0w0, 0w1) => ("ldr", "x") | _ => ("???", "?") in printStream opcode; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC rT); printStream ",[x"; printStream(Word.fmt StringCvt.DEC rN); printStream "],#"; printStream imm9Text end else if (wordValue andb 0wx3b200c00) = 0wx38000c00 then (* Load/store immediate pre-indexed *) let val size = wordValue >> 0w30 and v = (wordValue >> 0w26) andb 0w1 and opc = (wordValue >> 0w22) andb 0w3 val rT = wordValue andb 0wx1f and rN = (wordValue andb 0wx3e0) >> 0w5 and imm9 = (wordValue andb 0wx1ff000) >> 0w12 val imm9Text = if imm9 > 0wxff then "-" ^ Word.fmt StringCvt.DEC (0wx200 - imm9) else Word.fmt StringCvt.DEC imm9 val (opcode, r) = case (size, v, opc) of (0w0, 0w0, 0w0) => ("strb", "w") | (0w0, 0w0, 0w1) => ("ldrb", "w") | (0w3, 0w0, 0w0) => ("str", "x") | (0w3, 0w0, 0w1) => ("ldr", "x") | _ => ("???", "?") in printStream opcode; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC rT); printStream ",[x"; printStream(Word.fmt StringCvt.DEC rN); printStream ",#"; printStream imm9Text; printStream "]!" end else if (wordValue andb 0wx3b200c00) = 0wx38200800 then (* Load/store with register offset i.e. an index register.
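To make the scaling concrete: the 12-bit field holds the offset in operand-size units, so a byte offset must be non-negative and an exact multiple of the scale; scaledImm12 is an illustrative helper, not part of this module:

    fun scaledImm12 (byteOffset: int, scale: int): word option =
        if byteOffset >= 0 andalso byteOffset mod scale = 0 andalso byteOffset div scale < 4096
        then SOME(Word.fromInt(byteOffset div scale))
        else NONE (* the unscaled, 9-bit signed form would be needed instead *)

    (* scaledImm12(24, 8) = SOME 0w3, which the decoder above prints back as ldr xT,[xN,#24]. *)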
*) let val size = wordValue >> 0w30 and v = (wordValue >> 0w26) andb 0w1 and opc = (wordValue >> 0w22) andb 0w3 val rT = wordValue andb 0wx1f and rN = (wordValue >> 0w5) andb 0wx1f and rM = (wordValue >> 0w16) andb 0wx1f val option = (wordValue >> 0w13) andb 0w7 val s = (wordValue andb 0wx1000) <> 0w0 val (opcode, r) = case (size, v, opc) of (0w0, 0w0, 0w0) => ("strb", "w") | (0w0, 0w0, 0w1) => ("ldrb", "w") | (0w3, 0w0, 0w0) => ("str", "x") | (0w3, 0w0, 0w1) => ("ldr", "x") | _ => ("???", "?") val (extend, xr) = case option of 0w2 => (" uxtw", "w") | 0w3 => if s then (" lsl", "x") else ("", "x") | 0w6 => (" sxtw", "w") | 0w7 => (" sxtx", "x") | _ => ("?", "?") val indexShift = case (size, s) of (0w0, true) => " #1" | (0w1, true) => " #1" | (0w2, true) => " #2" | (0w3, true) => " #3" | _ => "" in printStream opcode; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC rT); printStream ",[x"; printStream(Word.fmt StringCvt.DEC rN); printStream ","; printStream xr; printStream(Word.fmt StringCvt.DEC rM); printStream extend; printStream indexShift; printStream "]" end else if (wordValue andb 0wxbf800000) = 0wx91000000 then let (* Add/Subtract a 12-bit immediate with possible shift. *) val rD = wordValue andb 0wx1f and rN = (wordValue andb 0wx3e0) >> 0w5 and imm12 = (wordValue andb 0wx3ffc00) >> 0w10 and shiftBit = wordValue andb 0wx400000 val imm = if shiftBit <> 0w0 then imm12 << 0w12 else imm12 val opr = if (wordValue andb 0wx40000000) = 0w0 then "add" else "sub" in printStream opr; printStream "\tx"; printStream(Word.fmt StringCvt.DEC rD); printStream ",x"; printStream(Word.fmt StringCvt.DEC rN); printStream ",#"; printStream(Word.fmt StringCvt.DEC imm) end else if (wordValue andb 0wxff800000) = 0wxF1000000 then let (* Subtract a 12-bit immediate with possible shift, setting flags. *) val rD = wordValue andb 0wx1f and rN = (wordValue andb 0wx3e0) >> 0w5 and imm12 = (wordValue andb 0wx3ffc00) >> 0w10 and shiftBit = wordValue andb 0wx400000 val imm = if shiftBit <> 0w0 then imm12 << 0w12 else imm12 in if rD = 0w31 then printStream "cmp\t" else (printStream "subs\tx"; printStream(Word.fmt StringCvt.DEC rD); printStream ","); printStream "x"; printStream(Word.fmt StringCvt.DEC rN); printStream ",#"; printStream(Word.fmt StringCvt.DEC imm) end else if (wordValue andb 0wx7fe0ffe0) = 0wx2A0003E0 then (* Move reg,reg. This is a subset of ORR shifted register. *) let val reg = if (wordValue andb 0wx80000000) <> 0w0 then "x" else "w" in printStream "mov\t"; printStream reg; printStream(Word.fmt StringCvt.DEC(wordValue andb 0wx1f)); printStream ","; printStream reg; printStream(Word.fmt StringCvt.DEC((wordValue >> 0w16) andb 0wx1f)) end else if (wordValue andb 0wx1f000000) = 0wx0A000000 then let (* Logical operations with shifted register. *) val rD = wordValue andb 0wx1f and rN = (wordValue >> 0w5) andb 0wx1f and rM = (wordValue >> 0w16) andb 0wx1f and imm6 = (wordValue >> 0w10) andb 0wx3f and shiftCode = (wordValue >> 0w22) andb 0wx3 val opc = (wordValue >> 0w29) andb 0wx3 val nBit = (wordValue >> 0w21) andb 0w1 val reg = if (wordValue andb 0wx80000000) <> 0w0 then "x" else "w" val opcode = case (opc, nBit) of (0w0, 0w0) => "and" | (0w1, 0w0) => "orr" | (0w2, 0w0) => "eor" | (0w3, 0w0) => "ands" | _ => "??" 
in printStream opcode; printStream"\t"; printStream reg; printStream(Word.fmt StringCvt.DEC rD); printStream ","; printStream reg; printStream(Word.fmt StringCvt.DEC rN); printStream ","; printStream reg; printStream(Word.fmt StringCvt.DEC rM); if imm6 <> 0w0 then ( case shiftCode of 0w0 => printStream ",lsl #" | 0w1 => printStream ",lsr #" | 0w2 => printStream ",asr #" | _ => printStream ",?? #"; printStream(Word.fmt StringCvt.DEC imm6) ) else () end else if (wordValue andb 0wx1f200000) = 0wx0B000000 then let (* Add/subtract shifted register. *) val rD = wordValue andb 0wx1f and rN = (wordValue >> 0w5) andb 0wx1f and rM = (wordValue >> 0w16) andb 0wx1f and imm6 = (wordValue >> 0w10) andb 0wx3f and shiftCode = (wordValue >> 0w22) andb 0wx3 val oper = (wordValue andb 0wx40000000) = 0w0 val isS = (wordValue andb 0wx20000000) <> 0w0 val reg = if (wordValue andb 0wx80000000) <> 0w0 then "x" else "w" in if isS andalso rD = 0w31 then printStream(if oper then "cmn\t" else "cmp\t") else ( printStream(if oper then "add" else "sub"); printStream(if isS then "s\t" else "\t"); printStream reg; printStream(Word.fmt StringCvt.DEC rD); printStream "," ); printStream reg; printStream(Word.fmt StringCvt.DEC rN); printStream ","; printStream reg; printStream(Word.fmt StringCvt.DEC rM); if imm6 <> 0w0 then ( case shiftCode of 0w0 => printStream ",lsl #" | 0w1 => printStream ",lsr #" | 0w2 => printStream ",asr #" | _ => printStream ",?? #"; printStream(Word.fmt StringCvt.DEC imm6) ) else () end else if (wordValue andb 0wxff000000) = 0wx58000000 then let (* Load from a PC-relative address. This may refer to the address constant area or the non-address constant area. *) val rT = wordValue andb 0wx1f (* The offset is in 32-bit words *) val byteAddr = ((wordValue andb 0wx00ffffe0) >> (0w5-0w2)) + byteNo val word64Addr = byteAddr >> 0w3 (* We must NOT use codeVecGetWord if this is in the non-address area. It may well not be a tagged value. *) local fun getConstant(cVal, 0w0) = cVal | getConstant(cVal, offset) = let val byteVal = Word64.fromLarge(Word8.toLarge(codeVecGet (codeVec, byteAddr+offset-0w1))) in getConstant(Word64.orb(Word64.<<(cVal, 0w8), byteVal), offset-0w1) end in val constantValue = if word64Addr <= wordsOfCode then "0x" ^ Word64.toString(getConstant(0w0, 0w8)) (* It's a non-address constant *) else stringOfWord(codeVecGetWord(codeVec, word64Addr)) end in printStream "ldr\tx"; printStream(Word.fmt StringCvt.DEC rT); printStream ",0x"; printStream(Word.fmt StringCvt.HEX byteAddr); printStream "\t// "; printStream constantValue end else if (wordValue andb 0wxbf000000) = 0wx10000000 then let (* Put a pc-relative address into a register. *) val rT = wordValue andb 0wx1f val byteOffset = ((wordValue andb 0wx00ffffe0) << (Word.fromInt Word.wordSize - 0w23) ~>> (Word.fromInt Word.wordSize - 0w20)) + ((wordValue >> 0w29) andb 0w3) in printStream "adr\tx"; printStream(Word.fmt StringCvt.DEC rT); printStream ",0x"; printStream(Word.fmt StringCvt.HEX (byteNo+byteOffset)) end else if (wordValue andb 0wxfc000000) = 0wx14000000 then (* Unconditional branch. *) let (* The offset is signed and the destination may be earlier. 
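The shift pair used for these PC-relative offsets is a general sign-extension idiom: shift the field's top bit up to the machine word's sign bit, then arithmetic-shift back down. A self-contained sketch (signExtend is an illustrative name):

    fun signExtend (value: Word.word, bits: Word.word): Word.word =
    let
        val unused = Word.fromInt Word.wordSize - bits
    in
        Word.~>>(Word.<<(value, unused), unused)
    end

    (* signExtend(0wx7ffff, 0w19) is the all-ones word: a 19-bit field of all ones is the offset ~1. *)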
*) val byteOffset = (wordValue andb 0wx03ffffff) << (Word.fromInt Word.wordSize - 0w26) ~>> (Word.fromInt Word.wordSize - 0w28) in printStream "b\t0x"; printStream(Word.fmt StringCvt.HEX (byteNo+byteOffset)) end else if (wordValue andb 0wxff000000) = 0wx54000000 then (* Conditional branch *) let val byteOffset = (wordValue andb 0wx00ffffe0) << (Word.fromInt Word.wordSize - 0w24) ~>> (Word.fromInt Word.wordSize - 0w21) in printStream "b."; printCondition(wordValue andb 0wxf); printStream "\t0x"; printStream(Word.fmt StringCvt.HEX (byteNo+byteOffset)) end else if (wordValue andb 0wx7e000000) = 0wx34000000 then (* Compare and branch *) let val byteOffset = (wordValue andb 0wx00ffffe0) << (Word.fromInt Word.wordSize - 0w24) ~>> (Word.fromInt Word.wordSize - 0w21) val oper = if (wordValue andb 0wx01000000) = 0w0 then "cbz" else "cbnz" val r = if (wordValue andb 0wx80000000) = 0w0 then "w" else "x" in printStream oper; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC (wordValue andb 0wx1f)); printStream ","; printStream(Word.fmt StringCvt.HEX (byteNo+byteOffset)) end else if (wordValue andb 0wx3fe00000) = 0wx1A800000 then let val sf = wordValue >> 0w31 val opc = (wordValue >> 0w30) andb 0w1 val op2 = (wordValue >> 0w10) andb 0w3 val rT = wordValue andb 0wx1f val rN = (wordValue >> 0w5) andb 0wx1f val rM = (wordValue >> 0w16) andb 0wx1f val cond = (wordValue >> 0w12) andb 0wxf val opcode = case (opc, op2) of (0w0, 0w0) => "csel" | (0w0, 0w1) => "csinc" | (0w1, 0w0) => "csinv" | (0w1, 0w1) => "csneg" | _ => "??" val r = if sf = 0w0 then "w" else "x" in printStream opcode; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC rT); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rN); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rM); printStream ","; printCondition cond end else if (wordValue andb 0wx7f800000) = 0wx13000000 then (* signed bitfield *) let val sf = wordValue >> 0w31 (* N is always the same as sf. *) (*val nBit = (wordValue >> 0w22) andb 0w1*) val immr = (wordValue >> 0w16) andb 0wx3f val imms = (wordValue >> 0w10) andb 0wx3f val rN = (wordValue >> 0w5) andb 0wx1f val rD = wordValue andb 0wx1f val (r, wordSize) = if sf = 0w0 then ("w", 0w32) else ("x", 0w64) in if imms = wordSize - 0w1 then printStream "asr\t" else printStream "sbfm\t"; printStream r; printStream(Word.fmt StringCvt.DEC rD); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rN); if imms = wordSize - 0w1 then (printStream ",#0x"; printStream(Word.toString immr)) else ( printStream ",#0x"; printStream(Word.toString immr); printStream ",#0x"; printStream(Word.toString imms) ) end else if (wordValue andb 0wx7f800000) = 0wx53000000 then (* unsigned bitfield move *) let val sf = wordValue >> 0w31 (* N is always the same as sf. 
*) (*val nBit = (wordValue >> 0w22) andb 0w1*) val immr = (wordValue >> 0w16) andb 0wx3f val imms = (wordValue >> 0w10) andb 0wx3f val rN = (wordValue >> 0w5) andb 0wx1f val rD = wordValue andb 0wx1f val (r, wordSize) = if sf = 0w0 then ("w", 0w32) else ("x", 0w64) in if imms + 0w1 = immr then printStream "lsl\t" else if imms = wordSize - 0w1 then printStream "lsr\t" else printStream "ubfm\t"; printStream r; printStream(Word.fmt StringCvt.DEC rD); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rN); if imms + 0w1 = immr then (printStream ",#0x"; printStream(Word.toString(wordSize - immr))) else if imms = wordSize - 0w1 then (printStream ",#0x"; printStream(Word.toString immr)) else ( printStream ",#0x"; printStream(Word.toString immr); printStream ",#0x"; printStream(Word.toString imms) ) end else if (wordValue andb 0wx1f800000) = 0wx12000000 then (* logical immediate *) let val sf = wordValue >> 0w31 val opc = (wordValue >> 0w29) andb 0w3 val nBit = (wordValue >> 0w22) andb 0w1 val immr = (wordValue >> 0w16) andb 0wx3f val imms = (wordValue >> 0w10) andb 0wx3f val rN = (wordValue >> 0w5) andb 0wx1f val rD = wordValue andb 0wx1f val (opcode, r) = case (sf, opc, nBit) of (0w0, 0w0, 0w0) => ("and", "w") | (0w0, 0w1, 0w0) => ("orr", "w") | (0w0, 0w2, 0w0) => ("eor", "w") | (0w0, 0w3, 0w0) => ("ands", "w") | (0w1, 0w0, _) => ("and", "x") | (0w1, 0w1, _) => ("orr", "x") | (0w1, 0w2, _) => ("eor", "x") | (0w1, 0w3, _) => ("ands", "x") | _ => ("??", "?") in printStream opcode; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC rD); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rN); printStream ",#0x"; printStream(Word64.toString(decodeBitPattern{sf=sf, n=nBit, immr=immr, imms=imms})) end else if (wordValue andb 0wx5fe00000) = 0wx1ac00000 then (* Two source operations - shifts and divide. *) let val sf = wordValue >> 0w31 val s = (wordValue >> 0w29) andb 0w1 val rM = (wordValue >> 0w16) andb 0wx1f val opcode = (wordValue >> 0w10) andb 0wx3f val rN = (wordValue >> 0w5) andb 0wx1f val rD = wordValue andb 0wx1f val (oper, r) = case (sf, s, opcode) of (0w1, 0w0, 0wx2) => ("udiv", "x") | (0w1, 0w0, 0wx3) => ("sdiv", "x") | (0w1, 0w0, 0wx8) => ("lsl", "x") | (0w1, 0w0, 0wx9) => ("lsr", "x") | (0w1, 0w0, 0wxa) => ("asr", "x") | _ => ("??", "?") in printStream oper; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC rD); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rN); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rM) end else if (wordValue andb 0wx1f000000) = 0wx1b000000 then (* Three source operations - multiply add/subtract. 
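The alias tests above follow directly from the encoding of the shifts: a 64-bit LSL by s is UBFM with immr = (64 - s) mod 64 and imms = 63 - s, hence the imms + 1 = immr test and the printed amount wordSize - immr. A small sketch (lslAsUbfm is illustrative):

    fun lslAsUbfm (s: Word.word) = {immr = (0w64 - s) mod 0w64, imms = 0w63 - s}

    (* lslAsUbfm 0w3 = {immr = 0w61, imms = 0w60}: 60 + 1 = 61 and 64 - 61 = 3. *)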
*) let val sf = wordValue >> 0w31 val op54 = (wordValue >> 0w29) andb 0w3 val op31 = (wordValue >> 0w21) andb 0w7 val o0 = (wordValue >> 0w15) andb 0w1 val rM = (wordValue >> 0w16) andb 0wx1f val rA = (wordValue >> 0w10) andb 0wx1f val rN = (wordValue >> 0w5) andb 0wx1f val rD = wordValue andb 0wx1f val (oper, r) = case (sf, op54, op31, o0, rA) of (0w1, 0w0, 0w0, 0w0, 0w31) => ("mul", "x") | (0w1, 0w0, 0w0, 0w0, _) => ("madd", "x") | (0w1, 0w0, 0w0, 0w1, 0w31) => ("mneg", "x") | (0w1, 0w0, 0w0, 0w1, _) => ("msub", "x") | (0w1, 0w0, 0w2, 0w0, 0w31) => ("smulh", "x") | _ => ("??", "?") in printStream oper; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC rD); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rN); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rM); if rA = 0w31 then () else (printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rA)) end else if (wordValue andb 0wxfffffc00) = 0wxC85FFC00 then let val rN = (wordValue >> 0w5) andb 0wx1f val rT = wordValue andb 0wx1f in printStream "ldaxr\tx"; printStream(Word.fmt StringCvt.DEC rT); printStream ",[x"; printStream(Word.fmt StringCvt.DEC rN); printStream "]" end else if (wordValue andb 0wxffe0fc00) = 0wxC800FC00 then let val rS = (wordValue >> 0w16) andb 0wx1f val rN = (wordValue >> 0w5) andb 0wx1f val rT = wordValue andb 0wx1f in printStream "stlxr\tw"; printStream(Word.fmt StringCvt.DEC rS); printStream ",x"; printStream(Word.fmt StringCvt.DEC rT); printStream ",[x"; printStream(Word.fmt StringCvt.DEC rN); printStream "]" end else if (wordValue andb 0wx7f20fc00) = 0wx1E200000 then (* Moves between floating point and general regs. *) let val sf = (wordValue >> 0w31) andb 0w1 - and ftype = (wordValue >> 0w22) andb 0w3 + and s = (wordValue >> 0w29) andb 0w1 + and ptype = (wordValue >> 0w22) andb 0w3 and mode = (wordValue >> 0w19) andb 0w3 and opcode = (wordValue >> 0w16) andb 0w7 and rN = (wordValue >> 0w5) andb 0wx1f and rD = wordValue andb 0wx1f - val (dr, nr) = - case (sf, ftype, mode, opcode) of - (0w0, 0w0, 0w0, 0w7) => ("s", "w") (* w -> s *) - | (0w0, 0w0, 0w0, 0w6) => ("w", "s") (* s -> w *) - | (0w1, 0w1, 0w0, 0w7) => ("d", "x") (* d -> x *) - | (0w1, 0w1, 0w0, 0w6) => ("x", "d") (* x -> d *) - | _ => ("?", "?") + val (opc, dr, nr) = + case (sf, s, ptype, mode, opcode) of + (0w0, 0w0, 0w0, 0w0, 0w7) => ("fmov", "s", "w") (* w -> s *) + | (0w0, 0w0, 0w0, 0w0, 0w6) => ("fmov", "w", "s") (* s -> w *) + | (0w1, 0w0, 0w1, 0w0, 0w7) => ("fmov", "d", "x") (* x -> d *) + | (0w1, 0w0, 0w1, 0w0, 0w6) => ("fmov", "x", "d") (* d -> x *) + | (0w1, 0w0, 0w0, 0w0, 0w2) => ("scvtf", "s", "x") + | (0w1, 0w0, 0w1, 0w0, 0w2) => ("scvtf", "d", "x") + | _ => ("?", "?", "?") in - printStream "fmov\t"; + printStream opc; printStream "\t"; printStream dr; printStream(Word.fmt StringCvt.DEC rD); printStream ","; printStream nr; printStream(Word.fmt StringCvt.DEC rN) end else if (wordValue andb 0wxff200c00) = 0wx1E200800 then (* Floating point two source operations.
*) let val pt = (wordValue >> 0w22) andb 0w3 and rM = (wordValue >> 0w16) andb 0wx1f and opc = (wordValue >> 0w12) andb 0wxf and rN = (wordValue >> 0w5) andb 0wx1f and rT = wordValue andb 0wx1f val (opcode, r) = case (pt, opc) of (0w0, 0wx0) => ("fmul", "s") | (0w0, 0wx1) => ("fdiv", "s") | (0w0, 0wx2) => ("fadd", "s") | (0w0, 0wx3) => ("fsub", "s") | (0w1, 0wx0) => ("fmul", "d") | (0w1, 0wx1) => ("fdiv", "d") | (0w1, 0wx2) => ("fadd", "d") | (0w1, 0wx3) => ("fsub", "d") | _ => ("??", "?") in printStream opcode; printStream "\t"; printStream r; printStream(Word.fmt StringCvt.DEC rT); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rN); printStream ","; printStream r; printStream(Word.fmt StringCvt.DEC rM) end else if (wordValue andb 0wx1e000000) = 0wx02000000 then (* This is an unallocated range. We use it for the register mask. *) let fun printMask (0w25, _) = () | printMask (i, comma) = if ((0w1 << i) andb wordValue) <> 0w0 then ( if comma then printStream ", " else (); printStream "x"; printStream(Word.fmt StringCvt.DEC i); printMask(i+0w1, true) ) else printMask(i+0w1, comma) in printStream "["; printMask(0w0, false); printStream "]" end else printStream "?" ; printStream "\n" end fun printAll i = if i = numInstructions then () else (printWordAt i; printAll(i+0w1)) in printStream functionName; printStream ":\n"; printAll 0w0 end (* Adds the constants onto the code, and copies the code into a new segment *) fun generateCode {instrs, name=functionName, parameters, resultClosure} = let val printStream = Pretty.getSimplePrinter(parameters, []) and printAssemblyCode = Debug.getParameter Debug.assemblyCodeTag parameters local (* Extract the constants. *) fun getConsts(LoadAddressLiteral {value, ...}, (addrs, nonAddrs)) = (value::addrs, nonAddrs) | getConsts(LoadNonAddressLiteral {value, ...}, (addrs, nonAddrs)) = (addrs, value::nonAddrs) | getConsts(_, consts) = consts val (addrConsts, nonAddrConsts) = List.foldl getConsts ([], []) instrs in val addressConsts = List.rev addrConsts and nonAddressConsts = List.rev nonAddrConsts end val (byteVec, wordsOfCode) = genCode(instrs, addressConsts, nonAddressConsts) (* +3 for profile count, function name and constants count *) val numOfConst = List.length addressConsts val segSize = wordsOfCode + Word.fromInt numOfConst + 0w4 val firstConstant = wordsOfCode + 0w3 (* Add 3 for no of consts, fn name and profile count. *) (* Put in the number of constants. This must go in before we actually put in any constants. *) local val lastWord = segSize - 0w1 in val () = set64(numOfConst + 2, wordsOfCode, byteVec) (* Set the last word of the code to the (negative) byte offset of the start of the code area from the end of this word. *) val () = set64((numOfConst + 3) * ~8, lastWord, byteVec) end (* Now we've filled in all the size info we need to convert the segment into a proper code segment before it's safe to put in any ML values. *) val codeVec = byteVecToCodeVec(byteVec, resultClosure) local val name : string = functionName val nameWord : machineWord = toMachineWord name in val () = codeVecPutWord (codeVec, wordsOfCode+0w1, nameWord) end (* Profile ref. A byte ref used by the profiler in the RTS. *) local val v = RunCall.allocateByteMemory(0w1, Word.fromLargeWord(Word8.toLargeWord(Word8.orb(F_mutable, F_bytes)))) fun clear 0w0 = () | clear i = (assignByte(v, i-0w1, 0w0); clear (i-0w1)) val () = clear(wordSize) in val () = codeVecPutWord (codeVec, wordsOfCode+0w2, toMachineWord v) end (* and then copy the constants from the constant list. 
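Gathering the offsets used above, the finished code object has the following layout (word offsets from the start; this is a summary derived from segSize, firstConstant and the two set64 calls, not new behaviour):

    (* [0 .. wordsOfCode-1]              instructions, then the non-address constants
       [wordsOfCode]                     count word: numOfConst + 2
       [wordsOfCode+1]                   function name
       [wordsOfCode+2]                   profile ref
       [wordsOfCode+3 .. +2+numOfConst]  the address constants
       [segSize-1]                       (numOfConst + 3) * ~8, the negative byte offset back *)
    fun segmentWords (wordsOfCode: word, numOfConst: word): word = wordsOfCode + numOfConst + 0w4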
*) local fun setConstant(value, num) = ( codeVecPutWord (codeVec, firstConstant + num, value); num+0w1 ) in val _ = List.foldl setConstant 0w0 addressConsts end in if printAssemblyCode then (* print out the code *) (printCode (codeVec, functionName, wordsOfCode, printStream); printStream"\n") else (); codeVecLock(codeVec, resultClosure) end (* copyCode *) structure Sharing = struct type closureRef = closureRef type instr = instr type xReg = xReg type vReg = vReg type labels = labels type condition = condition type shiftType = shiftType type wordSize = wordSize type 'a extend = 'a extend type scale = scale end end; diff --git a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64Assembly.sig b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64Assembly.sig index 2c6350bd..4f4685ac 100644 --- a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64Assembly.sig +++ b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64Assembly.sig @@ -1,303 +1,321 @@ (* Copyright (c) 2021 David C. J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public Licence version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public Licence for more details. You should have received a copy of the GNU Lesser General Public Licence along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) signature Arm64Assembly = sig type closureRef type instr type machineWord = Address.machineWord type labels type condition eqtype xReg val X0: xReg and X1: xReg and X2: xReg and X3: xReg and X4: xReg and X5: xReg and X6: xReg and X7: xReg and X8: xReg and X9: xReg and X10: xReg and X11: xReg and X12: xReg and X13: xReg and X14: xReg and X15: xReg and X16: xReg and X17: xReg and X18: xReg and X19: xReg and X20: xReg and X21: xReg and X22: xReg and X23: xReg and X24: xReg and X25: xReg and X26: xReg and X27: xReg and X28: xReg and X29: xReg and X30: xReg (* XZero and XSP are both encoded as 31 but the interpretation depends on the instruction *) val XZero: xReg and XSP: xReg val X_MLHeapLimit: xReg (* ML Heap limit pointer *) and X_MLAssemblyInt: xReg (* ML assembly interface pointer. *) and X_MLHeapAllocPtr: xReg (* ML Heap allocation pointer. *) and X_MLStackPtr: xReg (* ML Stack pointer. *) and X_LinkReg: xReg (* Link reg - return address *) eqtype vReg val V0: vReg and V1: vReg and V2: vReg and V3: vReg and V4: vReg and V5: vReg and V6: vReg and V7: vReg (* Condition for conditional branches etc. 
*) val condEqual: condition and condNotEqual: condition and condCarrySet: condition and condCarryClear: condition and condNegative: condition and condPositive: condition and condOverflow: condition and condNoOverflow: condition and condUnsignedHigher: condition and condUnsignedLowOrEq: condition and condSignedGreaterEq: condition and condSignedLess: condition and condSignedGreater: condition and condSignedLessEq: condition and condAlways: condition and condAlwaysNV: condition datatype shiftType = ShiftLSL of word | ShiftLSR of word | ShiftASR of word | ShiftNone datatype wordSize = WordSize32 | WordSize64 datatype 'a extend = ExtUXTB of 'a (* Unsigned extend byte *) | ExtUXTH of 'a (* Unsigned extend halfword *) | ExtUXTW of 'a (* Unsigned extend word *) | ExtUXTX of 'a (* Left shift *) | ExtSXTB of 'a (* Sign extend byte *) | ExtSXTH of 'a (* Sign extend halfword *) | ExtSXTW of 'a (* Sign extend word *) | ExtSXTX of 'a (* Left shift *) (* Load/store instructions have only a single bit for the shift. For byte operations this is a one-bit shift; for others it scales by the size of the operand if set. *) datatype scale = ScaleOrShift | NoScale (* Jump to the address in the register and put the address of the next instruction into X30. *) val branchAndLinkReg: xReg -> instr (* Jump to the address in the register. *) and branchRegister: xReg -> instr (* Jump to the address in the register and hint this is a return. *) and returnRegister: xReg -> instr (* Move an address constant to a register. *) val loadAddressConstant: xReg * machineWord -> instr (* Move a constant into a register that is not an address. The argument is the actual bit pattern to be copied. For tagged integers that means that the value must have been shifted and the tag bit set. *) and loadNonAddressConstant: xReg * Word64.word -> instr (* Add/subtract an optionally shifted 12-bit immediate (i.e. constant) to/from a register. The constant is zero-extended. *) val addImmediate: {regN: xReg, regD: xReg, immed: word, shifted: bool} -> instr and addSImmediate: {regN: xReg, regD: xReg, immed: word, shifted: bool} -> instr and subImmediate: {regN: xReg, regD: xReg, immed: word, shifted: bool} -> instr and subSImmediate: {regN: xReg, regD: xReg, immed: word, shifted: bool} -> instr (* Add/subtract a shifted register, optionally setting the flags. *) val addShiftedReg: {regM: xReg, regN: xReg, regD: xReg, shift: shiftType} -> instr and addSShiftedReg: {regM: xReg, regN: xReg, regD: xReg, shift: shiftType} -> instr and subShiftedReg: {regM: xReg, regN: xReg, regD: xReg, shift: shiftType} -> instr and subSShiftedReg: {regM: xReg, regN: xReg, regD: xReg, shift: shiftType} -> instr (* Add/subtract an extended register, optionally setting the flags. *) val addExtendedReg: {regM: xReg, regN: xReg, regD: xReg, extend: word extend} -> instr and addSExtendedReg: {regM: xReg, regN: xReg, regD: xReg, extend: word extend} -> instr and subExtendedReg: {regM: xReg, regN: xReg, regD: xReg, extend: word extend} -> instr and subSExtendedReg: {regM: xReg, regN: xReg, regD: xReg, extend: word extend} -> instr (* Multiplication *) (* regD = regA + regN * regM *) val multiplyAndAdd: {regM: xReg, regN: xReg, regA: xReg, regD: xReg} -> instr (* regD = regA - regN * regM *) and multiplyAndSub: {regM: xReg, regN: xReg, regA: xReg, regD: xReg} -> instr (* Return the high-order part of a signed multiplication.
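To make the remark under loadNonAddressConstant concrete: a short ML integer n is represented as 2*n + 1, so the bit pattern passed in must already carry the tag. A sketch (tagged is an illustrative helper; the code generator later defines tag in the same way):

    fun tagged (n: int): Word64.word = Word64.orb(Word64.<<(Word64.fromInt n, 0w1), 0w1)

    (* tagged 5 = 0w11, so loadNonAddressConstant(X0, tagged 5) loads the ML integer 5. *)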
*) and signedMultiplyHigh: {regM: xReg, regN: xReg, regD: xReg} -> instr (* Division *) val unsignedDivide: {regM: xReg, regN: xReg, regD: xReg} -> instr and signedDivide: {regM: xReg, regN: xReg, regD: xReg} -> instr (* Logical operations on a shifted register, optionally setting the flags. *) val andShiftedReg: {regM: xReg, regN: xReg, regD: xReg, shift: shiftType} -> instr and orrShiftedReg: {regM: xReg, regN: xReg, regD: xReg, shift: shiftType} -> instr and eorShiftedReg: {regM: xReg, regN: xReg, regD: xReg, shift: shiftType} -> instr and andsShiftedReg: {regM: xReg, regN: xReg, regD: xReg, shift: shiftType} -> instr (* And a register with a bit pattern, discarding the results but setting the condition codes. The bit pattern must be encodable. *) val testBitPattern: xReg * Word64.word -> instr (* Check whether a constant can be encoded. *) val isEncodableBitPattern: Word64.word * wordSize -> bool (* Load/Store an aligned word using a 12-bit offset. The offset is in units of the size of the operand. *) val loadRegScaled: {regT: xReg, regN: xReg, unitOffset: int} -> instr and storeRegScaled: {regT: xReg, regN: xReg, unitOffset: int} -> instr and loadRegScaledByte: {regT: xReg, regN: xReg, unitOffset: int} -> instr and storeRegScaledByte: {regT: xReg, regN: xReg, unitOffset: int} -> instr and loadRegScaledDouble: {regT: vReg, regN: xReg, unitOffset: int} -> instr and storeRegScaledDouble: {regT: vReg, regN: xReg, unitOffset: int} -> instr and loadRegScaledFloat: {regT: vReg, regN: xReg, unitOffset: int} -> instr and storeRegScaledFloat: {regT: vReg, regN: xReg, unitOffset: int} -> instr (* Load/Store a value using a signed byte offset. *) val loadRegUnscaled: {regT: xReg, regN: xReg, byteOffset: int} -> instr and storeRegUnscaled: {regT: xReg, regN: xReg, byteOffset: int} -> instr and loadRegUnscaledByte: {regT: xReg, regN: xReg, byteOffset: int} -> instr and storeRegUnscaledByte: {regT: xReg, regN: xReg, byteOffset: int} -> instr (* Load/store with a register offset i.e. an index register. *) val loadRegIndexed: {regN: xReg, regM: xReg, regT: xReg, option: scale extend} -> instr and storeRegIndexed: {regN: xReg, regM: xReg, regT: xReg, option: scale extend} -> instr and loadRegIndexedByte: {regN: xReg, regM: xReg, regT: xReg, option: scale extend} -> instr and storeRegIndexedByte: {regN: xReg, regM: xReg, regT: xReg, option: scale extend} -> instr (* Load/Store a value using a signed byte offset and post-indexing (post-increment). *) (* The terminology is confusing. Pre-indexing means adding the offset into base address before loading the value, typically used for push, and post-index means using the original value of the base register as the address and adding in the offset after the value has been loaded, e.g. pop. *) val loadRegPostIndex: {regT: xReg, regN: xReg, byteOffset: int} -> instr and storeRegPostIndex: {regT: xReg, regN: xReg, byteOffset: int} -> instr and loadRegPostIndexByte: {regT: xReg, regN: xReg, byteOffset: int} -> instr and storeRegPostIndexByte: {regT: xReg, regN: xReg, byteOffset: int} -> instr (* Load/Store a value using a signed byte offset and pre-indexing (pre-increment). *) val loadRegPreIndex: {regT: xReg, regN: xReg, byteOffset: int} -> instr and storeRegPreIndex: {regT: xReg, regN: xReg, byteOffset: int} -> instr and loadRegPreIndexByte: {regT: xReg, regN: xReg, byteOffset: int} -> instr and storeRegPreIndexByte: {regT: xReg, regN: xReg, byteOffset: int} -> instr (* This word is put in after a call to the RTS trap-handler. 
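Under that terminology an ML stack push and pop are one instruction each. Assuming this signature is in scope, with an 8-byte stack slot:

    val pushX0 = storeRegPreIndex {regT = X0, regN = X_MLStackPtr, byteOffset = ~8}
    and popX0 = loadRegPostIndex {regT = X0, regN = X_MLStackPtr, byteOffset = 8}

This is exactly how the code generator in Arm64GenCode.sml builds its genPushReg and genPopReg.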
All the registers are saved and restored across a call to the trap-handler; the register mask contains those that may contain an address and so need to be scanned and possibly updated if there is a GC. *) val registerMask: xReg list -> instr (* Create a label. *) val createLabel: unit -> labels (* Put a label into the code. *) val setLabel: labels -> instr (* A conditional or unconditional branch. *) val conditionalBranch: condition * labels -> instr (* Put the address of a label into a register - used for handlers and cases. *) and loadLabelAddress: xReg * labels -> instr (* Test a bit in a register and branch if zero/nonzero *) and testBitBranchZero: xReg * Word8.word * labels -> instr and testBitBranchNonZero: xReg * Word8.word * labels -> instr (* Compare a register with zero and branch if zero/nonzero *) and compareBranchZero: xReg * wordSize * labels -> instr and compareBranchNonZero: xReg * wordSize * labels -> instr (* Set the destination register to the value of the first reg if the condition is true otherwise to a possibly modified version of the second argument. There are variants that set it unmodified, incremented, inverted and negated. *) val conditionalSet: {regD: xReg, regTrue: xReg, regFalse: xReg, cond: condition} -> instr val conditionalSetIncrement: {regD: xReg, regTrue: xReg, regFalse: xReg, cond: condition} -> instr val conditionalSetInverted: {regD: xReg, regTrue: xReg, regFalse: xReg, cond: condition} -> instr val conditionalSetNegated: {regD: xReg, regTrue: xReg, regFalse: xReg, cond: condition} -> instr (* Various shifts *) val logicalShiftLeft: {wordSize: wordSize, shift: word, regN: xReg, regD: xReg} -> instr and logicalShiftRight: {wordSize: wordSize, shift: word, regN: xReg, regD: xReg} -> instr and arithmeticShiftRight: {wordSize: wordSize, shift: word, regN: xReg, regD: xReg} -> instr (* Extract bits and set the rest of the register to zero. *) and unsignedBitfieldInsertinZeros: {wordSize: wordSize, lsb: word, width: word, regN: xReg, regD: xReg} -> instr (* Extract bits but leave the rest of the register unchanged. Can be used to clear a specific range of bits by using XZero as the source. *) and bitfieldInsert: {wordSize: wordSize, lsb: word, width: word, regN: xReg, regD: xReg} -> instr (* Logical shift left Rd = Rn << (Rm mod 0w64) *) val logicalShiftLeftVariable: {regM: xReg, regN: xReg, regD: xReg} -> instr (* Logical shift right Rd = Rn >> (Rm mod 0w64) *) and logicalShiftRightVariable: {regM: xReg, regN: xReg, regD: xReg} -> instr (* Arithmetic shift right Rd = Rn ~>> (Rm mod 0w64) *) and arithmeticShiftRightVariable: {regM: xReg, regN: xReg, regD: xReg} -> instr (* Logical operations on bit patterns. The pattern must be valid. ANDS is an AND that also sets the flags, typically used for a test. *) val bitwiseAndImmediate: {wordSize: wordSize, bits: Word64.word, regN: xReg, regD: xReg} -> instr and bitwiseOrImmediate: {wordSize: wordSize, bits: Word64.word, regN: xReg, regD: xReg} -> instr and bitwiseXorImmediate: {wordSize: wordSize, bits: Word64.word, regN: xReg, regD: xReg} -> instr and bitwiseAndSImmediate: {wordSize: wordSize, bits: Word64.word, regN: xReg, regD: xReg} -> instr (* Instructions involved in thread synchronisation. *) val yield: instr and dmbIsh: instr val loadAcquireExclusiveRegister: {regN: xReg, regT: xReg} -> instr val storeReleaseExclusiveRegister: {regN: xReg, regS: xReg, regT: xReg} -> instr - (* Floating point operations. *) + (* Floating point moves and conversions. Moves simply copy the bits.
+ In all cases the integer argument is signed 64-bits. *) val moveGeneralToDouble: {regN: xReg, regD: vReg} -> instr and moveGeneralToFloat: {regN: xReg, regD: vReg} -> instr and moveDoubleToGeneral: {regN: vReg, regD: xReg} -> instr and moveFloatToGeneral: {regN: vReg, regD: xReg} -> instr + and convertIntToDouble: {regN: xReg, regD: vReg} -> instr + and convertIntToFloat: {regN: xReg, regD: vReg} -> instr + and convertFloatToInt: IEEEReal.rounding_mode -> {regN: vReg, regD: xReg} -> instr + and convertDoubleToInt: IEEEReal.rounding_mode -> {regN: vReg, regD: xReg} -> instr + (* Floating point operations. *) val multiplyFloat: {regM: vReg, regN: vReg, regD: vReg} -> instr and divideFloat: {regM: vReg, regN: vReg, regD: vReg} -> instr and addFloat: {regM: vReg, regN: vReg, regD: vReg} -> instr and subtractFloat: {regM: vReg, regN: vReg, regD: vReg} -> instr and multiplyDouble: {regM: vReg, regN: vReg, regD: vReg} -> instr and divideDouble: {regM: vReg, regN: vReg, regD: vReg} -> instr and addDouble: {regM: vReg, regN: vReg, regD: vReg} -> instr and subtractDouble: {regM: vReg, regN: vReg, regD: vReg} -> instr + val compareFloat: {regM: vReg, regN: vReg} -> instr + and compareDouble: {regM: vReg, regN: vReg} -> instr + + val moveFloatToFloat: {regN: vReg, regD: vReg} -> instr + and absFloat: {regN: vReg, regD: vReg} -> instr + and negFloat: {regN: vReg, regD: vReg} -> instr + and convertFloatToDouble: {regN: vReg, regD: vReg} -> instr + and moveDoubleToDouble: {regN: vReg, regD: vReg} -> instr + and absDouble: {regN: vReg, regD: vReg} -> instr + and negDouble: {regN: vReg, regD: vReg} -> instr + and convertDoubleToFloat: {regN: vReg, regD: vReg} -> instr + (* Create the vector of code from the list of instructions and update the closure reference to point to it. *) val generateCode: {instrs: instr list, name: string, parameters: Universal.universal list, resultClosure: closureRef} -> unit (* Offsets in the assembly code interface pointed at by X26 These are in units of 64-bits NOT bytes. *) val heapOverflowCallOffset: int and stackOverflowCallOffset: int and stackOverflowXCallOffset: int and exceptionHandlerOffset: int and stackLimitOffset: int and exceptionPacketOffset: int and threadIdOffset: int and heapLimitPtrOffset: int and heapAllocPtrOffset: int and mlStackPtrOffset: int structure Sharing: sig type closureRef = closureRef type instr = instr type xReg = xReg type vReg = vReg type labels = labels type condition = condition type shiftType = shiftType type wordSize = wordSize type 'a extend = 'a extend type scale = scale end end; diff --git a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64GenCode.sml b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64GenCode.sml index de465fd0..15a2f355 100644 --- a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64GenCode.sml +++ b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64GenCode.sml @@ -1,2104 +1,2161 @@ (* Copyright (c) 2021 David C. J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public Licence version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public Licence for more details. 
You should have received a copy of the GNU Lesser General Public Licence along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor Arm64GenCode ( structure FallBackCG: GENCODESIG and BackendTree: BackendIntermediateCodeSig and CodeArray: CODEARRAYSIG and Arm64Assembly: Arm64Assembly and Debug: DEBUG and Arm64Foreign: FOREIGNCALLSIG sharing FallBackCG.Sharing = BackendTree.Sharing = CodeArray.Sharing = Arm64Assembly.Sharing ) : GENCODESIG = struct open BackendTree CodeArray Arm64Assembly Address exception InternalError = Misc.InternalError exception Fallback of string (* tag a short constant *) fun tag c = 2 * c + 1 and semitag c = 2*c fun taggedWord w: word = w * 0w2 + 0w1 and taggedWord64 w: Word64.word = w * 0w2 + 0w1 val tagBitMask = Word64.<<(Word64.fromInt ~1, 0w1) fun gen(instr, code) = code := instr :: !code fun genPushReg(reg, code) = gen(storeRegPreIndex{regT=reg, regN=X_MLStackPtr, byteOffset= ~8}, code) and genPopReg(reg, code) = gen(loadRegPostIndex{regT=reg, regN=X_MLStackPtr, byteOffset= 8}, code) (* Move register. The ARM64 alias uses XZR as Rn. *) fun genMoveRegToReg{sReg, dReg} = orrShiftedReg{regN=XZero, regM=sReg, regD=dReg, shift=ShiftNone} (* Add a constant word to the source register and put the result in the destination. regW is used as a work register if necessary. This is used both for addition and subtraction. *) fun addConstantWord({regS, regD, value=0w0, ...}, code) = if regS = regD then () else gen(genMoveRegToReg{sReg=regS, dReg=regD}, code) | addConstantWord({regS, regD, regW, value}, code) = let (* If we have to load the constant it's better if the top 32-bits are zero if possible. *) val (isSub, unsigned) = if value > Word64.<<(0w1, 0w63) then (true, ~ value) else (false, value) in if unsigned < Word64.<<(0w1, 0w24) then (* We can put up to 24 in a shifted and an unshifted constant. *) let val w = Word.fromLarge(Word64.toLarge unsigned) val high = Word.andb(Word.>>(w, 0w12), 0wxfff) val low = Word.andb(w, 0wxfff) val addSub = if isSub then subImmediate else addImmediate in if high <> 0w0 then ( gen(addSub{regN=regS, regD=regD, immed=high, shifted=true}, code); if low <> 0w0 then gen(addSub{regN=regD, regD=regD, immed=low, shifted=false}, code) else () ) else gen(addSub{regN=regS, regD=regD, immed=low, shifted=false}, code) end else let (* To minimise the constant and increase the chances that it will fit in a single word look to see if we can shift it. *) fun getShift(value, shift) = if Word64.andb(value, 0w1) = 0w0 then getShift(Word64.>>(value, 0w1), shift+0w1) else (value, shift) val (shifted, shift) = getShift(unsigned, 0w0) in gen(loadNonAddressConstant(regW, shifted), code); gen((if isSub then subShiftedReg else addShiftedReg) {regM=regW, regN=regS, regD=regD, shift=ShiftLSL shift}, code) end end (* Remove items from the stack. If the second argument is true the value on the top of the stack has to be moved. *) fun resetStack(0, _, _) = () | resetStack(nItems, true, code) = ( genPopReg(X0, code); resetStack(nItems, false, code); genPushReg(X0, code) ) | resetStack(nItems, false, code) = addConstantWord({regS=X_MLStackPtr, regD=X_MLStackPtr, regW=X3, value=Word64.fromLarge(Word.toLarge wordSize) * Word64.fromInt nItems}, code) fun compareRegs(reg1, reg2, code) = gen(subSShiftedReg{regM=reg2, regN=reg1, regD=XZero, shift=ShiftNone}, code) (* Sequence to allocate on the heap. The words are not initialised apart from the length word. 
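The getShift normalisation deserves a worked example: stripping trailing zero bits can turn a wide constant into a small one plus a shift. A standalone copy of the idea, with an explicit zero guard that the caller above does not need (it only reaches getShift with a non-zero value):

    fun getShift (0w0 : Word64.word, shift: Word.word) = (0w0, shift)
      | getShift (value, shift) =
            if Word64.andb(value, 0w1) = 0w0
            then getShift(Word64.>>(value, 0w1), shift + 0w1)
            else (value, shift)

    (* getShift(0wx3000000000, 0w0) = (0w3, 0w36): load 3 into the work register, then
       add it with ShiftLSL 0w36, two instructions instead of a long constant load. *)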
*) fun genAllocateFixedSize(words, flags, resultReg, workReg, code) = let val label = createLabel() in (* Subtract the number of bytes required from the heap pointer and put in X0. *) addConstantWord({regS=X_MLHeapAllocPtr, regD=X0, regW=X3, value= ~ (Word64.fromLarge(Word.toLarge wordSize)) * Word64.fromInt(words+1)}, code); compareRegs(resultReg, X_MLHeapLimit, code); gen(conditionalBranch(condCarrySet, label), code); gen(loadRegScaled{regT=X16, regN=X_MLAssemblyInt, unitOffset=heapOverflowCallOffset}, code); gen(branchAndLinkReg X16, code); gen(registerMask [], code); (* Not used at the moment. *) gen(setLabel label, code); gen(genMoveRegToReg{sReg=resultReg, dReg=X_MLHeapAllocPtr}, code); gen(loadNonAddressConstant(workReg, Word64.orb(Word64.fromInt words, Word64.<<(Word64.fromLarge(Word8.toLarge flags), 0w56))), code); (* Store the length word. Have to use the unaligned version because offset is -ve. *) gen(storeRegUnscaled{regT=workReg, regN=resultReg, byteOffset= ~8}, code) end (* Allocate space on the heap for a vector, string etc. sizeReg and flagsReg contain the size and flags as untagged values. sizeReg is unchanged, flagsReg is modified. The result address is in resultReg. All the registers must be different. *) fun allocateVariableSize({sizeReg, flagsReg, resultReg}, code) = let val trapLabel = createLabel() and noTrapLabel = createLabel() in (* Subtract the size as a number of bytes from the allocation ptr. *) gen(subShiftedReg{regM=sizeReg, regN=X_MLHeapAllocPtr, regD=resultReg, shift=ShiftLSL 0w3}, code); (* Subtract another 8 to allow for the length word. *) gen(subImmediate{regN=resultReg, regD=resultReg, immed=0w8, shifted=false}, code); (* If the size is large enough it is possible that this could wrap round. To check for that we trap if either the result is less than the limit or if it is now greater than the allocation pointer. *) compareRegs(resultReg, X_MLHeapLimit, code); gen(conditionalBranch(condCarryClear, trapLabel), code); compareRegs(resultReg, X_MLHeapAllocPtr, code); gen(conditionalBranch(condCarryClear, noTrapLabel), code); gen(setLabel trapLabel, code); gen(loadRegScaled{regT=X16, regN=X_MLAssemblyInt, unitOffset=heapOverflowCallOffset}, code); gen(branchAndLinkReg X16, code); gen(registerMask [], code); (* Not used at the moment. *) gen(setLabel noTrapLabel, code); gen(genMoveRegToReg{sReg=resultReg, dReg=X_MLHeapAllocPtr}, code); (* Combine the size with the flags in the top byte. *) gen(orrShiftedReg{regM=flagsReg, regN=sizeReg, regD=flagsReg, shift=ShiftLSL 0w56}, code); (* Store the length word. Have to use the unaligned version because offset is -ve. *) gen(storeRegUnscaled{regT=flagsReg, regN=resultReg, byteOffset= ~8}, code) end (* Set a register to either tagged(1) i.e. true or tagged(0) i.e. false. *) fun setBooleanCondition(reg, condition, code) = ( gen(loadNonAddressConstant(reg, Word64.fromInt(tag 1)), code); (* If the condition is false the value used is the XZero incremented by 1 i.e. 1 *) gen(conditionalSetIncrement{regD=reg, regTrue=reg, regFalse=XZero, cond=condition}, code) ) (* Raise the overflow exception if the overflow bit has been set. 
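Both allocators finish by writing a length word that packs the object size into the low 56 bits and the flag byte into the top eight. As a sketch (lengthWord is illustrative, and 0wx1 is just an example flag byte):

    fun lengthWord (words: int, flags: Word8.word): Word64.word =
        Word64.orb(Word64.fromInt words, Word64.<<(Word64.fromLarge(Word8.toLarge flags), 0w56))

    (* lengthWord(1, 0wx1) = 0wx0100000000000001: a one-word object with flag byte 0wx1. *)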
*) fun checkOverflow code = let val noOverflow = createLabel() in gen(conditionalBranch(condNoOverflow, noOverflow), code); gen(loadAddressConstant(X0, toMachineWord Overflow), code); gen(loadRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset}, code); gen(loadRegScaled{regT=X1, regN=X_MLStackPtr, unitOffset=0}, code); gen(branchRegister X1, code); gen(setLabel noOverflow, code) end (* Stack check code: this is inserted at the start of a function to check that there is sufficient ML stack available. It is also inserted, with a zero space value, in a loop to ensure that the RTS can interrupt a function. debugTrapAlways can be used to set a sort of breakpoint during debugging. *) fun checkStackCode(regW, space, debugTrapAlways, code) = let val skipCheck = createLabel() val defaultWords = 10 (* This is wired into the RTS. *) val (testReg, entryPt) = if space <= defaultWords then (X_MLStackPtr, stackOverflowCallOffset) else ( (* This is only used at the start of the code. X9 is wired into the RTS. *) addConstantWord({regS=X_MLStackPtr, regD=X9, regW=regW, value= ~ (Word64.fromLarge(Word.toLarge wordSize)) * Word64.fromInt space}, code); (X9, stackOverflowXCallOffset) ) in gen(loadRegScaled{regT=regW, regN=X_MLAssemblyInt, unitOffset=stackLimitOffset}, code); if debugTrapAlways then () else ( compareRegs(testReg, regW, code); gen(conditionalBranch(condCarrySet, skipCheck), code) ); gen(loadRegScaled{regT=X16, regN=X_MLAssemblyInt, unitOffset=entryPt}, code); gen(branchAndLinkReg X16, code); gen(registerMask [], code); (* Not used at the moment. *) gen(setLabel skipCheck, code) end (* Allocate a single byte cell and store the register into it. The result is in X0 so reg must not be X0. *) fun boxLargeWord(reg, code) = ( reg <> X0 orelse raise InternalError "boxLargeWord: X0"; genAllocateFixedSize(1, F_bytes, X0, X5, code); gen(storeRegScaled{regT=reg, regN=X0, unitOffset=0}, code) ) (* Allocate a single byte cell for a "real" i.e. double-precision floating point number. 
*) fun boxDouble(reg, code) = ( genAllocateFixedSize(1, F_bytes, X0, X5, code); gen(storeRegScaledDouble{regT=reg, regN=X0, unitOffset=0}, code) ) val opcode_realAbs = "opcode_realAbs" and opcode_realNeg = "opcode_realNeg" and opcode_fixedIntToReal = "opcode_fixedIntToReal" and opcode_fixedIntToFloat = "opcode_fixedIntToFloat" and opcode_floatToReal = "opcode_floatToReal" and opcode_floatAbs = "opcode_floatAbs" and opcode_floatNeg = "opcode_floatNeg" and opcode_realEqual = "opcode_realEqual" and opcode_realLess = "opcode_realLess" and opcode_realLessEq = "opcode_realLessEq" and opcode_realGreater = "opcode_realGreater" and opcode_realGreaterEq = "opcode_realGreaterEq" and opcode_realUnordered = "opcode_realUnordered" and opcode_floatEqual = "opcode_floatEqual" and opcode_floatLess = "opcode_floatLess" and opcode_floatLessEq = "opcode_floatLessEq" and opcode_floatGreater = "opcode_floatGreater" and opcode_floatGreaterEq = "opcode_floatGreaterEq" and opcode_floatUnordered = "opcode_floatUnordered" and opcode_loadC8 = "opcode_loadC8" and opcode_loadC16 = "opcode_loadC16" and opcode_loadC32 = "opcode_loadC32" and opcode_loadC64 = "opcode_loadC64" and opcode_loadCFloat = "opcode_loadCFloat" and opcode_loadCDouble = "opcode_loadCDouble" and opcode_storeC8 = "opcode_storeC8" and opcode_storeC16 = "opcode_storeC16" and opcode_storeC32 = "opcode_storeC32" and opcode_storeC64 = "opcode_storeC64" and opcode_storeCFloat = "opcode_storeCFloat" and opcode_storeCDouble = "opcode_storeCDouble" and opcode_allocCSpace = "opcode_allocCSpace" and opcode_freeCSpace = "opcode_freeCSpace" type caseForm = { cases : (backendIC * word) list, test : backendIC, caseType: caseType, default : backendIC } (* Where the result, if any, should go *) datatype whereto = NoResult (* discard result *) | ToStack (* Need a result but it can stay on the pseudo-stack *) | ToX0 (* Need a result in X0. *) (* Are we at the end of the function. *) datatype tail = EndOfProc | NotEnd (* Code generate a function or global declaration *) fun codegen (pt, name, resultClosure, numOfArgs, localCount, parameters) = let fun toDo s = raise Fallback(s ^ ":" ^ name) fun genOpcode (n, _) = toDo n fun genDoubleToFloat _ = toDo "genDoubleToFloat" fun genRealToInt _ = toDo "genRealToInt" fun genFloatToInt _ = toDo "genFloatToInt" val cvec = ref [] datatype decEntry = StackAddr of int | Empty val decVec = Array.array (localCount, Empty) (* Count of number of items on the stack. This excludes the arguments and the return address. *) val realstackptr = ref 1 (* The closure ptr is already there *) (* Maximum size of the stack. *) val maxStack = ref 1 (* Whether the top of the stack is actually in X0. *) val topInX0 = ref false (* Push a value onto the stack. *) fun incsp () = ( realstackptr := !realstackptr + 1; if !realstackptr > !maxStack then maxStack := !realstackptr else () ) (* An entry has been removed from the stack. *) fun decsp () = realstackptr := !realstackptr - 1 fun ensureX0 () = if ! topInX0 then (genPushReg(X0, cvec); incsp(); topInX0 := false) else () (* generates code from the tree *) fun gencde (pt : backendIC, whereto : whereto, tailKind : tail, loopAddr) : unit = let val _ = !topInX0 andalso raise InternalError "topInX0 true at start" (* Save the stack pointer value here. We may want to reset the stack. *) val oldsp = !realstackptr; (* Operations on ML memory always have the base as an ML address. Word operations are always word aligned. The higher level will have extracted any constant offset and scaled it if necessary. 
That's helpful for the X86 but not for the ARM. We have to turn them back into indexes. *) (* This pushes two values to the stack: the base address and the index. *) fun genMLAddress({base, index, offset}, scale) = ( gencde (base, ToStack, NotEnd, loopAddr); offset mod scale = 0 orelse raise InternalError "genMLAddress"; case (index, offset div scale) of (NONE, soffset) => (gen(loadNonAddressConstant(X0, Word64.fromInt(tag soffset)), cvec); genPushReg(X0, cvec); incsp()) | (SOME indexVal, 0) => gencde (indexVal, ToStack, NotEnd, loopAddr) | (SOME indexVal, soffset) => ( gencde (indexVal, ToX0, NotEnd, loopAddr); (* Add the offset as a shifted but not tagged value. *) addConstantWord({regS=X0, regD=X0, regW=X1, value=Word64.fromInt(semitag soffset)}, cvec); genPushReg(X0, cvec); incsp(); topInX0 := false ) ) datatype mlLoadKind = MLLoadOffset of int | MLLoadReg of xReg fun genMLLoadAddress({base, index=NONE, offset}, scale) = (* The index, if any, is a constant. *) ( gencde (base, ToX0, NotEnd, loopAddr); (X0, MLLoadOffset(offset div scale)) ) | genMLLoadAddress({base, index=SOME indexVal, offset}, scale) = ( gencde (base, ToStack, NotEnd, loopAddr); (* Push base addr to stack. *) gencde (indexVal, ToX0, NotEnd, loopAddr); (* Shift right to remove the tag. N.B. Indexes into ML memory are unsigned. Unlike on the X86 we can't remove the tag by providing a displacement and the only options are to scale by either 1 or 8. *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); (* Add any constant offset. Does nothing if it's zero. *) addConstantWord({regS=X0, regD=X0, regW=X3, value=Word64.fromInt (* unsigned *)(offset div scale)}, cvec); genPopReg(X1, cvec); (* Pop base reg into X1. *) decsp(); (X1, MLLoadReg X0) ) (* Load the address, index value and offset for non-byte operations. Because the offset has already been scaled by the size of the operand we have to load the index and offset separately. *) fun genCAddress{base, index, offset} = ( gencde (base, ToStack, NotEnd, loopAddr); case index of NONE => (gen(loadNonAddressConstant(X0, Word64.fromInt(tag 0)), cvec); genPushReg(X0, cvec); incsp()) | SOME indexVal => gencde (indexVal, ToStack, NotEnd, loopAddr); gen(loadNonAddressConstant(X0, Word64.fromInt(tag offset)), cvec); genPushReg(X0, cvec); incsp() ) (* Compare a block of bytes. Jumps to labelEqual if all the bytes are equal up to the length. Otherwise it drops through with the condition code set to the last byte comparison that tested unequal. *) fun blockCompareBytes(leftArg, rightArg, length, labelEqual, setZeroCC) = let val loopLabel = createLabel() in genMLAddress(leftArg, 1); genMLAddress(rightArg, 1); gencde (length, ToX0, NotEnd, loopAddr); genPopReg(X2, cvec); (* right arg index - tagged value. *) genPopReg(X1, cvec); (* right arg base address. *) (* Add in the index N.B. ML index values are unsigned. *) gen(addShiftedReg{regM=X2, regN=X1, regD=X1, shift=ShiftLSR 0w1}, cvec); genPopReg(X3, cvec); (* left index *) genPopReg(X2, cvec); decsp(); decsp(); decsp(); decsp(); gen(addShiftedReg{regM=X3, regN=X2, regD=X2, shift=ShiftLSR 0w1}, cvec); (* Untag the length *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); (* If necessary set the cc for the case where the length is zero. *) if setZeroCC then compareRegs(X0, X0, cvec) else (); gen(setLabel loopLabel, cvec); gen(compareBranchZero(X0, WordSize64, labelEqual), cvec); (* X2 is left arg addr, X1 is right arg addr. 
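A pure model of what the byte-comparison loop computes, useful as a reference (compareBytes is an illustrative name; the real code communicates the result through the condition codes rather than returning an order):

    fun compareBytes (_, _, 0) = EQUAL
      | compareBytes (x :: xs, y :: ys, n) =
            if x = (y: Word8.word) then compareBytes(xs, ys, n - 1)
            else if x < y then LESS else GREATER
      | compareBytes _ = EQUAL

    (* Equal prefixes of the given length compare EQUAL; otherwise the first differing byte decides. *)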
*) gen(loadRegPostIndexByte{regT=X4, regN=X2, byteOffset=1}, cvec); gen(loadRegPostIndexByte{regT=X3, regN=X1, byteOffset=1}, cvec); compareRegs(X4, X3, cvec); gen(subImmediate{regN=X0, regD=X0, immed=0w1, shifted=false}, cvec); (* Loop if they're equal. *) gen(conditionalBranch(condEqual, loopLabel), cvec) end val () = case pt of BICEval evl => genEval (evl, tailKind) | BICExtract ext => (* This may be used simply to discard a value which isn't needed on this branch. N.B. genProc for mutual closures assumes that this does not affect X1. *) if whereto = NoResult then () else let fun loadLocalStackValue addr = ( gen(loadRegScaled{regT=X0, regN=X_MLStackPtr, unitOffset= !realstackptr + addr}, cvec); topInX0 := true ) in case ext of BICLoadArgument locn => (* The register arguments appear in order on the stack, followed by the stack arguments in reverse order. *) if locn < 8 then loadLocalStackValue (locn+1) else loadLocalStackValue (numOfArgs-locn+8) | BICLoadLocal locn => ( case Array.sub (decVec, locn) of StackAddr n => loadLocalStackValue (~ n) | _ => (* Should be on the stack, not a function. *) raise InternalError "locaddr: bad stack address" ) | BICLoadClosure locn => ( loadLocalStackValue ~1; (* The closure itself. *) gen(loadRegScaled{regT=X0, regN=X0, unitOffset=locn+1 (* The first word is the code *)}, cvec) ) | BICLoadRecursive => loadLocalStackValue ~1 (* The closure itself - first value on the stack. *) end | BICField {base, offset} => ( gencde (base, ToX0, NotEnd, loopAddr); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=offset}, cvec) ) | BICLoadContainer {base, offset} => ( gencde (base, ToX0, NotEnd, loopAddr); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=offset}, cvec) ) | BICLambda lam => genProc (lam, false, fn () => ()) | BICConstnt(w, _) => ( (*if isShort w then gen(loadNonAddressConstantX0, Word64.fromInt(tag(Word.toIntX(toShort w))), cvec) else *)gen(loadAddressConstant(X0, w), cvec); topInX0 := true ) | BICCond (testPart, thenPart, elsePart) => genCond (testPart, thenPart, elsePart, whereto, tailKind, loopAddr) | BICNewenv(decls, exp) => let (* Processes a list of entries. *) (* Mutually recursive declarations. May be either lambdas or constants. Recurse down the list pushing the addresses of the closure vectors, then unwind the recursion and fill them in. *) fun genMutualDecs [] = () | genMutualDecs ({lambda, addr, ...} :: otherDecs) = genProc (lambda, true, fn() => ( Array.update (decVec, addr, StackAddr (! realstackptr)); genMutualDecs (otherDecs) )) fun codeDecls(BICRecDecs dl) = genMutualDecs dl | codeDecls(BICDecContainer{size, addr}) = ( (* If this is a container we have to process it here otherwise it will be removed in the stack adjustment code. *) (* The stack entries have to be initialised. Set them to tagged(0). *) gen(loadNonAddressConstant(X0, Word64.fromInt(tag 0)), cvec); let fun pushN 0 = () | pushN n = (genPushReg(X0, cvec); pushN (n-1)) in pushN size end; gen(genMoveRegToReg{sReg=X_MLStackPtr, dReg=X0}, cvec); genPushReg(X0, cvec); (* Push the address of this container. *) realstackptr := !realstackptr + size + 1; (* Pushes N words plus the address.
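The argument addressing in BICLoadArgument reduces to one formula; argSlot is an illustrative helper:

    (* Word offset from the current stack pointer of argument locn: the first
       eight arguments arrived in registers and were pushed in order, the rest
       were already on the stack in reverse order from the caller. *)
    fun argSlot (realstackptr: int, numOfArgs: int, locn: int): int =
        realstackptr + (if locn < 8 then locn + 1 else numOfArgs - locn + 8)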
*) Array.update (decVec, addr, StackAddr(!realstackptr)) ) | codeDecls(BICDeclar{value, addr, ...}) = ( gencde (value, ToStack, NotEnd, loopAddr); Array.update (decVec, addr, StackAddr(!realstackptr)) ) | codeDecls(BICNullBinding exp) = gencde (exp, NoResult, NotEnd, loopAddr) in List.app codeDecls decls; gencde (exp, whereto, tailKind, loopAddr) end | BICBeginLoop {loop=body, arguments} => (* Execute the body which will contain at least one Loop instruction. There will also be path(s) which don't contain Loops and these will drop through. *) let val args = List.map #1 arguments (* Evaluate each of the arguments, pushing the result onto the stack. *) fun genLoopArg ({addr, value, ...}) = ( gencde (value, ToStack, NotEnd, loopAddr); Array.update (decVec, addr, StackAddr (!realstackptr)); !realstackptr (* Return the posn on the stack. *) ) val argIndexList = map genLoopArg args; val startSp = ! realstackptr; (* Remember the current top of stack. *) val startLoop = createLabel () val () = gen(setLabel startLoop, cvec) (* Start of loop *) in (* Process the body, passing the jump-back address down for the Loop instruction(s). *) gencde (body, whereto, tailKind, SOME(startLoop, startSp, argIndexList)) (* Leave the arguments on the stack. They can be cleared later if needed. *) end | BICLoop argList => (* Jump back to the enclosing BeginLoop. *) let val (startLoop, startSp, argIndexList) = case loopAddr of SOME l => l | NONE => raise InternalError "No BeginLoop for Loop instr" (* Evaluate the arguments. First push them to the stack because evaluating an argument may depend on the current value of others. Only when we've evaluated all of them can we overwrite the original argument positions. *) fun loadArgs ([], []) = !realstackptr - startSp (* The offset of all the args. *) | loadArgs (arg:: argList, _ :: argIndexList) = let (* Evaluate all the arguments. *) val () = gencde (arg, ToStack, NotEnd, NONE); val argOffset = loadArgs(argList, argIndexList); in genPopReg(X0, cvec); gen(storeRegScaled{regT=X0, regN=X_MLStackPtr, unitOffset=argOffset-1}, cvec); decsp(); (* The argument has now been popped. *) argOffset end | loadArgs _ = raise InternalError "loadArgs: Mismatched arguments"; val _: int = loadArgs(List.map #1 argList, argIndexList) in if !realstackptr <> startSp then resetStack (!realstackptr - startSp, false, cvec) (* Remove any local variables. *) else (); (* Jump back to the start of the loop. *) checkStackCode(X10, 0, false, cvec); gen(conditionalBranch(condAlways, startLoop), cvec) end | BICRaise exp => ( gencde (exp, ToStack, NotEnd, loopAddr); genPopReg(X0, cvec); (* Copy the handler "register" into the stack pointer. Then jump to the address in the first word. The second word is the next handler. This is set up in the handler. We have a lot more raises than handlers since most raises are exceptional conditions such as overflow so it makes sense to minimise the code in each raise. *) gen(loadRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset}, cvec); gen(loadRegScaled{regT=X1, regN=X_MLStackPtr, unitOffset=0}, cvec); gen(branchRegister X1, cvec) ) | BICHandle {exp, handler, exPacketAddr} => let (* Save old handler *) val () = gen(loadRegScaled{regT=X0, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset}, cvec) val () = genPushReg(X0, cvec) val () = incsp () val handlerLabel = createLabel() (* Push address of handler. 
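Once this and the old handler just saved are both on the stack they form the two-word record assumed by the BICRaise case: the word at the stack pointer is the handler code address and the word above it is the previous handler.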
*) val () = gen(loadLabelAddress(X0, handlerLabel), cvec) val () = genPushReg(X0, cvec) val () = incsp() (* Store the address of the stack pointer into the handler register. *) val () = gen(storeRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset}, cvec) (* Code generate the body; "NotEnd" because we have to come back to remove the handler; "ToStack" because delHandler needs a result to carry down. *) val () = gencde (exp, ToStack, NotEnd, loopAddr) (* Now get out of the handler and restore the old one. *) val () = genPopReg(X0, cvec) (* Pop the result. *) val () = genPopReg(X1, cvec) (* Pop and discard the handler address. *) val () = genPopReg(X1, cvec) (* Pop the old handler. *) val () = gen(storeRegScaled{regT=X1, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset}, cvec) val () = genPushReg(X0, cvec) (* Push the result. *) val skipHandler = createLabel() val () = gen(conditionalBranch (condAlways, skipHandler), cvec) val () = realstackptr := oldsp val () = gen(setLabel handlerLabel, cvec) (* The exception raise code resets the stack pointer to the value in the exception handler so this is probably redundant. Leave it for the moment. *) val () = gen(loadRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset}, cvec) (* We must, though, restore the old handler. *) val () = genPopReg(X1, cvec) (* Pop and discard the handler address. *) val () = genPopReg(X1, cvec) (* Pop the old handler. *) val () = gen(storeRegScaled{regT=X1, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset}, cvec) (* Push the exception packet which is in X0 and set the address. *) val () = genPushReg(X0, cvec) val () = incsp () val () = Array.update (decVec, exPacketAddr, StackAddr(!realstackptr)) val () = gencde (handler, ToStack, NotEnd, loopAddr) (* Have to remove the exception packet. *) val () = resetStack(1, true, cvec) val () = decsp() (* Finally fix up the jump around the handler. *) val () = gen(setLabel skipHandler, cvec) in () end | BICCase ({cases, test, default, firstIndex, ...}) => let val () = gencde (test, ToStack, NotEnd, loopAddr) (* Label to jump to at the end of each case. *) val exitJump = createLabel() val () = genPopReg(X0, cvec) val () = decsp () (* Subtract the minimum even if it is zero to remove the tag. This leaves us with a shifted but untagged value. Don't check for overflow. Instead allow large values to wrap around and check later.*) val () = addConstantWord({regS=X0, regD=X0, regW=X1, value= ~(taggedWord64(Word64.fromLarge(Word.toLargeX firstIndex)))}, cvec) (* Create the case labels. *) val nCases = List.length cases val caseLabels = List.tabulate(nCases, fn _ => createLabel()) val defaultLabel = createLabel() (* Compare with the number of cases and go to the default if it is not less. We use an unsigned comparison and compare with the semitagged value because we've removed the tag bit. *) (* TODO: Not necessary if it is exhaustive. *) (* For the moment load the value into a register and compare. *) val () = gen(loadNonAddressConstant(X1, Word64.fromInt nCases * 0w2), cvec) val () = compareRegs(X0, X1, cvec) val () = gen(conditionalBranch(condCarrySet, defaultLabel), cvec) (* Load the address of the jump table. *) val tableLabel = createLabel() val () = gen(loadLabelAddress(X1, tableLabel), cvec) (* Add the value shifted by one since it's already shifted. *) val () = gen(addShiftedReg{regM=X0, regN=X1, regD=X0, shift=ShiftLSL 0w1}, cvec) val () = gen(branchRegister X0, cvec) (* Put in the branch table. 
*) val () = gen(setLabel tableLabel, cvec) val () = List.app(fn label => gen(conditionalBranch(condAlways, label), cvec)) caseLabels (* The default case, if any, follows the case statement. *) (* If we have a jump to the default set it to jump here. *) local fun fixDefault(NONE, defCase) = gen(setLabel defCase, cvec) | fixDefault(SOME _, _) = () in val () = ListPair.appEq fixDefault (cases, caseLabels) end val () = gen(setLabel defaultLabel, cvec) val () = gencde (default, whereto, tailKind, loopAddr) fun genCases(SOME body, label) = ( (* First exit from the previous case or the default if this is the first. *) gen(conditionalBranch(condAlways, exitJump), cvec); (* Remove the result - the last case will leave it. *) case whereto of ToStack => decsp () | NoResult => () | ToX0 => (); topInX0 := false; (* Fix up the jump to come here. *) gen(setLabel label, cvec); gencde (body, whereto, tailKind, loopAddr) ) | genCases(NONE, _) = () val () = ListPair.appEq genCases (cases, caseLabels) (* Finally set the exit jump to come here. *) val () = gen(setLabel exitJump, cvec) in () end | BICTuple recList => let val size = List.length recList in (* Get the fields and push them to the stack. *) List.app(fn v => gencde (v, ToStack, NotEnd, loopAddr)) recList; genAllocateFixedSize(size, 0w0, X0, X1, cvec); List.foldl(fn (_, w) => (genPopReg(X1, cvec); gen(storeRegScaled{regT=X1, regN=X0, unitOffset=w-1}, cvec); w-1)) size recList; topInX0 := true; realstackptr := !realstackptr - size end | BICSetContainer{container, tuple, filter} => (* Copy the contents of a tuple into a container. If the tuple is a Tuple instruction we can avoid generating the tuple and then unpacking it and simply copy the fields that make up the tuple directly into the container. *) ( case tuple of BICTuple cl => (* Simply set the container from the values. *) let (* Push the address of the container to the stack. *) val _ = gencde (container, ToStack, NotEnd, loopAddr) fun setValues([], _, _) = () | setValues(v::tl, sourceOffset, destOffset) = if sourceOffset < BoolVector.length filter andalso BoolVector.sub(filter, sourceOffset) then ( (* Get the value to store into X0. *) gencde (v, ToX0, NotEnd, loopAddr); (* Load the address of the container from the stack and store the value into the container. *) gen(loadRegScaled{regT=X1, regN=X_MLStackPtr, unitOffset=0}, cvec); gen(storeRegScaled{regT=X0, regN=X1, unitOffset=destOffset}, cvec); topInX0 := false; (* We've used it. *) setValues(tl, sourceOffset+1, destOffset+1) ) else setValues(tl, sourceOffset+1, destOffset) in setValues(cl, 0, 0) (* The container address is still on the stack. *) end | _ => let (* General case: copy values from the source tuple. *) (* First the target tuple, then the container. *) val () = gencde (tuple, ToStack, NotEnd, loopAddr) val () = gencde (container, ToX0, NotEnd, loopAddr) val () = genPopReg(X1, cvec) val () = decsp() (* Container address is in X0, tuple in X1. *) val last = BoolVector.foldli(fn (i, true, _) => i | (_, false, n) => n) ~1 filter fun copy (sourceOffset, destOffset) = if BoolVector.sub(filter, sourceOffset) then ( (* Load the value in the tuple. *) gen(loadRegScaled{regT=X2, regN=X1, unitOffset=sourceOffset}, cvec); (* Store into the container. *) gen(storeRegScaled{regT=X2, regN=X0, unitOffset=destOffset}, cvec); if sourceOffset = last then () else copy (sourceOffset+1, destOffset+1) ) else copy(sourceOffset+1, destOffset) in copy (0, 0); topInX0 := true (* Container address is in X0 *) end ) | BICTagTest { test, tag=tagValue, ... 
} => ( gencde (test, ToStack, NotEnd, loopAddr); genPopReg(X0, cvec); gen(subSImmediate{regN=X0, regD=XZero, immed=taggedWord tagValue, shifted=false}, cvec); setBooleanCondition(X0, condEqual, cvec); genPushReg(X0, cvec) ) | BICNullary {oper=BuiltIns.GetCurrentThreadId} => ( gen(loadRegScaled{regT=X0, regN=X_MLAssemblyInt, unitOffset=threadIdOffset}, cvec); genPushReg(X0, cvec); incsp() ) | BICNullary {oper=BuiltIns.CheckRTSException} => (* Raise an exception in ML if the last RTS call set the exception packet. *) let (* It may be better to do this in all RTS calls. *) val noException = createLabel() in (* Load the packet and see if it is nil (tagged 0) *) gen(loadRegScaled{regT=X0, regN=X_MLAssemblyInt, unitOffset=exceptionPacketOffset}, cvec); gen(subSImmediate{regN=X0, regD=XZero, immed=taggedWord 0w0, shifted=false}, cvec); gen(conditionalBranch(condEqual, noException), cvec); (* If it isn't then raise the exception. *) gen(loadRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset}, cvec); gen(loadRegScaled{regT=X1, regN=X_MLStackPtr, unitOffset=0}, cvec); gen(branchRegister X1, cvec); gen(setLabel noException, cvec) end | BICNullary {oper=BuiltIns.CPUPause} => gen(yield, cvec) | BICUnary { oper, arg1 } => let open BuiltIns val () = gencde (arg1, ToX0, NotEnd, loopAddr) in case oper of NotBoolean => (* Flip true to false and the reverse. *) gen(bitwiseXorImmediate{wordSize=WordSize32, bits=0w2, regN=X0, regD=X0}, cvec) | IsTaggedValue => ( gen(testBitPattern(X0, 0w1), cvec); setBooleanCondition(X0, condNotEqual (*Non-zero*), cvec) ) | MemoryCellLength => ( (* Load the length word. *) gen(loadRegUnscaled{regT=X0, regN=X0, byteOffset= ~8}, cvec); (* Extract the length, excluding the flag bytes and shift by one bit. *) gen(unsignedBitfieldInsertinZeros {wordSize=WordSize64, lsb=0w1, width=0w56, regN=X0, regD=X0}, cvec); (* Set the tag bit. *) gen(bitwiseOrImmediate{wordSize=WordSize64, bits=0w1, regN=X0, regD=X0}, cvec) ) | MemoryCellFlags => ( (* Load the flags byte. *) gen(loadRegUnscaledByte{regT=X0, regN=X0, byteOffset= ~1}, cvec); (* Tag the result. *) gen(logicalShiftLeft{wordSize=WordSize64, shift=0w1, regN=X0, regD=X0}, cvec); gen(bitwiseOrImmediate{wordSize=WordSize64, bits=0w1, regN=X0, regD=X0}, cvec) ) | ClearMutableFlag => ( gen(loadRegUnscaledByte{regT=X1, regN=X0, byteOffset= ~1}, cvec); gen(bitwiseAndImmediate{wordSize=WordSize32, bits=Word64.xorb(0wxffffffff, 0wx40), regN=X1, regD=X1}, cvec); gen(storeRegUnscaledByte{regT=X1, regN=X0, byteOffset= ~1}, cvec) ) | AtomicReset => ( (* Clear the mutex. Simply setting it to tagged 0 will work. If another thread is in the ldaxr/stlxr loop it will see the value has changed and retry. *) gen(loadNonAddressConstant(X1, taggedWord64 0w0), cvec); gen(storeRegScaled{regT=X1, regN=X0, unitOffset=0}, cvec) ) | LongWordToTagged => ( (* Load the value and tag it. *) gen(loadRegScaled{regT=X0, regN=X0, unitOffset=0}, cvec); (* Tag the result. 
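For instance, a boxed LargeWord.word containing 0w3 is loaded as 3 and tagged to 7, i.e. shifted left one place with the bottom bit set.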
*) gen(logicalShiftLeft{wordSize=WordSize64, shift=0w1, regN=X0, regD=X0}, cvec); gen(bitwiseOrImmediate{wordSize=WordSize64, bits=0w1, regN=X0, regD=X0}, cvec) ) | SignedToLongWord => ( gen(arithmeticShiftRight{wordSize=WordSize64, shift=0w1, regN=X0, regD=X1}, cvec); boxLargeWord(X1, cvec) ) | UnsignedToLongWord => ( gen(logicalShiftRight{wordSize=WordSize64, shift=0w1, regN=X0, regD=X1}, cvec); boxLargeWord(X1, cvec) ) - | RealAbs PrecDouble => genOpcode(opcode_realAbs, cvec) - | RealNeg PrecDouble => genOpcode(opcode_realNeg, cvec) - | RealFixedInt PrecDouble => genOpcode(opcode_fixedIntToReal, cvec) - | RealAbs PrecSingle => genOpcode(opcode_floatAbs, cvec) - | RealNeg PrecSingle => genOpcode(opcode_floatNeg, cvec) - | RealFixedInt PrecSingle => genOpcode(opcode_fixedIntToFloat, cvec) - | FloatToDouble => genOpcode(opcode_floatToReal, cvec) - | DoubleToFloat rnding => genDoubleToFloat(rnding, cvec) - | RealToInt (PrecDouble, rnding) => genRealToInt(rnding, cvec) - | RealToInt (PrecSingle, rnding) => genFloatToInt(rnding, cvec) + | RealAbs PrecDouble => + ( + gen(loadRegScaledDouble{regT=V0, regN=X0, unitOffset=0}, cvec); + gen(absDouble{regN=V0, regD=V0}, cvec); + boxDouble(V0, cvec) + ) + | RealNeg PrecDouble => + ( + gen(loadRegScaledDouble{regT=V0, regN=X0, unitOffset=0}, cvec); + gen(negDouble{regN=V0, regD=V0}, cvec); + boxDouble(V0, cvec) + ) + | RealFixedInt PrecDouble => + ( + (* Shift to remove the tag. *) + gen(arithmeticShiftRight{wordSize=WordSize64, shift=0w1, regN=X0, regD=X0}, cvec); + gen(convertIntToDouble{regN=X0, regD=V0}, cvec); + boxDouble(V0, cvec) + ) + | RealAbs PrecSingle => + ( + gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); + gen(moveGeneralToFloat{regN=X0, regD=V0}, cvec); + gen(absFloat{regN=V0, regD=V0}, cvec); + gen(moveFloatToGeneral{regN=V0, regD=X0}, cvec); + gen(logicalShiftLeft{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); + gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) + ) + | RealNeg PrecSingle => + ( + gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); + gen(moveGeneralToFloat{regN=X0, regD=V0}, cvec); + gen(negFloat{regN=V0, regD=V0}, cvec); + gen(moveFloatToGeneral{regN=V0, regD=X0}, cvec); + gen(logicalShiftLeft{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); + gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) + ) + | RealFixedInt PrecSingle => + ( + (* Shift to remove the tag. 
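As an illustrative trace for an argument of 5: the tagged register value 11 is shifted back to 5, converted to the float 5.0 in V0, and the 32 float bits are then moved to X0, repositioned in the top half of the register with the tag bit set below them.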
*) + gen(arithmeticShiftRight{wordSize=WordSize64, shift=0w1, regN=X0, regD=X0}, cvec); + gen(convertIntToFloat{regN=X0, regD=V0}, cvec); + gen(moveFloatToGeneral{regN=V0, regD=X0}, cvec); + gen(logicalShiftLeft{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); + gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) + ) + | FloatToDouble => + ( + gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); + gen(moveGeneralToFloat{regN=X0, regD=V0}, cvec); + gen(convertFloatToDouble{regN=V0, regD=V0}, cvec); + boxDouble(V0, cvec) + ) + | DoubleToFloat => + ( + genDoubleToFloat(NONE, cvec) + ) + | RealToInt (PrecDouble, rnding) => + ( + (* The rounding mode is given explicitly *) + genRealToInt(rnding, cvec) + ) + | RealToInt (PrecSingle, rnding) => + ( + (* The rounding mode is given explicitly *) + genFloatToInt(rnding, cvec) + ) | TouchAddress => topInX0 := false (* Discard this *) | AllocCStack => genOpcode(opcode_allocCSpace, cvec) end | BICBinary { oper, arg1, arg2 } => let open BuiltIns (* Generate the first argument to the stack and the second to X0. *) val () = gencde (arg1, ToStack, NotEnd, loopAddr) val () = gencde (arg2, ToX0, NotEnd, loopAddr) fun compareWords cond = ( genPopReg(X1, cvec); (* First argument. *) compareRegs(X1, X0, cvec); setBooleanCondition(X0, cond, cvec) ) and compareLargeWords cond = ( (* The values are boxed so have to be loaded first. *) genPopReg(X1, cvec); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); compareRegs(X1, X0, cvec); setBooleanCondition(X0, cond, cvec) ) in case oper of WordComparison{test=TestEqual, ...} => compareWords condEqual | WordComparison{test=TestLess, isSigned=true} => compareWords condSignedLess | WordComparison{test=TestLessEqual, isSigned=true} => compareWords condSignedLessEq | WordComparison{test=TestGreater, isSigned=true} => compareWords condSignedGreater | WordComparison{test=TestGreaterEqual, isSigned=true} => compareWords condSignedGreaterEq | WordComparison{test=TestLess, isSigned=false} => compareWords condCarryClear | WordComparison{test=TestLessEqual, isSigned=false} => compareWords condUnsignedLowOrEq | WordComparison{test=TestGreater, isSigned=false} => compareWords condUnsignedHigher | WordComparison{test=TestGreaterEqual, isSigned=false} => compareWords condCarrySet | WordComparison{test=TestUnordered, ...} => raise InternalError "WordComparison: TestUnordered" | PointerEq => compareWords condEqual | FixedPrecisionArith ArithAdd => ( (* Subtract the tag bit. *) gen(subImmediate{regN=X0, regD=X0, immed=0w1, shifted=false}, cvec); genPopReg(X1, cvec); (* Add and set the flag bits *) gen(addSShiftedReg{regN=X1, regM=X0, regD=X0, shift=ShiftNone}, cvec); checkOverflow cvec ) | FixedPrecisionArith ArithSub => ( (* Subtract the tag bit. *) gen(subImmediate{regN=X0, regD=X0, immed=0w1, shifted=false}, cvec); genPopReg(X1, cvec); (* Subtract and set the flag bits *) gen(subSShiftedReg{regN=X1, regM=X0, regD=X0, shift=ShiftNone}, cvec); checkOverflow cvec ) | FixedPrecisionArith ArithMult => let (* There's no simple way of detecting overflow. We have to compute the high-order word and then check that it is either all zeros with the sign bit zero or all ones with the sign bit one. *) val noOverflow = createLabel() in (* Compute the result in the same way as for Word.* apart from the arithmetic shift. *) genPopReg(X1, cvec); (* Shift to remove the tags on one argument using an arithmetic shift. 
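The arithmetic shift preserves the operand's sign while the other operand keeps its factor of two, so the product is the semitagged result and only needs the tag bit set. The later comparison of the signedMultiplyHigh value with the result shifted ASR 63 checks that the high word is a pure sign extension.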
*) gen(arithmeticShiftRight{regN=X0, regD=X2, wordSize=WordSize64, shift=0w1}, cvec); (* Remove the tag on the other. *) gen(bitwiseAndImmediate{regN=X1, regD=X1, wordSize=WordSize64, bits=tagBitMask}, cvec); gen(multiplyAndAdd{regM=X1, regN=X2, regA=XZero, regD=X0}, cvec); (* Put back the tag. *) gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec); (* Compute the high order part into X2 *) gen(signedMultiplyHigh{regM=X1, regN=X2, regD=X2}, cvec); (* Compare with the sign bit of the result. *) gen(subSShiftedReg{regD=XZero, regN=X2, regM=X0, shift=ShiftASR 0w63}, cvec); gen(conditionalBranch(condEqual, noOverflow), cvec); gen(loadAddressConstant(X0, toMachineWord Overflow), cvec); gen(loadRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset}, cvec); gen(loadRegScaled{regT=X1, regN=X_MLStackPtr, unitOffset=0}, cvec); gen(branchRegister X1, cvec); gen(setLabel noOverflow, cvec) end | FixedPrecisionArith ArithQuot => ( (*raise Fallback ("ArithQuot: " ^ name);*) (* The word version avoids an extra shift. Don't do that here at least for the moment. Division by zero and overflow are checked for at the higher level. *) genPopReg(X1, cvec); (* Shift to remove the tags on the arguments *) gen(arithmeticShiftRight{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); gen(arithmeticShiftRight{regN=X1, regD=X1, wordSize=WordSize64, shift=0w1}, cvec); gen(signedDivide{regM=X0, regN=X1, regD=X0}, cvec); (* Restore the tag. *) gen(logicalShiftLeft{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | FixedPrecisionArith ArithRem => ( (*raise Fallback ("ArithRem: " ^ name);*) (* For the moment we remove the tags and then retag afterwards. The word version avoids this but at least for the moment we do it the longer way. *) (* There's no direct way to get the remainder - have to use divide and multiply. *) genPopReg(X1, cvec); (* Shift to remove the tags on the arguments *) gen(arithmeticShiftRight{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); gen(arithmeticShiftRight{regN=X1, regD=X1, wordSize=WordSize64, shift=0w1}, cvec); gen(signedDivide{regM=X0, regN=X1, regD=X2}, cvec); (* X0 = X1 - X2*X0 where X2 = X1 div X0, i.e. the remainder. *) gen(multiplyAndSub{regM=X2, regN=X0, regA=X1, regD=X0}, cvec); (* Restore the tag. *) gen(logicalShiftLeft{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | FixedPrecisionArith ArithDiv => raise InternalError "unimplemented operation: FixedPrecisionArith ArithDiv" | FixedPrecisionArith ArithMod => raise InternalError "unimplemented operation: FixedPrecisionArith ArithMod" | WordArith ArithAdd => ( (* Subtract the tag bit. *) gen(subImmediate{regN=X0, regD=X0, immed=0w1, shifted=false}, cvec); genPopReg(X1, cvec); gen(addShiftedReg{regN=X1, regM=X0, regD=X0, shift=ShiftNone}, cvec) ) | WordArith ArithSub => ( (* Subtract the tag bit. *) gen(subImmediate{regN=X0, regD=X0, immed=0w1, shifted=false}, cvec); genPopReg(X1, cvec); gen(subShiftedReg{regN=X1, regM=X0, regD=X0, shift=ShiftNone}, cvec) ) | WordArith ArithMult => ( genPopReg(X1, cvec); (* Shift to remove the tags on one argument. *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); (* Remove the tag on the other. 
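As a concrete example, multiplying tagged 3 by tagged 4: 7 is masked to 6, 9 is shifted to 4, the product 24 is the shifted form of 12, and or-ing in the tag bit gives 25, the tagged representation of 12.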
*) gen(bitwiseAndImmediate{regN=X1, regD=X1, wordSize=WordSize64, bits=tagBitMask}, cvec); gen(multiplyAndAdd{regM=X1, regN=X0, regA=XZero, regD=X0}, cvec); (* Put back the tag. *) gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | WordArith ArithDiv => ( genPopReg(X1, cvec); (* Shift to remove the tag on the divisor *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); (* Untag but don't shift the dividend. *) gen(bitwiseAndImmediate{regN=X1, regD=X1, wordSize=WordSize64, bits=tagBitMask}, cvec); gen(unsignedDivide{regM=X0, regN=X1, regD=X0}, cvec); (* Restore the tag. Note: it may already be set depending on the result of the division. *) gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | WordArith ArithMod => ( (* There's no direct way to get the remainder - have to use divide and multiply. *) genPopReg(X1, cvec); (* Shift to remove the tag on the divisor *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); (* Untag but don't shift the dividend. *) gen(bitwiseAndImmediate{regN=X1, regD=X2, wordSize=WordSize64, bits=tagBitMask}, cvec); gen(unsignedDivide{regM=X0, regN=X2, regD=X2}, cvec); (* Clear the bottom bit before the multiplication. *) gen(bitwiseAndImmediate{regN=X2, regD=X2, wordSize=WordSize64, bits=tagBitMask}, cvec); (* X0 = X1 - X2*X0 where X2 is twice the quotient. *) gen(multiplyAndSub{regM=X2, regN=X0, regA=X1, regD=X0}, cvec) (* Because we're subtracting from the original, tagged, dividend the result is tagged. *) ) | WordArith _ => raise InternalError "WordArith - unimplemented instruction" | WordLogical LogicalAnd => ( genPopReg(X1, cvec); (* Since they're both tagged the tag bit is preserved. *) gen(andShiftedReg{regN=X1, regM=X0, regD=X0, shift=ShiftNone}, cvec) ) | WordLogical LogicalOr => ( genPopReg(X1, cvec); (* Since they're both tagged the tag bit is preserved. *) gen(orrShiftedReg{regN=X1, regM=X0, regD=X0, shift=ShiftNone}, cvec) ) | WordLogical LogicalXor => ( genPopReg(X1, cvec); (* Have to restore the tag bit because that will be cleared. *) gen(eorShiftedReg{regN=X1, regM=X0, regD=X0, shift=ShiftNone}, cvec); gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) (* Shifts: ARM64 shifts are taken modulo the word length but that's dealt with at a higher level. *) | WordShift ShiftLeft => ( genPopReg(X1, cvec); (* Remove the tag from the value we're shifting. *) gen(bitwiseAndImmediate{regN=X1, regD=X1, wordSize=WordSize64, bits=tagBitMask}, cvec); (* Untag the shift amount. Can use 32-bit op here. *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize32, shift=0w1}, cvec); gen(logicalShiftLeftVariable{regM=X0, regN=X1, regD=X0}, cvec); (* Put back the tag. *) gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | WordShift ShiftRightLogical => ( genPopReg(X1, cvec); (* Don't need to remove the tag. *) (* Untag the shift amount. Can use 32-bit op here. *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize32, shift=0w1}, cvec); gen(logicalShiftRightVariable{regM=X0, regN=X1, regD=X0}, cvec); (* Put back the tag. *) gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | WordShift ShiftRightArithmetic => ( genPopReg(X1, cvec); (* Don't need to remove the tag. *) (* Untag the shift amount. Can use 32-bit op here. 
*) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize32, shift=0w1}, cvec); gen(arithmeticShiftRightVariable{regM=X0, regN=X1, regD=X0}, cvec); (* Put back the tag. *) gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | AllocateByteMemory => (* Allocate memory for byte data. Unlike for word data it is not necessary to initialise it before any further allocation provided it has the mutable bit set. *) ( (* Load and untag the size and flags. The size is the number of words even though this is byte data. *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize32 (*byte*), shift=0w1}, cvec); genPopReg(X1, cvec); gen(logicalShiftRight{regN=X1, regD=X1, wordSize=WordSize64, shift=0w1}, cvec); allocateVariableSize({sizeReg=X1, flagsReg=X0, resultReg=X2}, cvec); gen(genMoveRegToReg{sReg=X2, dReg=X0}, cvec) ) | LargeWordComparison TestEqual => compareLargeWords condEqual | LargeWordComparison TestLess => compareLargeWords condCarryClear | LargeWordComparison TestLessEqual => compareLargeWords condUnsignedLowOrEq | LargeWordComparison TestGreater => compareLargeWords condUnsignedHigher | LargeWordComparison TestGreaterEqual => compareLargeWords condCarrySet | LargeWordComparison TestUnordered => raise InternalError "LargeWordComparison: TestUnordered" | LargeWordArith ArithAdd => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); gen(addShiftedReg{regN=X1, regM=X0, regD=X1, shift=ShiftNone}, cvec); boxLargeWord(X1, cvec) ) | LargeWordArith ArithSub => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); gen(subShiftedReg{regN=X1, regM=X0, regD=X1, shift=ShiftNone}, cvec); boxLargeWord(X1, cvec) ) | LargeWordArith ArithMult => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); gen(multiplyAndAdd{regM=X1, regN=X0, regA=XZero, regD=X1}, cvec); boxLargeWord(X1, cvec) ) | LargeWordArith ArithDiv => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); gen(unsignedDivide{regM=X0, regN=X1, regD=X1}, cvec); boxLargeWord(X1, cvec) ) | LargeWordArith ArithMod => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); gen(unsignedDivide{regM=X0, regN=X1, regD=X2}, cvec); gen(multiplyAndSub{regM=X2, regN=X0, regA=X1, regD=X1}, cvec); boxLargeWord(X1, cvec) ) | LargeWordArith _ => raise InternalError "LargeWordArith - unimplemented instruction" | LargeWordLogical LogicalAnd => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); gen(andShiftedReg{regN=X1, regM=X0, regD=X1, shift=ShiftNone}, cvec); boxLargeWord(X1, cvec) ) | LargeWordLogical LogicalOr => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); gen(orrShiftedReg{regN=X1, regM=X0, regD=X1, shift=ShiftNone}, cvec); boxLargeWord(X1, cvec) ) | LargeWordLogical LogicalXor => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); gen(eorShiftedReg{regN=X1, regM=X0, regD=X1, shift=ShiftNone}, cvec); boxLargeWord(X1, cvec) ) (* The shift is 
always a Word.word value i.e. tagged. There is a check at the higher level that the shift does not exceed 32/64 bits. *) | LargeWordShift ShiftLeft => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); (* Untag the shift amount. Can use 32-bit op here. *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize32, shift=0w1}, cvec); gen(logicalShiftLeftVariable{regM=X0, regN=X1, regD=X1}, cvec); boxLargeWord(X1, cvec) ) | LargeWordShift ShiftRightLogical => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); (* Untag the shift amount. Can use 32-bit op here. *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize32, shift=0w1}, cvec); gen(logicalShiftRightVariable{regM=X0, regN=X1, regD=X1}, cvec); boxLargeWord(X1, cvec) ) | LargeWordShift ShiftRightArithmetic => ( genPopReg(X1, cvec); gen(loadRegScaled{regT=X1, regN=X1, unitOffset=0}, cvec); (* Untag the shift amount. Can use 32-bit op here. *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize32, shift=0w1}, cvec); gen(arithmeticShiftRightVariable{regM=X0, regN=X1, regD=X1}, cvec); boxLargeWord(X1, cvec) ) | RealComparison (TestEqual, PrecDouble) => genOpcode(opcode_realEqual, cvec) | RealComparison (TestLess, PrecDouble) => genOpcode(opcode_realLess, cvec) | RealComparison (TestLessEqual, PrecDouble) => genOpcode(opcode_realLessEq, cvec) | RealComparison (TestGreater, PrecDouble) => genOpcode(opcode_realGreater, cvec) | RealComparison (TestGreaterEqual, PrecDouble) => genOpcode(opcode_realGreaterEq, cvec) | RealComparison (TestUnordered, PrecDouble) => genOpcode(opcode_realUnordered, cvec) | RealComparison (TestEqual, PrecSingle) => genOpcode(opcode_floatEqual, cvec) | RealComparison (TestLess, PrecSingle) => genOpcode(opcode_floatLess, cvec) | RealComparison (TestLessEqual, PrecSingle) => genOpcode(opcode_floatLessEq, cvec) | RealComparison (TestGreater, PrecSingle) => genOpcode(opcode_floatGreater, cvec) | RealComparison (TestGreaterEqual, PrecSingle) => genOpcode(opcode_floatGreaterEq, cvec) | RealComparison (TestUnordered, PrecSingle) => genOpcode(opcode_floatUnordered, cvec) | RealArith (ArithAdd, PrecDouble) => ( genPopReg(X1, cvec); gen(loadRegScaledDouble{regT=V0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaledDouble{regT=V1, regN=X1, unitOffset=0}, cvec); gen(addDouble{regM=V0, regN=V1, regD=V0}, cvec); boxDouble(V0, cvec) ) | RealArith (ArithSub, PrecDouble) => ( genPopReg(X1, cvec); gen(loadRegScaledDouble{regT=V0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaledDouble{regT=V1, regN=X1, unitOffset=0}, cvec); gen(subtractDouble{regM=V0, regN=V1, regD=V0}, cvec); boxDouble(V0, cvec) ) | RealArith (ArithMult, PrecDouble) => ( genPopReg(X1, cvec); gen(loadRegScaledDouble{regT=V0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaledDouble{regT=V1, regN=X1, unitOffset=0}, cvec); gen(multiplyDouble{regM=V0, regN=V1, regD=V0}, cvec); boxDouble(V0, cvec) ) | RealArith (ArithDiv, PrecDouble) => ( genPopReg(X1, cvec); gen(loadRegScaledDouble{regT=V0, regN=X0, unitOffset=0}, cvec); gen(loadRegScaledDouble{regT=V1, regN=X1, unitOffset=0}, cvec); gen(divideDouble{regM=V0, regN=V1, regD=V0}, cvec); boxDouble(V0, cvec) ) | RealArith (ArithAdd, PrecSingle) => ( (* 32-bit floats are represented as the value in the top 32-bits of a general register with the low-order word containing all zeros except the bottom bit which is one. 
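Under this encoding the single-precision value 1.0, whose IEEE bit pattern is 0wx3F800000, is held in a general register as 0wx3F80000000000001.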
*) genPopReg(X1, cvec); gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X1, regD=X1}, cvec); gen(moveGeneralToFloat{regN=X0, regD=V0}, cvec); gen(moveGeneralToFloat{regN=X1, regD=V1}, cvec); gen(addFloat{regM=V0, regN=V1, regD=V0}, cvec); gen(moveFloatToGeneral{regN=V0, regD=X0}, cvec); gen(logicalShiftLeft{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | RealArith (ArithSub, PrecSingle) => ( genPopReg(X1, cvec); gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X1, regD=X1}, cvec); gen(moveGeneralToFloat{regN=X0, regD=V0}, cvec); gen(moveGeneralToFloat{regN=X1, regD=V1}, cvec); gen(subtractFloat{regM=V0, regN=V1, regD=V0}, cvec); gen(moveFloatToGeneral{regN=V0, regD=X0}, cvec); gen(logicalShiftLeft{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | RealArith (ArithMult, PrecSingle) => ( genPopReg(X1, cvec); gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X1, regD=X1}, cvec); gen(moveGeneralToFloat{regN=X0, regD=V0}, cvec); gen(moveGeneralToFloat{regN=X1, regD=V1}, cvec); gen(multiplyFloat{regM=V0, regN=V1, regD=V0}, cvec); gen(moveFloatToGeneral{regN=V0, regD=X0}, cvec); gen(logicalShiftLeft{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | RealArith (ArithDiv, PrecSingle) => ( genPopReg(X1, cvec); gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); gen(logicalShiftRight{wordSize=WordSize64, shift=0w32, regN=X1, regD=X1}, cvec); gen(moveGeneralToFloat{regN=X0, regD=V0}, cvec); gen(moveGeneralToFloat{regN=X1, regD=V1}, cvec); gen(divideFloat{regM=V0, regN=V1, regD=V0}, cvec); gen(moveFloatToGeneral{regN=V0, regD=X0}, cvec); gen(logicalShiftLeft{wordSize=WordSize64, shift=0w32, regN=X0, regD=X0}, cvec); gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | RealArith _ => raise InternalError "RealArith - unimplemented instruction" | FreeCStack => genOpcode(opcode_freeCSpace, cvec) | AtomicExchangeAdd => (* The earliest versions of ARMv8 do not have the LDADD instruction which will do this directly. To preserve compatibility we use LDAXR/STLXR which require a loop. *) let val loopLabel = createLabel() in genPopReg(X1, cvec); (* Address of mutex *) (* Untag the value to add. *) gen(subImmediate{regN=X0, regD=X3, immed=0w1, shifted=false}, cvec); gen(setLabel loopLabel, cvec); (* Get the original value into X0. *) gen(loadAcquireExclusiveRegister{regN=X1, regT=X0}, cvec); (* Add and put the result into X2. *) gen(addShiftedReg{regM=X0, regN=X3, regD=X2, shift=ShiftNone}, cvec); (* Store the result of the addition. W4 will be zero if this succeeded. *) gen(storeReleaseExclusiveRegister{regS=X4, regT=X2, regN=X1}, cvec); gen(compareBranchNonZero(X4, WordSize32, loopLabel), cvec); (* Put in the memory barrier. *) gen(dmbIsh, cvec) end ; decsp() (* Removes one item from the stack. 
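The first operand, the mutex address, was pushed by the binary-operation preamble and has now been popped into X1; the original value loaded by the load-acquire remains in X0 as the result.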
*) end | BICAllocateWordMemory {numWords, flags, initial } => let fun doAllocateAndInit() = let val () = gencde (numWords, ToStack, NotEnd, loopAddr) val () = gencde (flags, ToStack, NotEnd, loopAddr) val () = gencde (initial, ToX0, NotEnd, loopAddr) val exitLabel = createLabel() and loopLabel = createLabel() in genPopReg(X2, cvec); (* Flags as tagged value. *) gen(logicalShiftRight{regN=X2, regD=X2, wordSize=WordSize32 (*byte*), shift=0w1}, cvec); genPopReg(X1, cvec); (* Length as tagged value. *) gen(logicalShiftRight{regN=X1, regD=X1, wordSize=WordSize64, shift=0w1}, cvec); genPushReg(X0, cvec); (* Save initialiser - TODO: Add it to save set. *) allocateVariableSize({sizeReg=X1, flagsReg=X2, resultReg=X0}, cvec); genPopReg(X3, cvec); (* Pop initialiser. *) (* Add the length in bytes so we point at the end. *) gen(addShiftedReg{regM=X1, regN=X0, regD=X1, shift=ShiftLSL 0w3}, cvec); (* Loop to initialise. *) gen(setLabel loopLabel, cvec); compareRegs(X1, X0, cvec); (* Are we at the start? *) gen(conditionalBranch(condEqual, exitLabel), cvec); gen(storeRegPreIndex{regT=X3, regN=X1, byteOffset= ~8}, cvec); gen(conditionalBranch(condAlways, loopLabel), cvec); gen(setLabel exitLabel, cvec); decsp(); decsp() end in case (numWords, flags) of (BICConstnt(length, _), BICConstnt(flagValue, _)) => if isShort length andalso toShort length = 0w1 andalso isShort flagValue then (* This is a very common case for refs. *) let val flagByte = Word8.fromLargeWord(Word.toLargeWord(toShort flagValue)) in gencde (initial, ToStack, NotEnd, loopAddr); (* Initialiser. *) genAllocateFixedSize(1, flagByte, X0, X1, cvec); genPopReg(X1, cvec); gen(storeRegScaled{regT=X1, regN=X0, unitOffset=0}, cvec); decsp(); topInX0 := true end else (* Constant but not a single. *) doAllocateAndInit() | _ => (* Not constant. *) doAllocateAndInit() end | BICLoadOperation { kind=LoadStoreMLWord _, address} => ( case genMLLoadAddress(address, Word.toInt wordSize) of (base, MLLoadOffset offset) => gen(loadRegScaled{regT=X0, regN=base, unitOffset=offset}, cvec) | (base, MLLoadReg indexR) => gen(loadRegIndexed{regN=base, regM=indexR, regT=X0, option=ExtUXTX ScaleOrShift}, cvec) ) | BICLoadOperation { kind=LoadStoreMLByte _, address} => ( case genMLLoadAddress(address, 1) of (base, MLLoadOffset offset) => gen(loadRegScaledByte{regT=X0, regN=base, unitOffset=offset}, cvec) | (base, MLLoadReg indexR) => gen(loadRegIndexedByte{regN=base, regM=indexR, regT=X0, option=ExtUXTX NoScale}, cvec); (* Have to tag the result. 
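For example a loaded byte of 0wx41 becomes the tagged value 0wx83; the 32-bit shift and or are enough because the shifted byte still fits in the bottom 32 bits.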
*) gen(logicalShiftLeft{regN=X0, regD=X0, wordSize=WordSize32, shift=0w1}, cvec); gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize32, bits=0w1}, cvec) ) | BICLoadOperation { kind=LoadStoreC8, address} => ( genCAddress address; genOpcode(opcode_loadC8, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC16, address} => ( genCAddress address; genOpcode(opcode_loadC16, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC32, address} => ( genCAddress address; genOpcode(opcode_loadC32, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC64, address} => ( genCAddress address; genOpcode(opcode_loadC64, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreCFloat, address} => ( genCAddress address; genOpcode(opcode_loadCFloat, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreCDouble, address} => ( genCAddress address; genOpcode(opcode_loadCDouble, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreUntaggedUnsigned, address} => ( case genMLLoadAddress(address, Word.toInt wordSize) of (base, MLLoadOffset offset) => gen(loadRegScaled{regT=X0, regN=base, unitOffset=offset}, cvec) | (base, MLLoadReg indexR) => gen(loadRegIndexed{regN=base, regM=indexR, regT=X0, option=ExtUXTX ScaleOrShift}, cvec); (* Have to tag the result. *) gen(logicalShiftLeft{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); gen(bitwiseOrImmediate{regN=X0, regD=X0, wordSize=WordSize64, bits=0w1}, cvec) ) | BICStoreOperation { kind=LoadStoreMLWord _, address, value } => ( genMLAddress(address, Word.toInt wordSize); gencde (value, ToStack, NotEnd, loopAddr); genPopReg(X0, cvec); (* Value to store *) genPopReg(X1, cvec); (* Index: a tagged value. *) (* Shift right to remove the tag. N.B. Indexes into ML memory are unsigned. Unlike on the X86 we can't remove the tag by providing a displacement and the only options are to scale by either 1 or 8. *) gen(logicalShiftRight{regN=X1, regD=X1, wordSize=WordSize64, shift=0w1}, cvec); genPopReg(X2, cvec); (* Base address. *) gen(storeRegIndexed{regN=X2, regM=X1, regT=X0, option=ExtUXTX ScaleOrShift}, cvec); (* Don't put the unit result in; it probably isn't needed, *) decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreMLByte _, address, value } => ( (* Untag the value and store the byte. *) genMLAddress(address, 1); gencde (value, ToStack, NotEnd, loopAddr); genPopReg(X0, cvec); (* Value to store *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); (* Untag it *) genPopReg(X1, cvec); (* Index: a tagged value. *) (* Shift right to remove the tag. N.B. Indexes into ML memory are unsigned. Unlike on the X86 we can't remove the tag by providing a displacement and the only options are to scale by either 1 or 8. *) gen(logicalShiftRight{regN=X1, regD=X1, wordSize=WordSize64, shift=0w1}, cvec); genPopReg(X2, cvec); (* Base address. 
*) gen(storeRegIndexedByte{regN=X2, regM=X1, regT=X0, option=ExtUXTX NoScale}, cvec); (* Don't put the unit result in; it probably isn't needed, *) decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC8, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC8, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC16, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC16, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC32, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC32, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC64, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC64, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreCFloat, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeCFloat, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreCDouble, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeCDouble, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreUntaggedUnsigned, address, value} => ( (* Almost the same as LoadStoreMLWord except that the value to be stored must be untagged before it is stored. This is used primarily to set the length word on a string. *) genMLAddress(address, Word.toInt wordSize); gencde (value, ToStack, NotEnd, loopAddr); genPopReg(X0, cvec); (* Value to store *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); genPopReg(X1, cvec); (* Index: a tagged value. *) (* Shift right to remove the tag. N.B. Indexes into ML memory are unsigned. Unlike on the X86 we can't remove the tag by providing a displacement and the only options are to scale by either 1 or 8. *) gen(logicalShiftRight{regN=X1, regD=X1, wordSize=WordSize64, shift=0w1}, cvec); genPopReg(X2, cvec); (* Base address. *) gen(storeRegIndexed{regN=X2, regM=X1, regT=X0, option=ExtUXTX ScaleOrShift}, cvec); (* Don't put the unit result in; it probably isn't needed, *) decsp(); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpMove{isByteMove}, sourceLeft, destRight, length } => let val exitLabel = createLabel() and loopLabel = createLabel() in genMLAddress(sourceLeft, 1); genMLAddress(destRight, 1); gencde (length, ToX0, NotEnd, loopAddr); (* Length *) genPopReg(X2, cvec); (* Dest index - tagged value. *) genPopReg(X1, cvec); (* Dest base address. *) (* Add in the index N.B. ML index values are unsigned. *) gen(addShiftedReg{regM=X2, regN=X1, regD=X1, shift=ShiftLSR 0w1}, cvec); genPopReg(X3, cvec); (* Source index *) genPopReg(X2, cvec); gen(addShiftedReg{regM=X3, regN=X2, regD=X2, shift=ShiftLSR 0w1}, cvec); (* Untag the length *) gen(logicalShiftRight{regN=X0, regD=X0, wordSize=WordSize64, shift=0w1}, cvec); (* Test the loop value at the top in case it's already zero. *) compareRegs(X0, X0, cvec); (* Set condition code just in case. 
*) gen(setLabel loopLabel, cvec); gen(compareBranchZero(X0, WordSize64, exitLabel), cvec); if isByteMove then ( gen(loadRegPostIndexByte{regT=X3, regN=X2, byteOffset=1}, cvec); gen(storeRegPostIndexByte{regT=X3, regN=X1, byteOffset=1}, cvec) ) else ( gen(loadRegPostIndex{regT=X3, regN=X2, byteOffset=8}, cvec); gen(storeRegPostIndex{regT=X3, regN=X1, byteOffset=8}, cvec) ); gen(subImmediate{regN=X0, regD=X0, immed=0w1, shifted=false}, cvec); (* Back to the start. *) gen(conditionalBranch(condAlways, loopLabel), cvec); gen(setLabel exitLabel, cvec); topInX0 := false; (* X0 does not contain "unit" *) decsp(); decsp(); decsp(); decsp() end | BICBlockOperation { kind=BlockOpEqualByte, sourceLeft, destRight, length } => (* Compare byte vectors for equality - returns a boolean result. *) let val equalLabel = createLabel() in blockCompareBytes(sourceLeft, destRight, length, equalLabel, true); gen(setLabel equalLabel, cvec); (* Set the result condition. *) setBooleanCondition(X0, condEqual, cvec) end | BICBlockOperation { kind=BlockOpCompareByte, sourceLeft, destRight, length } => (* Compare byte vectors for ordering - return tagged -1, 0, +1. *) let val equalLabel = createLabel() and resultLabel = createLabel() in blockCompareBytes(sourceLeft, destRight, length, equalLabel, false); (* We drop through if we have found unequal bytes. *) gen(loadNonAddressConstant(X0, Word64.fromInt(tag 1)), cvec); (* Set X0 to either 1 or -1 depending on whether it's greater or less. *) gen(conditionalSetInverted{regD=X0, regTrue=X0, regFalse=XZero, cond=condUnsignedHigher}, cvec); gen(conditionalBranch(condAlways, resultLabel), cvec); gen(setLabel equalLabel, cvec); (* Equal case - set it to zero. *) gen(loadNonAddressConstant(X0, Word64.fromInt(tag 0)), cvec); gen(setLabel resultLabel, cvec) end | BICArbitrary { longCall, ... } => (* Just implement as a call to the long-precision case. *) ( gencde (longCall, whereto, tailKind, loopAddr) ) in (* body of gencde *) (* This ensures that there is precisely one item on the stack if whereto = ToStack and no items if whereto = NoResult. *) case whereto of ToStack => let val () = ensureX0() val newsp = oldsp + 1; val adjustment = !realstackptr - newsp val () = if adjustment = 0 then () else if adjustment < ~1 then raise InternalError ("gencde: bad adjustment " ^ Int.toString adjustment) (* Hack for declarations that should push values, but don't *) else if adjustment = ~1 then ( gen(loadNonAddressConstant(X0, Word64.fromInt(tag 0)), cvec); genPushReg(X0, cvec) ) else resetStack (adjustment, true, cvec) in realstackptr := newsp end | NoResult => let val () = topInX0 := false val adjustment = !realstackptr - oldsp val () = if adjustment = 0 then () else if adjustment < 0 then raise InternalError ("gencde: bad adjustment " ^ Int.toString adjustment) else resetStack (adjustment, false, cvec) in realstackptr := oldsp end | ToX0 => let (* If we have not pushed anything we have to push a unit result. 
*) val () = if !topInX0 then () else if !realstackptr = oldsp then gen(loadNonAddressConstant(X0, Word64.fromInt(tag 0)), cvec) else ( genPopReg(X0, cvec); decsp() ) val () = topInX0 := true val adjustment = !realstackptr - oldsp val () = if adjustment = 0 then () else if adjustment < 0 then raise InternalError ("gencde: bad adjustment " ^ Int.toString adjustment) else resetStack (adjustment, false, cvec) in realstackptr := oldsp end end (* gencde *) (* doNext is only used for mutually recursive functions where a function may not be able to fill in its closure if it does not have all the remaining declarations. *) (* TODO: This always creates the closure on the heap even when makeClosure is false. *) and genProc ({ closure=[], localCount, body, argTypes, name, ...}: bicLambdaForm, mutualDecs, doNext: unit -> unit) : unit = let (* Create a one word item for the closure. This is returned for recursive references and filled in with the address of the code when we've finished. *) val closure = makeConstantClosure() (* Code-gen function. No non-local references. *) val () = codegen (body, name, closure, List.length argTypes, localCount, parameters); val () = gen(loadAddressConstant(X0, closureAsAddress closure), cvec) val () = genPushReg(X0, cvec) val () = incsp(); in if mutualDecs then doNext () else () end | genProc ({ localCount, body, name, argTypes, closure, ...}, mutualDecs, doNext) = let (* Full closure required. *) val resClosure = makeConstantClosure() (* Code-gen function. *) val () = codegen (body, name, resClosure, List.length argTypes, localCount, parameters) (* Since we're using native words rather than 32-in-64 we can load this now. *) val codeAddr = codeAddressFromClosure resClosure val closureVars = List.length closure (* Size excluding the code address *) in if mutualDecs then let (* Have to make the closure now and fill it in later. *) val () = genAllocateFixedSize(closureVars+1, F_mutable, X0, X1, cvec) val () = gen(loadAddressConstant(X1, codeAddr), cvec); val () = gen(storeRegScaled{regT=X1, regN=X0, unitOffset=0}, cvec) val () = genPushReg(X0, cvec) val () = incsp () val entryAddr : int = !realstackptr (* Set the address of this entry in the declaration table and then process any other mutual-recursive functions. *) val () = doNext () (* Reload the address of the vector - If we have processed other closures the closure will no longer be on the top of the stack. *) val () = gen(loadRegScaled{regT=X1, regN=X_MLStackPtr, unitOffset= !realstackptr - entryAddr}, cvec); (* Load items for the closure. *) fun loadItems ([], _) = () | loadItems (v :: vs, addr : int) = ( (* Generate an item and move it into the closure *) gencde (BICExtract v, ToX0, NotEnd, NONE); (* The closure "address" excludes the code address. *) gen(storeRegScaled{regT=X0, regN=X1, unitOffset=addr+1}, cvec); topInX0 := false; loadItems (vs, addr + 1) ) val () = loadItems (closure, 0) (* Lock it by setting the top byte to zero. *) val () = gen(storeRegUnscaledByte{regT=XZero, regN=X1, byteOffset= ~1}, cvec) in () (* Don't need to do anything now. 
*) end else let val () = List.app (fn pt => gencde (BICExtract pt, ToStack, NotEnd, NONE)) closure in genAllocateFixedSize(closureVars+1, 0w0, X0, X1, cvec); List.foldl(fn (_, w) => (genPopReg(X1, cvec); gen(storeRegScaled{regT=X1, regN=X0, unitOffset=w-1}, cvec); w-1)) (closureVars+1) closure; gen(loadAddressConstant(X1, codeAddr), cvec); gen(storeRegScaled{regT=X1, regN=X0, unitOffset=0}, cvec); genPushReg(X0, cvec); realstackptr := !realstackptr - closureVars + 1 (* Popped the closure vars and pushed the address. *) end end and genCond (testCode, thenCode, elseCode, whereto, tailKind, loopAddr) = let (* andalso and orelse are turned into conditionals with constants. Convert this into a series of tests. *) fun genTest(BICConstnt(w, _), jumpOn, targetLabel) = let val cVal = case toShort w of 0w0 => false | 0w1 => true | _ => raise InternalError "genTest" in if cVal = jumpOn then gen(conditionalBranch (condAlways, targetLabel), cvec) else () end | genTest(BICUnary { oper=BuiltIns.NotBoolean, arg1 }, jumpOn, targetLabel) = genTest(arg1, not jumpOn, targetLabel) | genTest(BICCond (testPart, thenPart, elsePart), jumpOn, targetLabel) = let val toElse = createLabel() and exitJump = createLabel() in genTest(testPart, false, toElse); genTest(thenPart, jumpOn, targetLabel); gen(conditionalBranch (condAlways, exitJump), cvec); gen(setLabel toElse, cvec); genTest(elsePart, jumpOn, targetLabel); gen(setLabel exitJump, cvec) end | genTest(testCode, jumpOn, targetLabel) = ( gencde (testCode, ToStack, NotEnd, loopAddr); genPopReg(X0, cvec); gen(subSImmediate{regN=X0, regD=XZero, immed=taggedWord 0w1, shifted=false}, cvec); gen(conditionalBranch(if jumpOn then condEqual else condNotEqual, targetLabel), cvec); decsp() (* conditional branch pops a value. *) ) val toElse = createLabel() and exitJump = createLabel() val () = genTest(testCode, false, toElse) val () = gencde (thenCode, whereto, tailKind, loopAddr) (* Get rid of the result from the stack. If there is a result then the ``else-part'' will push it. *) val () = case whereto of ToStack => decsp () | NoResult => () | ToX0 => () val () = topInX0 := false val () = gen(conditionalBranch (condAlways, exitJump), cvec) (* start of "else part" *) val () = gen(setLabel toElse, cvec) val () = gencde (elseCode, whereto, tailKind, loopAddr) val () = gen(setLabel exitJump, cvec) in () end (* genCond *) and genEval (eval, tailKind : tail) : unit = let val argList : backendIC list = List.map #1 (#argList eval) val argsToPass : int = List.length argList; (* Load arguments *) fun loadArgs [] = () | loadArgs (v :: vs) = let (* Push each expression onto the stack. *) val () = gencde(v, ToStack, NotEnd, NONE) in loadArgs vs end; (* Have to guarantee that the expression to return the function is evaluated before the arguments. *) (* Returns true if evaluating it later is safe. *) fun safeToLeave (BICConstnt _) = true | safeToLeave (BICLambda _) = true | safeToLeave (BICExtract _) = true | safeToLeave (BICField {base, ...}) = safeToLeave base | safeToLeave (BICLoadContainer {base, ...}) = safeToLeave base | safeToLeave _ = false val () = if (case argList of [] => true | _ => safeToLeave (#function eval)) then let (* Can load the args first. *) val () = loadArgs argList in gencde (#function eval, ToStack, NotEnd, NONE) end else let (* The expression for the function is too complicated to risk leaving. It might have a side-effect and we must ensure that any side-effects it has are done before the arguments are loaded. 
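A hypothetical example is an application whose function part dereferences a ref cell that one of the argument expressions assigns to: evaluating the function expression first fixes which function is called.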
*) val () = gencde(#function eval, ToStack, NotEnd, NONE); val () = loadArgs(argList); (* Load the function again. *) val () = gen(loadRegScaled{regT=X0, regN=X_MLStackPtr, unitOffset=argsToPass}, cvec) val () = genPushReg(X0, cvec) in incsp () end in (* body of genEval *) case tailKind of NotEnd => (* Normal call. *) let val () = genPopReg(X8, cvec) (* Pop the closure pointer. *) (* We need to put the first 8 arguments into registers and leave the rest on the stack. *) fun loadArg(n, reg) = if argsToPass > n then gen(loadRegScaled{regT=reg, regN=X_MLStackPtr, unitOffset=argsToPass-n-1}, cvec) else () val () = loadArg(0, X0) val () = loadArg(1, X1) val () = loadArg(2, X2) val () = loadArg(3, X3) val () = loadArg(4, X4) val () = loadArg(5, X5) val () = loadArg(6, X6) val () = loadArg(7, X7) in gen(loadRegScaled{regT=X9, regN=X8, unitOffset=0}, cvec); (* Entry point *) gen(branchAndLinkReg X9, cvec); (* We have popped the closure pointer. The caller has popped the stack arguments and we have pushed the result value. The register arguments are still on the stack. *) topInX0 := true; realstackptr := !realstackptr - Int.max(argsToPass-8, 0) - 1 (* Args popped by caller. *) end | EndOfProc => (* Tail recursive call. *) let val () = genPopReg(X8, cvec) (* Pop the closure pointer. *) val () = decsp() (* Get the return address into X30. *) val () = gen(loadRegScaled{regT=X30, regN=X_MLStackPtr, unitOffset= !realstackptr}, cvec) (* Load the register arguments *) fun loadArg(n, reg) = if argsToPass > n then gen(loadRegScaled{regT=reg, regN=X_MLStackPtr, unitOffset=argsToPass-n-1}, cvec) else () val () = loadArg(0, X0) val () = loadArg(1, X1) val () = loadArg(2, X2) val () = loadArg(3, X3) val () = loadArg(4, X4) val () = loadArg(5, X5) val () = loadArg(6, X6) val () = loadArg(7, X7) (* We need to move the stack arguments into the original argument area. *) (* This is the total number of words that this function is responsible for. It includes the stack arguments that the caller expects to be removed. *) val itemsOnStack = !realstackptr + 1 + numOfArgs (* Stack arguments are moved using X9. *) fun moveStackArg n = if n >= argsToPass then () else let val () = loadArg(n, X9) val destOffset = itemsOnStack - (n-8) - 1 val () = gen(storeRegScaled{regT=X9, regN=X_MLStackPtr, unitOffset=destOffset}, cvec) in moveStackArg(n+1) end val () = moveStackArg 8 in resetStack(itemsOnStack - Int.max(argsToPass-8, 0), false, cvec); gen(loadRegScaled{regT=X9, regN=X8, unitOffset=0}, cvec); (* Entry point *) gen(branchRegister X9, cvec) (* Since we're not returning we can ignore the stack pointer value. *) end end (* genEval *) (* Begin generating the code for the function. *) val prefix = ref [] (* Push the arguments passed in registers. *) val () = if numOfArgs >= 8 then genPushReg (X7, prefix) else () val () = if numOfArgs >= 7 then genPushReg (X6, prefix) else () val () = if numOfArgs >= 6 then genPushReg (X5, prefix) else () val () = if numOfArgs >= 5 then genPushReg (X4, prefix) else () val () = if numOfArgs >= 4 then genPushReg (X3, prefix) else () val () = if numOfArgs >= 3 then genPushReg (X2, prefix) else () val () = if numOfArgs >= 2 then genPushReg (X1, prefix) else () val () = if numOfArgs >= 1 then genPushReg (X0, prefix) else () val () = genPushReg (X30, prefix) val () = genPushReg (X8, prefix) (* Push closure pointer *) (* Generate the function. *) (* Assume we always want a result. There is otherwise a problem if the called routine returns a result of type void (i.e. 
no result) but the caller wants a result (e.g. the identity function). *) val () = gencde (pt, ToX0, EndOfProc, NONE) val () = resetStack(1, false, cvec) (* Skip over the pushed closure *) val () = genPopReg(X30, cvec) (* Return address => pop into X30 *) val () = resetStack(numOfArgs, false, cvec) (* Remove the arguments *) val () = gen(returnRegister X30, cvec) (* Jump to X30 *) (* Now we know the maximum stack size we can code-gen the stack check. This needs to go in after we have saved X30. *) val () = checkStackCode(X10, !maxStack, false(*name = "INTCODECONS().genCode(3)genByteCode(2)"*), prefix) val instructions = List.rev(!prefix) @ List.rev(!cvec) in (* body of codegen *) (* Having code-generated the body of the function, it is copied into a new data segment. *) generateCode{instrs=instructions, name=name, parameters=parameters, resultClosure=resultClosure} end (* codegen *) fun gencodeLambda(lambda as { name, body, argTypes, localCount, ...}:bicLambdaForm, parameters, closure) = if (*false andalso*) Debug.getParameter Debug.compilerDebugTag parameters = 0 then FallBackCG.gencodeLambda(lambda, parameters, closure) else codegen (body, name, closure, List.length argTypes, localCount, parameters) handle Fallback s => ( Pretty.getSimplePrinter(parameters, []) ("TODO: " ^ s ^ "\n"); FallBackCG.gencodeLambda(lambda, parameters, closure) ) structure Foreign = Arm64Foreign structure Sharing = struct open BackendTree.Sharing type closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml b/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml index e9b13284..ea79525b 100644 --- a/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml +++ b/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml @@ -1,739 +1,738 @@ (* Copyright (c) 2012, 2016-21 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Intermediate code tree for the back end of the compiler. *) structure BackendIntermediateCode: BackendIntermediateCodeSig = struct open Address structure BuiltIns = struct datatype testConditions = TestEqual | TestLess | TestLessEqual | TestGreater | TestGreaterEqual | TestUnordered (* Reals only. 
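   Unordered holds exactly when at least one operand is a NaN.  A
   Basis-level illustration (not compiler code): with val nan = 0.0 / 0.0,
   Real.unordered (nan, 1.0) returns true while Real.unordered (2.0, 1.0)
   returns false.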
*) datatype arithmeticOperations = ArithAdd | ArithSub | ArithMult | ArithQuot | ArithRem | ArithDiv | ArithMod datatype logicalOperations = LogicalAnd | LogicalOr | LogicalXor datatype shiftOperations = ShiftLeft | ShiftRightLogical | ShiftRightArithmetic datatype unaryOps = NotBoolean | IsTaggedValue | MemoryCellLength | MemoryCellFlags | ClearMutableFlag | AtomicReset | LongWordToTagged | SignedToLongWord | UnsignedToLongWord | RealAbs of precision | RealNeg of precision | RealFixedInt of precision | FloatToDouble - | DoubleToFloat of IEEEReal.rounding_mode option + | DoubleToFloat | RealToInt of precision * IEEEReal.rounding_mode | TouchAddress | AllocCStack and precision = PrecSingle | PrecDouble and binaryOps = WordComparison of { test: testConditions, isSigned: bool } | FixedPrecisionArith of arithmeticOperations | WordArith of arithmeticOperations | WordLogical of logicalOperations | WordShift of shiftOperations | AllocateByteMemory | LargeWordComparison of testConditions | LargeWordArith of arithmeticOperations | LargeWordLogical of logicalOperations | LargeWordShift of shiftOperations | RealComparison of testConditions * precision | RealArith of arithmeticOperations * precision | PointerEq | FreeCStack | AtomicExchangeAdd and nullaryOps = GetCurrentThreadId | CheckRTSException | CPUPause fun unaryRepr NotBoolean = "NotBoolean" | unaryRepr IsTaggedValue = "IsTaggedValue" | unaryRepr MemoryCellLength = "MemoryCellLength" | unaryRepr MemoryCellFlags = "MemoryCellFlags" | unaryRepr ClearMutableFlag = "ClearMutableFlag" | unaryRepr AtomicReset = "AtomicReset" | unaryRepr LongWordToTagged = "LongWordToTagged" | unaryRepr SignedToLongWord = "SignedToLongWord" | unaryRepr UnsignedToLongWord = "UnsignedToLongWord" | unaryRepr (RealAbs prec) = "RealAbs" ^ precRepr prec | unaryRepr (RealNeg prec) = "RealNeg" ^ precRepr prec | unaryRepr (RealFixedInt prec) = "RealFixedInt" ^ precRepr prec | unaryRepr FloatToDouble = "FloatToDouble" - | unaryRepr (DoubleToFloat NONE) = "DoubleToFloat" - | unaryRepr (DoubleToFloat (SOME mode)) = "DoubleToFloat" ^ rndModeRepr mode + | unaryRepr DoubleToFloat = "DoubleToFloat" | unaryRepr (RealToInt (prec, mode)) = "RealToInt" ^ precRepr prec ^ rndModeRepr mode | unaryRepr TouchAddress = "TouchAddress" | unaryRepr AllocCStack = "AllocCStack" and binaryRepr (WordComparison{test, isSigned}) = "Test" ^ (testRepr test) ^ (if isSigned then "Signed" else "Unsigned") | binaryRepr (FixedPrecisionArith arithOp) = (arithRepr arithOp) ^ "Fixed" | binaryRepr (WordArith arithOp) = (arithRepr arithOp) ^ "Word" | binaryRepr (WordLogical logOp) = (logicRepr logOp) ^ "Word" | binaryRepr (WordShift shiftOp) = (shiftRepr shiftOp) ^ "Word" | binaryRepr AllocateByteMemory = "AllocateByteMemory" | binaryRepr (LargeWordComparison test) = "Test" ^ (testRepr test) ^ "LargeWord" | binaryRepr (LargeWordArith arithOp) = (arithRepr arithOp) ^ "LargeWord" | binaryRepr (LargeWordLogical logOp) = (logicRepr logOp) ^ "LargeWord" | binaryRepr (LargeWordShift shiftOp) = (shiftRepr shiftOp) ^ "LargeWord" | binaryRepr (RealComparison (test, prec)) = "Test" ^ testRepr test ^ precRepr prec | binaryRepr (RealArith (arithOp, prec)) = arithRepr arithOp ^ precRepr prec | binaryRepr PointerEq = "PointerEq" | binaryRepr FreeCStack = "FreeCStack" | binaryRepr AtomicExchangeAdd = "AtomicExchangeAdd" and nullaryRepr GetCurrentThreadId = "GetCurrentThreadId" | nullaryRepr CheckRTSException = "CheckRTSException" | nullaryRepr CPUPause = "CPUPause" and testRepr TestEqual = "Equal" | testRepr TestLess = "Less" | 
testRepr TestLessEqual = "LessEqual" | testRepr TestGreater = "Greater" | testRepr TestGreaterEqual = "GreaterEqual" | testRepr TestUnordered = "Unordered" and arithRepr ArithAdd = "Add" | arithRepr ArithSub = "Sub" | arithRepr ArithMult = "Mult" | arithRepr ArithQuot = "Quot" | arithRepr ArithRem = "Rem" | arithRepr ArithDiv = "Div" | arithRepr ArithMod = "Mod" and logicRepr LogicalAnd = "And" | logicRepr LogicalOr = "Or" | logicRepr LogicalXor = "Xor" and shiftRepr ShiftLeft = "Left" | shiftRepr ShiftRightLogical = "RightLogical" | shiftRepr ShiftRightArithmetic = "RightArithmetic" and precRepr PrecSingle = "Single" | precRepr PrecDouble = "Double" and rndModeRepr IEEEReal.TO_NEAREST = "Round" | rndModeRepr IEEEReal.TO_NEGINF = "Down" | rndModeRepr IEEEReal.TO_POSINF = "Up" | rndModeRepr IEEEReal.TO_ZERO = "Trunc" end datatype argumentType = GeneralType | DoubleFloatType | SingleFloatType datatype backendIC = BICNewenv of bicCodeBinding list * backendIC (* Set of bindings with an expression. *) | BICConstnt of machineWord * Universal.universal list (* Load a constant *) | BICExtract of bicLoadForm (* Get a local variable, an argument or a closure value *) | BICField of {base: backendIC, offset: int } (* Load a field from a tuple or record *) | BICEval of (* Evaluate a function with an argument list. *) { function: backendIC, argList: (backendIC * argumentType) list, resultType: argumentType } (* Built-in functions. *) | BICNullary of {oper: BuiltIns.nullaryOps} | BICUnary of {oper: BuiltIns.unaryOps, arg1: backendIC} | BICBinary of {oper: BuiltIns.binaryOps, arg1: backendIC, arg2: backendIC} | BICArbitrary of {oper: BuiltIns.arithmeticOperations, shortCond: backendIC, arg1: backendIC, arg2: backendIC, longCall: backendIC} | BICLambda of bicLambdaForm (* Lambda expressions. *) | BICCond of backendIC * backendIC * backendIC (* If-then-else expression *) | BICCase of (* Case expressions *) { cases : backendIC option list, (* NONE means "jump to the default". *) test : backendIC, default : backendIC, isExhaustive: bool, firstIndex: word } | BICBeginLoop of (* Start of tail-recursive inline function. *) { loop: backendIC, arguments: (bicSimpleBinding * argumentType) list } | BICLoop of (backendIC * argumentType) list (* Jump back to start of tail-recursive function. *) | BICRaise of backendIC (* Raise an exception *) | BICHandle of (* Exception handler. *) { exp: backendIC, handler: backendIC, exPacketAddr: int } | BICTuple of backendIC list (* Tuple *) | BICSetContainer of (* Copy a tuple to a container. *) { container: backendIC, tuple: backendIC, filter: BoolVector.vector } | BICLoadContainer of {base: backendIC, offset: int } | BICTagTest of { test: backendIC, tag: word, maxTag: word } | BICLoadOperation of { kind: loadStoreKind, address: bicAddress } | BICStoreOperation of { kind: loadStoreKind, address: bicAddress, value: backendIC } | BICBlockOperation of { kind: blockOpKind, sourceLeft: bicAddress, destRight: bicAddress, length: backendIC } | BICAllocateWordMemory of {numWords: backendIC, flags: backendIC, initial: backendIC} and bicCodeBinding = BICDeclar of bicSimpleBinding (* Make a local declaration or push an argument *) | BICRecDecs of { addr: int, lambda: bicLambdaForm } list (* Set of mutually recursive declarations. *) | BICNullBinding of backendIC (* Just evaluate the expression and discard the result. *) | BICDecContainer of { addr: int, size: int } (* Create a container for a tuple on the stack. *) and caseType = CaseWord (* Word or fixed-precision integer. 
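   A word case dispatches directly on the value: illustratively, and
   assuming the usual reading of firstIndex, a case over the values
   5, 6 and 7 would carry firstIndex = 0w5 and a three-entry cases
   list, with NONE entries falling through to the default.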
*) | CaseTag of word and bicLoadForm = BICLoadLocal of int (* Local binding *) | BICLoadArgument of int (* Argument - 0 is first arg etc.*) | BICLoadClosure of int (* Closure - 0 is first closure item etc *) | BICLoadRecursive (* Recursive call *) and loadStoreKind = LoadStoreMLWord of {isImmutable: bool} (* Load/Store an ML word in an ML word cell. *) | LoadStoreMLByte of {isImmutable: bool} (* Load/Store a byte, tagging and untagging as appropriate, in an ML byte cell. *) | LoadStoreC8 (* Load/Store C values - The base address is a boxed SysWord.word value. *) | LoadStoreC16 | LoadStoreC32 | LoadStoreC64 | LoadStoreCFloat | LoadStoreCDouble | LoadStoreUntaggedUnsigned and blockOpKind = BlockOpMove of {isByteMove: bool} | BlockOpEqualByte | BlockOpCompareByte withtype bicSimpleBinding = { (* Declare a value or push an argument. *) value: backendIC, addr: int } and bicLambdaForm = { (* Lambda expressions. *) body : backendIC, name : string, closure : bicLoadForm list, argTypes : argumentType list, resultType : argumentType, localCount : int, heapClosure : bool } and bicAddress = (* Address form used in loads, store and block operations. The base is an ML address if this is to/from ML memory or a (boxed) SysWord.word if it is to/from C memory. The index is a value in units of the size of the item being loaded/stored and the offset is always in bytes. *) {base: backendIC, index: backendIC option, offset: int} structure CodeTags = struct open Universal val tupleTag: universal list list tag = tag() fun splitProps _ [] = (NONE, []) | splitProps tag (hd::tl) = if Universal.tagIs tag hd then (SOME hd, tl) else let val (p, l) = splitProps tag tl in (p, hd :: l) end fun mergeTupleProps(p, []) = p | mergeTupleProps([], p) = p | mergeTupleProps(m, n) = ( case (splitProps tupleTag m, splitProps tupleTag n) of ((SOME mp, ml), (SOME np, nl)) => let val mpl = Universal.tagProject tupleTag mp and npl = Universal.tagProject tupleTag np val merge = ListPair.mapEq mergeTupleProps (mpl, npl) in Universal.tagInject tupleTag merge :: (ml @ nl) end | _ => m @ n ) end fun loadStoreKindRepr(LoadStoreMLWord {isImmutable=true}) = "MLWordImmutable" | loadStoreKindRepr(LoadStoreMLWord {isImmutable=false}) = "MLWord" | loadStoreKindRepr(LoadStoreMLByte {isImmutable=true}) = "MLByteImmutable" | loadStoreKindRepr(LoadStoreMLByte {isImmutable=false}) = "MLByte" | loadStoreKindRepr LoadStoreC8 = "C8Bit" | loadStoreKindRepr LoadStoreC16 = "C16Bit" | loadStoreKindRepr LoadStoreC32 = "C32Bit" | loadStoreKindRepr LoadStoreC64 = "C64Bit" | loadStoreKindRepr LoadStoreCFloat = "CFloat" | loadStoreKindRepr LoadStoreCDouble = "CDouble" | loadStoreKindRepr LoadStoreUntaggedUnsigned = "MLWordUntagged" fun blockOpKindRepr (BlockOpMove{isByteMove=false}) = "MoveWord" | blockOpKindRepr (BlockOpMove{isByteMove=true}) = "MoveByte" | blockOpKindRepr BlockOpEqualByte = "EqualByte" | blockOpKindRepr BlockOpCompareByte = "CompareByte" open Pretty fun pList ([]: 'b list, _: string, _: 'b->pretty) = [] | pList ([h], _, disp) = [disp h] | pList (h::t, sep, disp) = PrettyBlock (0, false, [], [ disp h, PrettyBreak (0, 0), PrettyString sep ] ) :: PrettyBreak (1, 0) :: pList (t, sep, disp) fun pretty (pt : backendIC) : pretty = let fun printList(start, lst, sep) : pretty = PrettyBlock (1, true, [], PrettyString (start ^ "(") :: pList(lst, sep, pretty) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) fun prettyArgType GeneralType = PrettyString "G" | prettyArgType DoubleFloatType = PrettyString "D" | prettyArgType SingleFloatType = PrettyString "F" fun 
prettyArg (c, t) = PrettyBlock(1, false, [], [pretty c, PrettyBreak (1, 0), prettyArgType t]) fun prettyArgs(start, lst, sep) : pretty = PrettyBlock (1, true, [], PrettyString (start ^ "(") :: pList(lst, sep, prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) fun prettyAddress({base, index, offset}: bicAddress): pretty = let in PrettyBlock (1, true, [], [ PrettyString "[", PrettyBreak (0, 3), pretty base, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), case index of NONE => PrettyString "-" | SOME i => pretty i, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), PrettyString(Int.toString offset), PrettyBreak (0, 0), PrettyString "]" ]) end in case pt of BICEval {function, argList, resultType} => let val prettyArgs = PrettyBlock (1, true, [], PrettyString ("$(") :: pList(argList, ",", prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) in PrettyBlock (3, false, [], [ pretty function, PrettyBreak(1, 0), prettyArgType resultType, PrettyBreak(1, 0), prettyArgs ] ) end | BICUnary { oper, arg1 } => PrettyBlock (3, false, [], [ PrettyString(BuiltIns.unaryRepr oper), PrettyBreak(1, 0), printList("", [arg1], ",") ] ) | BICBinary { oper, arg1, arg2 } => PrettyBlock (3, false, [], [ PrettyString(BuiltIns.binaryRepr oper), PrettyBreak(1, 0), printList("", [arg1, arg2], ",") ] ) | BICNullary { oper } => PrettyString(BuiltIns.nullaryRepr oper) | BICArbitrary { oper, shortCond, arg1, arg2, longCall } => PrettyBlock (3, false, [], [ PrettyString(BuiltIns.arithRepr oper), PrettyBreak(1, 0), printList("", [shortCond, arg1, arg2, longCall], ",") ] ) | BICAllocateWordMemory { numWords, flags, initial } => PrettyBlock (3, false, [], [ PrettyString "AllocateWordMemory", PrettyBreak(1, 0), printList("", [numWords, flags, initial], ",") ] ) | BICExtract (BICLoadLocal addr) => let val str : string = concat ["LOCAL(", Int.toString addr, ")"] in PrettyString str end | BICExtract (BICLoadArgument addr) => let val str : string = concat ["PARAM(", Int.toString addr, ")"] in PrettyString str end | BICExtract (BICLoadClosure addr) => let val str : string = concat ["CLOS(", Int.toString addr, ")"] in PrettyString str end | BICExtract (BICLoadRecursive) => let val str : string = concat ["RECURSIVE(", ")"] in PrettyString str end | BICField {base, offset} => let val str = "INDIRECT(" ^ Int.toString offset ^ ", "; in PrettyBlock(0, false, [], [ PrettyString str, pretty base, PrettyString ")" ] ) end | BICLambda {body, name, closure, argTypes, heapClosure, resultType, localCount} => let fun prettyArgTypes [] = [] | prettyArgTypes [last] = [prettyArgType last] | prettyArgTypes (hd::tl) = prettyArgType hd :: PrettyBreak(1, 0) :: prettyArgTypes tl in PrettyBlock (1, true, [], [ PrettyString ("LAMBDA("), PrettyBreak (1, 0), PrettyString name, PrettyBreak (1, 0), PrettyString ( "CL=" ^ Bool.toString heapClosure), PrettyString (" LOCALS=" ^ Int.toString localCount), PrettyBreak(1, 0), PrettyBlock (1, false, [], PrettyString "ARGS=" :: prettyArgTypes argTypes), PrettyBreak(1, 0), PrettyBlock (1, false, [], [PrettyString "RES=", prettyArgType resultType]), printList (" CLOS=", map BICExtract closure, ","), PrettyBreak (1, 0), pretty body, PrettyString "){LAMBDA}" ] ) end | BICConstnt (w, _) => PrettyString (stringOfWord w) | BICCond (f, s, t) => PrettyBlock (1, true, [], [ PrettyString "IF(", pretty f, PrettyString ", ", PrettyBreak (0, 0), pretty s, PrettyString ", ", PrettyBreak (0, 0), pretty t, PrettyBreak (0, 0), PrettyString (")") ] ) | BICNewenv(decs, final) => PrettyBlock (1, true, [], 
PrettyString ("BLOCK" ^ "(") :: pList(decs, ";", prettyBinding) @ [ PrettyBreak (1, 0), pretty final, PrettyBreak (0, 0), PrettyString (")") ] ) | BICBeginLoop{loop=loopExp, arguments=args } => let fun prettyArg (c, t) = PrettyBlock(1, false, [], [prettySimpleBinding c, PrettyBreak (1, 0), prettyArgType t]) in PrettyBlock (3, false, [], [ PrettyBlock (1, true, [], PrettyString ("BEGINLOOP(") :: pList(args, ",", prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ), PrettyBreak (0, 0), PrettyString "(", PrettyBreak (0, 0), pretty loopExp, PrettyBreak (0, 0), PrettyString ")" ] ) end | BICLoop ptl => prettyArgs("LOOP", ptl, ",") | BICRaise c => PrettyBlock (1, true, [], [ PrettyString "RAISE(", pretty c, PrettyBreak (0, 0), PrettyString (")") ] ) | BICHandle {exp, handler, exPacketAddr} => PrettyBlock (3, false, [], [ PrettyString "HANDLE(", pretty exp, PrettyString ("WITH exid=" ^ Int.toString exPacketAddr), PrettyBreak (1, 0), pretty handler, PrettyString ")" ] ) | BICCase {cases, test, default, isExhaustive, firstIndex, ...} => PrettyBlock (1, true, [], PrettyString "CASE (" :: pretty test :: PrettyBreak (1, 0) :: PrettyString ("( from " ^ Word.toString firstIndex ^ (if isExhaustive then " exhaustive" else "")) :: PrettyBreak (1, 0) :: pList(cases, ",", fn (SOME exp) => PrettyBlock (1, true, [], [ PrettyString "=>", PrettyBreak (1, 0), pretty exp ]) | NONE => PrettyString "=> default" ) @ [ PrettyBreak (1, 0), PrettyBlock (1, false, [], [ PrettyString "ELSE:", PrettyBreak (1, 0), pretty default ] ), PrettyBreak (1, 0), PrettyString (") {"^"CASE"^"}") ] ) | BICTuple ptl => printList("RECCONSTR", ptl, ",") | BICSetContainer{container, tuple, filter} => let val source = BoolVector.length filter val dest = BoolVector.foldl(fn (true, n) => n+1 | (false, n) => n) 0 filter in PrettyBlock (3, false, [], [ PrettyString (concat["SETCONTAINER(", Int.toString dest, "/", Int.toString source, ", "]), pretty container, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), pretty tuple, PrettyBreak (0, 0), PrettyString ")" ] ) end | BICLoadContainer {base, offset} => let val str = "INDIRECTCONTAINER(" ^ Int.toString offset ^ ", "; in PrettyBlock(0, false, [], [ PrettyString str, pretty base, PrettyString ")" ] ) end | BICTagTest { test, tag, maxTag } => PrettyBlock (3, false, [], [ PrettyString (concat["TAGTEST(", Word.toString tag, ", ", Word.toString maxTag, ","]), PrettyBreak (1, 0), pretty test, PrettyBreak (0, 0), PrettyString ")" ] ) | BICLoadOperation{ kind, address } => PrettyBlock (3, false, [], [ PrettyString("Load" ^ loadStoreKindRepr kind), PrettyBreak (1, 0), prettyAddress address ] ) | BICStoreOperation{ kind, address, value } => PrettyBlock (3, false, [], [ PrettyString("Store" ^ loadStoreKindRepr kind), PrettyBreak (1, 0), prettyAddress address, PrettyBreak (1, 0), PrettyString "<=", PrettyBreak (1, 0), pretty value ] ) | BICBlockOperation{ kind, sourceLeft, destRight, length } => PrettyBlock (3, false, [], [ PrettyString(blockOpKindRepr kind ^ "("), PrettyBreak (1, 0), prettyAddress sourceLeft, PrettyBreak (1, 0), PrettyString ",", prettyAddress destRight, PrettyBreak (1, 0), PrettyString ",", pretty length, PrettyBreak (1, 0), PrettyString ")" ] ) (* That list should be exhaustive! 
*) end (* pretty *) and prettyBinding(BICDeclar dec) = prettySimpleBinding dec | prettyBinding(BICRecDecs ptl) = let fun prettyRDec {lambda, addr} = PrettyBlock (1, false, [], [ PrettyString (concat ["DECL #", Int.toString addr, "="]), PrettyBreak (1, 0), pretty(BICLambda lambda) ] ) in PrettyBlock (1, true, [], PrettyString ("MUTUAL" ^ "(") :: pList(ptl, " AND ", prettyRDec) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) end | prettyBinding(BICNullBinding c) = pretty c | prettyBinding(BICDecContainer{addr, size}) = PrettyString (concat ["CONTAINER #", Int.toString addr, "=", Int.toString size]) and prettySimpleBinding{value, addr} = PrettyBlock (1, false, [], [ PrettyString (concat ["DECL #", Int.toString addr, "="]), PrettyBreak (1, 0), pretty value ] ) structure Sharing = struct type backendIC = backendIC and bicLoadForm = bicLoadForm and caseType = caseType and pretty = pretty and argumentType = argumentType and bicCodeBinding = bicCodeBinding and bicSimpleBinding = bicSimpleBinding and loadStoreKind = loadStoreKind and blockOpKind = blockOpKind and unaryOps = BuiltIns.unaryOps and binaryOps = BuiltIns.binaryOps and nullaryOps = BuiltIns.nullaryOps and testConditions = BuiltIns.testConditions and arithmeticOperations = BuiltIns.arithmeticOperations end end; diff --git a/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONS.ML b/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONS.ML index acf1ddc2..da277cbb 100644 --- a/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONS.ML +++ b/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONS.ML @@ -1,1803 +1,1802 @@ (* Copyright (c) 2015-18, 2020-21 David C.J. Matthews Copyright (c) 2000 Cambridge University Technical Services Limited This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor INTCODECONS ( structure DEBUG: DEBUG structure PRETTY: PRETTYSIG ) : INTCODECONSSIG = struct open CODE_ARRAY open DEBUG open Address open Misc infix 5 << <<+ <<- >> >>+ >>- ~>> ~>>+ ~>>- (* Shift operators *) infix 3 andb orb xorb andbL orbL xorbL andb8 orb8 xorb8 val op << = Word.<< and op >> = Word.>> and op ~>> = Word.~>> val wordToWord8 = Word8.fromLargeWord o Word.toLargeWord and word8ToWord = Word.fromLargeWord o Word8.toLargeWord (* Typically the compiler is built on a little-endian machine but it could be run on a machine with either endian-ness. We have to find out the endian-ness when we run. There are separate versions of the compiler for 32-bit and 64-bit so that can be a constant. *) local val isBigEndian: unit -> bool = RunCall.rtsCallFast1 "PolyIsBigEndian" in val isBigEndian = isBigEndian() end val opcode_jump = 0wx02 (* 8-bit unsigned jump forward. *) and opcode_jumpFalse = 0wx03 (* Test top of stack. Take 8-bit unsigned jump if false. 
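   Both 8-bit jump forms take their operand relative to the end of the
   two-byte instruction: genByteCode below computes the forward
   displacement as dest - (ic + 0w2).  Worked example (illustrative):
   the bytes
       02 05
   assembled at address A transfer control to A + 2 + 5.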
*) and opcode_loadMLWord = 0wx04 and opcode_storeMLWord = 0wx05 and opcode_alloc_ref = 0wx06 and opcode_blockMoveWord = 0wx07 and opcode_loadUntagged = 0wx08 and opcode_storeUntagged = 0wx09 and opcode_case16 = 0wx0a and opcode_callClosure = 0wx0c and opcode_returnW = 0wx0d and opcode_containerB = 0wx0e and opcode_raiseEx = 0wx10 (*and opcode_callConstAddr16 = 0wx11 and opcode_callConstAddr8 = 0wx12*) and opcode_localW = 0wx13 and opcode_constAddr16_8 = 0wx14 and opcode_constAddr8_8 = 0wx15 and opcode_callLocalB = 0wx16 and opcode_callConstAddr8_8 = 0wx17 and opcode_callConstAddr16_8 = 0wx18 (*and opcode_constAddr16 = 0wx1a *) and opcode_constIntW = 0wx1b and opcode_jumpBack8 = 0wx1e (* 8-bit unsigned jump backwards - relative to end of instr. *) and opcode_returnB = 0wx1f and opcode_jumpBack16 = 0wx20 (* 16-bit unsigned jump backwards - relative to end of instr. *) and opcode_indirectLocalBB = 0wx21 and opcode_localB = 0wx22 and opcode_indirectB = 0wx23 and opcode_moveToContainerB = 0wx24 and opcode_setStackValB = 0wx25 and opcode_resetB = 0wx26 and opcode_resetRB = 0wx27 and opcode_constIntB = 0wx28 and opcode_local_0 = 0wx29 and opcode_local_1 = 0wx2a and opcode_local_2 = 0wx2b and opcode_local_3 = 0wx2c and opcode_local_4 = 0wx2d and opcode_local_5 = 0wx2e and opcode_local_6 = 0wx2f and opcode_local_7 = 0wx30 and opcode_local_8 = 0wx31 and opcode_local_9 = 0wx32 and opcode_local_10 = 0wx33 and opcode_local_11 = 0wx34 and opcode_indirect_0 = 0wx35 and opcode_indirect_1 = 0wx36 and opcode_indirect_2 = 0wx37 and opcode_indirect_3 = 0wx38 and opcode_indirect_4 = 0wx39 and opcode_indirect_5 = 0wx3a and opcode_const_0 = 0wx3b and opcode_const_1 = 0wx3c and opcode_const_2 = 0wx3d and opcode_const_3 = 0wx3e and opcode_const_4 = 0wx3f and opcode_const_10 = 0wx40 and opcode_return_1 = 0wx42 and opcode_return_2 = 0wx43 and opcode_return_3 = 0wx44 and opcode_local_12 = 0wx45 and opcode_jumpTrue = 0wx46 and opcode_jump16True = 0wx47 and opcode_local_13 = 0wx49 and opcode_local_14 = 0wx4a and opcode_local_15 = 0wx4b and opcode_arbAdd = 0wx4c and opcode_arbSubtract = 0wx4d and opcode_arbMultiply = 0wx4e and opcode_reset_1 = 0wx50 and opcode_reset_2 = 0wx51 and opcode_noop = 0wx52 (* Used for alignment in ARM64 enter-int. 
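   The enter-int sequence has to sit on a 32-bit boundary, so codeSize
   below reserves 16 bytes for the 13-byte EnterIntArm64 instruction,
   leaving room for up to three of these noop bytes before and/or
   after it as padding.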
*) and opcode_indirectClosureBB = 0wx54 and opcode_constAddr8_0 = 0wx55 and opcode_constAddr8_1 = 0wx56 and opcode_callConstAddr8_0 = 0wx57 and opcode_callConstAddr8_1 = 0wx58 and opcode_resetR_1 = 0wx64 and opcode_resetR_2 = 0wx65 and opcode_resetR_3 = 0wx66 and opcode_tupleB = 0wx68 and opcode_tuple_2 = 0wx69 and opcode_tuple_3 = 0wx6a and opcode_tuple_4 = 0wx6b and opcode_lock = 0wx6c and opcode_ldexc = 0wx6d and opcode_indirectContainerB= 0wx74 and opcode_moveToMutClosureB = 0wx75 and opcode_allocMutClosureB = 0wx76 and opcode_indirectClosureB0 = 0wx77 and opcode_pushHandler = 0wx78 and opcode_indirectClosureB1 = 0wx7a and opcode_tailbb = 0wx7b and opcode_indirectClosureB2 = 0wx7c and opcode_setHandler = 0wx81 and opcode_callFastRTS0 = 0wx83 and opcode_callFastRTS1 = 0wx84 and opcode_callFastRTS2 = 0wx85 and opcode_callFastRTS3 = 0wx86 and opcode_callFastRTS4 = 0wx87 and opcode_callFastRTS5 = 0wx88 and opcode_notBoolean = 0wx91 and opcode_isTagged = 0wx92 and opcode_cellLength = 0wx93 and opcode_cellFlags = 0wx94 and opcode_clearMutable = 0wx95 and opcode_equalWord = 0wxa0 and opcode_lessSigned = 0wxa2 and opcode_lessUnsigned = 0wxa3 and opcode_lessEqSigned = 0wxa4 and opcode_lessEqUnsigned = 0wxa5 and opcode_greaterSigned = 0wxa6 and opcode_greaterUnsigned = 0wxa7 and opcode_greaterEqSigned = 0wxa8 and opcode_greaterEqUnsigned = 0wxa9 and opcode_fixedAdd = 0wxaa and opcode_fixedSub = 0wxab and opcode_fixedMult = 0wxac and opcode_fixedQuot = 0wxad and opcode_fixedRem = 0wxae and opcode_wordAdd = 0wxb1 and opcode_wordSub = 0wxb2 and opcode_wordMult = 0wxb3 and opcode_wordDiv = 0wxb4 and opcode_wordMod = 0wxb5 and opcode_wordAnd = 0wxb7 and opcode_wordOr = 0wxb8 and opcode_wordXor = 0wxb9 and opcode_wordShiftLeft = 0wxba and opcode_wordShiftRLog = 0wxbb and opcode_allocByteMem = 0wxbd and opcode_indirectLocalB1 = 0wxc1 and opcode_isTaggedLocalB = 0wxc2 and opcode_jumpNEqLocalInd = 0wxc3 and opcode_jumpTaggedLocal = 0wxc4 and opcode_jumpNEqLocal = 0wxc5 and opcode_indirect0Local0 = 0wxc6 and opcode_indirectLocalB0 = 0wxc7 and opcode_closureB = 0wxd0 and opcode_getThreadId = 0wxd9 and opcode_allocWordMemory = 0wxda and opcode_loadMLByte = 0wxdc and opcode_storeMLByte = 0wxe4 (*and opcode_enterIntARM64 = 0wxe9*) (* Reserved - the low order byte of a move *) and opcode_blockMoveByte = 0wxec and opcode_blockEqualByte = 0wxed and opcode_blockCompareByte = 0wxee and opcode_deleteHandler = 0wxf1 (* Just deletes the handler - no jump. *) and opcode_jump16 = 0wxf7 and opcode_jump16False = 0wxf8 and opcode_setHandler16 = 0wxf9 (*and opcode_constAddr8 = 0wxfa*) (*and opcode_stackSize8 = 0wxfb*) and opcode_stackSize16 = 0wxfc and opcode_escape = 0wxfe (* For two-byte opcodes. 
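   A two-byte opcode is this escape byte followed by an extended
   opcode.  For example, from the tables below, realAdd assembles as
   the pair
       fe d5
   and the disassembler dispatches on the byte following 0xfe.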
*) (*and opcode_enterIntX86 = 0wxff*) (* Reserved - this is the first byte of a call *) (* Extended opcodes - preceded by 0xfe escape *) val ext_opcode_containerW = 0wx0b and ext_opcode_allocMutClosureW = 0wx0f (* Allocate a mutable closure for mutual recursion *) and ext_opcode_indirectClosureW = 0wx10 and ext_opcode_indirectContainerW= 0wx11 and ext_opcode_indirectW = 0wx14 and ext_opcode_moveToContainerW = 0wx15 and ext_opcode_moveToMutClosureW = 0wx16 and ext_opcode_setStackValW = 0wx17 and ext_opcode_resetW = 0wx18 and ext_opcode_resetR_w = 0wx19 and ext_opcode_callFastRTSRRtoR = 0wx1c and ext_opcode_callFastRTSRGtoR = 0wx1d and ext_opcode_jump32True = 0wx48 and ext_opcode_floatAbs = 0wx56 and ext_opcode_floatNeg = 0wx57 and ext_opcode_fixedIntToFloat = 0wx58 and ext_opcode_floatToReal = 0wx59 and ext_opcode_realToFloat = 0wx5a and ext_opcode_floatEqual = 0wx5b and ext_opcode_floatLess = 0wx5c and ext_opcode_floatLessEq = 0wx5d and ext_opcode_floatGreater = 0wx5e and ext_opcode_floatGreaterEq = 0wx5f and ext_opcode_floatAdd = 0wx60 and ext_opcode_floatSub = 0wx61 and ext_opcode_floatMult = 0wx62 and ext_opcode_floatDiv = 0wx63 and ext_opcode_tupleW = 0wx67 and ext_opcode_realToInt = 0wx6e and ext_opcode_floatToInt = 0wx6f and ext_opcode_callFastRTSFtoF = 0wx70 and ext_opcode_callFastRTSGtoF = 0wx71 and ext_opcode_callFastRTSFFtoF = 0wx72 and ext_opcode_callFastRTSFGtoF = 0wx73 and ext_opcode_realUnordered = 0wx79 and ext_opcode_floatUnordered = 0wx7a and ext_opcode_tail = 0wx7c and ext_opcode_callFastRTSRtoR = 0wx8f and ext_opcode_callFastRTSGtoR = 0wx90 and ext_opcode_atomicExchAdd = 0wx96 and ext_opcode_atomicReset = 0wx99 and ext_opcode_longWToTagged = 0wx9a and ext_opcode_signedToLongW = 0wx9b and ext_opcode_unsignedToLongW = 0wx9c and ext_opcode_realAbs = 0wx9d and ext_opcode_realNeg = 0wx9e and ext_opcode_fixedIntToReal = 0wx9f and ext_opcode_fixedDiv = 0wxaf and ext_opcode_fixedMod = 0wxb0 and ext_opcode_wordShiftRArith = 0wxbc and ext_opcode_lgWordEqual = 0wxbe and ext_opcode_lgWordLess = 0wxc0 and ext_opcode_lgWordLessEq = 0wxc1 and ext_opcode_lgWordGreater = 0wxc2 and ext_opcode_lgWordGreaterEq = 0wxc3 and ext_opcode_lgWordAdd = 0wxc4 and ext_opcode_lgWordSub = 0wxc5 and ext_opcode_lgWordMult = 0wxc6 and ext_opcode_lgWordDiv = 0wxc7 and ext_opcode_lgWordMod = 0wxc8 and ext_opcode_lgWordAnd = 0wxc9 and ext_opcode_lgWordOr = 0wxca and ext_opcode_lgWordXor = 0wxcb and ext_opcode_lgWordShiftLeft = 0wxcc and ext_opcode_lgWordShiftRLog = 0wxcd and ext_opcode_lgWordShiftRArith = 0wxce and ext_opcode_realEqual = 0wxcf and ext_opcode_closureW = 0wxd0 and ext_opcode_realLess = 0wxd1 and ext_opcode_realLessEq = 0wxd2 and ext_opcode_realGreater = 0wxd3 and ext_opcode_realGreaterEq = 0wxd4 and ext_opcode_realAdd = 0wxd5 and ext_opcode_realSub = 0wxd6 and ext_opcode_realMult = 0wxd7 and ext_opcode_realDiv = 0wxd8 and ext_opcode_loadC8 = 0wxdd and ext_opcode_loadC16 = 0wxde and ext_opcode_loadC32 = 0wxdf and ext_opcode_loadC64 = 0wxe0 and ext_opcode_loadCFloat = 0wxe1 and ext_opcode_loadCDouble = 0wxe2 and ext_opcode_storeC8 = 0wxe5 and ext_opcode_storeC16 = 0wxe6 and ext_opcode_storeC32 = 0wxe7 and ext_opcode_storeC64 = 0wxe8 and ext_opcode_storeCFloat = 0wxe9 and ext_opcode_storeCDouble = 0wxea and ext_opcode_constAddr32_16 = 0wxf0 (* Followed by a 32-bit offset and a 16-bit constant number. *) and ext_opcode_jump32 = 0wxf2 (* 32-bit signed jump, forwards or backwards. *) and ext_opcode_jump32False = 0wxf3 (* Test top item. Take 32-bit signed jump if false. 
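   The 32-bit jumps store a signed little-endian displacement measured
   from the end of the six-byte instruction: genByteCode computes
   diff = dest - (ic + 0w6) and extracts the bytes with arithmetic
   shifts, so a backward jump is emitted as a two's-complement
   negative offset.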
*) (*and ext_opcode_constAddr32 = 0wxf4 *) and ext_opcode_setHandler32 = 0wxf5 (* Setup a handler whose address is given by the 32-bit signed offset. *) and ext_opcode_case32 = 0wxf6 (* Indexed case with 32-bit offsets *) and ext_opcode_allocCSpace = 0wxfd and ext_opcode_freeCSpace = 0wxfe (* A Label is a ref that is later set to the location. Several labels can be linked together so that they are only set at a single point. Only forward jumps are linked so when we come to finally set the label we will have the full list. *) type labels = Word.word ref list ref (* Used for jump, jumpFalse, setHandler and delHandler. *) datatype jumpTypes = Jump | JumpBack | JumpFalse | JumpTrue | SetHandler datatype opcode = SimpleCode of Word8.word list (* Bytes that don't need any special treatment *) | LabelCode of labels (* A label - forwards or backwards. *) | JumpInstruction of { label: labels, jumpType: jumpTypes, size: jumpSize ref } (* Jumps or SetHandler. *) | PushConstant of { constNum: int, size : jumpSize ref, isCall: bool } | PushShort of Word.word | IndexedCase of { labels: labels list, size : jumpSize ref } | LoadLocal of Word8.word (* Locals - simplifies peephole optimisation. *) | IndirectLocal of { localAddr: Word8.word, indirect: Word8.word } (* Ditto *) | UncondTransfer of Word8.word list (* Raisex, return and tail. *) | IsTaggedLocalB of Word8.word | JumpOnIsTaggedLocalB of { label: labels, size: jumpSize ref, localAddr: Word8.word } | JumpNotEqualLocalInd0BB of { label: labels, size: jumpSize ref, localAddr: Word8.word, const: Word8.word } | JumpNotEqualLocalConstBB of { label: labels, size: jumpSize ref, localAddr: Word8.word, const: Word8.word } | EnterIntArm64 of Word8.word (* Special case because it has to be 32-bit aligned. *) and jumpSize = Size8 | Size16 | Size32 and code = Code of { constVec: machineWord list ref, (* Vector of words to be put at end *) procName: string, (* Name of the procedure. *) printAssemblyCode:bool, (* Whether to print the code when we finish. *) printStream: string->unit, (* The stream to use *) stage1Code: opcode list ref, enterIntMode: int (* 0 => None, 1 => X86. *) } val getEnterIntMode: unit -> int = RunCall.rtsCallFast0 "PolyInterpretedEnterIntMode" (* create and initialise a code segment *) fun codeCreate (name : string, parameters) = let val printStream = PRETTY.getSimplePrinter(parameters, []) in Code { constVec = ref [], procName = name, printAssemblyCode = DEBUG.getParameter DEBUG.assemblyCodeTag parameters, printStream = printStream, stage1Code = ref [], enterIntMode = getEnterIntMode() } end (* Find the offset in the constant area of a constant. *) (* The first has offset 0. *) fun addConstToVec (valu, Code{constVec, ...}) = let (* Search the list to see if the constant is already there. *) fun findConst valu [] num = (* Add to the list *) ( constVec := ! constVec @ [valu]; num ) | findConst valu (h :: t) num = if wordEq (valu, h) then num else findConst valu t (num + 1) (* Not equal *) in findConst valu (! constVec) 0 end fun printCode (seg: codeVec, procName: string, endcode, printStream) = let val () = printStream "\n"; val () = if procName = "" (* No name *) then printStream "?" else printStream procName; val () = printStream ":\n"; (* prints a string representation of a number *) fun printHex (v) = printStream(Word.fmt StringCvt.HEX v); val ptr = ref 0w0; (* Gets "length" bytes from locations "addr", "addr"+1... Returns an unsigned number. 
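   The byte at the lowest address is the least significant, i.e. the
   operand encoding is little-endian.  A worked example (illustrative):
   if the bytes at addr and addr+1 are 0x34 and 0x12 then
       getB (2, addr, seg) = 0wx1234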
*) fun getB (0, _, _) = 0w0 | getB (length, addr, seg) = (getB (length - 1, addr + 0w1, seg) << 0w8) + word8ToWord (codeVecGet (seg, addr)) (* Prints a relative address. *) fun printDisp (len, spacer: string) = let val ad = getB(len, !ptr, seg) + !ptr + Word.fromInt len val () = printStream spacer; val () = printHex ad; in ptr := !ptr + Word.fromInt len end (* Prints an operand of an instruction *) fun printOp (len, spacer : string) = let val () = printStream spacer; val () = printHex (getB (len, !ptr, seg)) in ptr := !ptr + Word.fromInt len end; in while !ptr < endcode do let val addr = !ptr in printHex addr; (* The address. *) let (* It's an instruction. *) val () = printStream "\t" val opc = codeVecGet (seg, !ptr) (* opcode *) val () = ptr := !ptr + 0w1 in case opc of 0wx02 => (printStream "jump"; printDisp (1, "\t\t")) | 0wx03 => (printStream "jumpFalse"; printDisp (1, "\t")) | 0wx04 => printStream "loadMLWord" | 0wx05 => printStream "storeMLWord" | 0wx06 => printStream "alloc_ref" | 0wx07 => printStream "blockMoveWord" | 0wx08 => printStream "loadUntagged" | 0wx09 => printStream "storeUntagged" | 0wx0a => let (* Have to find out how many items there are. *) val limit = getB (2, !ptr, seg); val () = printOp (2, "case16\t"); val base = !ptr; fun printEntry _ = (printStream "\n\t"; printHex(base + getB(2, !ptr, seg)); ptr := !ptr + 0w2) fun forLoop f i n = if i >= n then () else (f i; forLoop f (i + 0w1) n) in forLoop printEntry 0w0 limit end | 0wx0c => printStream "callClosure" | 0wx0d => printOp(2, "returnW\t") | 0wx0e => printStream "containerB" | 0wx0f => printOp(2, "allocMutClosure") | 0wx10 => printStream "raiseEx" | 0wx11 => printDisp (2, "callConstAddr16\t") | 0wx12 => printDisp (1, "callConstAddr8\t") | 0wx13 => printOp(2, "localW\t") | 0wx14 => (printDisp (2, "constAddr16_8\t"); printOp(1, ",")) | 0wx15 => (printDisp (1, "constAddr8_8\t"); printOp(1, ",")) | 0wx16 => printOp(1, "callLocalB\t") | 0wx17 => (printDisp (1, "callConstAddr8_8\t"); printOp(1, ",")) | 0wx18 => (printDisp (2, "callConstAddr16_8\t"); printOp(1, ",")) | 0wx1a => (printStream "constAddr16"; printDisp (2, "\t")) | 0wx1b => printOp(2, "constIntW\t") | 0wx1e => ((* Should be negative *) printStream "jumpBack8\t"; printHex((!ptr - 0w1) - getB(1, !ptr, seg)); ptr := !ptr + 0w1 ) | 0wx1f => printOp(1, "returnB\t") | 0wx20 => ( printStream "jumpBack16\t"; printHex((!ptr - 0w1) - getB(2, !ptr, seg)); ptr := !ptr + 0w2 ) | 0wx21 => (printOp(1, "indirectLocalBB\t"); printOp(1, ",")) | 0wx22 => printOp(1, "localB\t") | 0wx23 => printOp(1, "indirectB\t") | 0wx24 => printOp(1, "moveToContainerB\t") | 0wx25 => printOp(1, "setStackValB\t") | 0wx26 => printOp(1, "resetB\t") | 0wx27 => printOp(1, "resetRB\t") | 0wx28 => printOp(1, "constIntB\t") | 0wx29 => printStream "local_0" | 0wx2a => printStream "local_1" | 0wx2b => printStream "local_2" | 0wx2c => printStream "local_3" | 0wx2d => printStream "local_4" | 0wx2e => printStream "local_5" | 0wx2f => printStream "local_6" | 0wx30 => printStream "local_7" | 0wx31 => printStream "local_8" | 0wx32 => printStream "local_9" | 0wx33 => printStream "local_10" | 0wx34 => printStream "local_11" | 0wx35 => printStream "indirect_0" | 0wx36 => printStream "indirect_1" | 0wx37 => printStream "indirect_2" | 0wx38 => printStream "indirect_3" | 0wx39 => printStream "indirect_4" | 0wx3a => printStream "indirect_5" | 0wx3b => printStream "const_0" | 0wx3c => printStream "const_1" | 0wx3d => printStream "const_2" | 0wx3e => printStream "const_3" | 0wx3f => printStream "const_4" | 0wx40 => 
printStream "const_10" | 0wx41 => printStream "return_0" | 0wx42 => printStream "return_1" | 0wx43 => printStream "return_2" | 0wx44 => printStream "return_3" | 0wx45 => printStream "local_12" | 0wx46 => (printStream "jumpTrue"; printDisp (1, "\t")) | 0wx47 => (printStream "jumpTrue"; printDisp (2, "\t")) | 0wx49 => printStream "local_13" | 0wx4a => printStream "local_14" | 0wx4b => printStream "local_15" | 0wx4c => printStream "arbAdd" | 0wx4d => printStream "arbSubtract" | 0wx4e => printStream "arbMultiply" | 0wx50 => printStream "reset_1" | 0wx51 => printStream "reset_2" | 0wx52 => printStream "noop" | 0wx54 => (printOp(1, "indirectClosureBB\t"); printOp(1, ", ")) | 0wx55 => printDisp (1, "constAddr8_0\t") | 0wx56 => printDisp (1, "constAddr8_1\t") | 0wx57 => printDisp (1, "callConstAddr8_0\t") | 0wx58 => printDisp (1, "callConstAddr8_1\t") | 0wx64 => printStream "resetR_1" | 0wx65 => printStream "resetR_2" | 0wx66 => printStream "resetR_3" | 0wx68 => printOp(1, "tupleB\t") | 0wx69 => printStream "tuple_2" | 0wx6a => printStream "tuple_3" | 0wx6b => printStream "tuple_4" | 0wx6c => printStream "lock" | 0wx6d => printStream "ldexc" | 0wx74 => printOp(1, "indirectContainerB\t") | 0wx75 => printOp(1, "moveToMutClosureB\t") | 0wx76 => printOp(1, "allocMutClosureB\t") | 0wx77 => printOp(1, "indirectClosureB0\t") | 0wx78 => printStream "pushHandler" | 0wx7a => printOp(1, "indirectClosureB1\t") | 0wx7b => (printOp (1, "tailbb\t"); printOp (1, ",")) | 0wx7c => printOp(1, "indirectClosureB2\t") | 0wx7d => printOp(1, "tail3b\t") | 0wx7e => printOp(1, "tail4b\t") | 0wx7f => printStream "tail3_2" | 0wx80 => printStream "tail3_3" | 0wx81 => (printStream "setHandler"; printDisp (1, "\t")) | 0wx83 => printStream "callFastRTS0" | 0wx84 => printStream "callFastRTS1" | 0wx85 => printStream "callFastRTS2" | 0wx86 => printStream "callFastRTS3" | 0wx87 => printStream "callFastRTS4" | 0wx88 => printStream "callFastRTS5" | 0wx91 => printStream "notBoolean" | 0wx92 => printStream "isTagged" | 0wx93 => printStream "cellLength" | 0wx94 => printStream "cellFlags" | 0wx95 => printStream "clearMutable" | 0wxa0 => printStream "equalWord" | 0wxa1 => printOp(1, "equalWordConstB\t") | 0wxa2 => printStream "lessSigned" | 0wxa3 => printStream "lessUnsigned" | 0wxa4 => printStream "lessEqSigned" | 0wxa5 => printStream "lessEqUnsigned" | 0wxa6 => printStream "greaterSigned" | 0wxa7 => printStream "greaterUnsigned" | 0wxa8 => printStream "greaterEqSigned" | 0wxa9 => printStream "greaterEqUnsigned" | 0wxaa => printStream "fixedAdd" | 0wxab => printStream "fixedSub" | 0wxac => printStream "fixedMult" | 0wxad => printStream "fixedQuot" | 0wxae => printStream "fixedRem" | 0wxb1 => printStream "wordAdd" | 0wxb2 => printStream "wordSub" | 0wxb3 => printStream "wordMult" | 0wxb4 => printStream "wordDiv" | 0wxb5 => printStream "wordMod" | 0wxb7 => printStream "wordAnd" | 0wxb8 => printStream "wordOr" | 0wxb9 => printStream "wordXor" | 0wxba => printStream "wordShiftLeft" | 0wxbb => printStream "wordShiftRLog" | 0wxbd => printStream "allocByteMem" | 0wxc1 => printOp(1, "indirectLocalB1\t") | 0wxc2 => printOp(1, "isTaggedLocalB\t") | 0wxc3 => (printOp(1, "jumpNEqLocalInd\t"); printOp(1, ","); printOp(1, ","); printDisp(1, "\t")) | 0wxc4 => (printOp(1, "jumpTaggedLocal\t"); printDisp(1, "\t")) | 0wxc5 => (printOp(1, "jumpNEqLocal\t"); printOp(1, ","); printOp(1, ","); printDisp(1, "\t")) | 0wxc6 => printStream "indirect0Local0" | 0wxc7 => printOp(1, "indirectLocalB0\t") | 0wxd0 => printOp(1, "closureB\t") | 0wxd9 => printStream 
"getThreadId" | 0wxda => printStream "allocWordMemory" | 0wxdc => printStream "loadMLByte" | 0wxe4 => printStream "storeMLByte" | 0wxe9 => (printStream "enterIntARM64"; ptr := !ptr + 0w12) | 0wxec => printStream "blockMoveByte" | 0wxed => printStream "blockEqualByte" | 0wxee => printStream "blockCompareByte" | 0wxf1 => printStream "deleteHandler" | 0wxf7 => printStream "jump16" | 0wxf8 => printStream "jump16False" | 0wxf9 => printStream "setHandler16" | 0wxfa => printDisp (1, "constAddr8\t") | 0wxfb => printOp(1, "stackSize8\t") | 0wxfc => printOp(2, "stackSize16\t") | 0wxff => (printStream "enterIntX86"; ptr := !ptr + 0w3) | 0wxfe => ( case codeVecGet (seg, !ptr) before ptr := !ptr + 0w1 of 0wx0b => printStream "containerW" | 0wx10 => printOp(2, "indirectClosureW\t") | 0wx11 => printOp(2, "indirectContainerW\t") | 0wx14 => printOp(2, "indirectW\t") | 0wx15 => printOp(2, "moveToContainerW\t") | 0wx16 => printOp(2, "moveToMutClosureW\t") | 0wx17 => printOp(2, "setStackValW\t") | 0wx18 => printOp(2, "resetW\t") | 0wx19 => printOp(2, "resetR_w\t") | 0wx1c => printStream "callFastRTSRRtoR" | 0wx1d => printStream "callFastRTSRGtoR" | 0wx48 => (printStream "jumpTrue"; printDisp (4, "\t")) | 0wx56 => printStream "floatAbs" | 0wx57 => printStream "floatNeg" | 0wx58 => printStream "fixedIntToFloat" | 0wx59 => printStream "floatToReal" | 0wx5a => printOp(1, "realToFloat\t") | 0wx5b => printStream "floatEqual" | 0wx5c => printStream "floatLess" | 0wx5d => printStream "floatLessEq" | 0wx5e => printStream "floatGreater" | 0wx5f => printStream "floatGreaterEq" | 0wx60 => printStream "floatAdd" | 0wx61 => printStream "floatSub" | 0wx62 => printStream "floatMult" | 0wx63 => printStream "floatDiv" | 0wx67 => printOp(2, "tupleW\t") | 0wx6e => printOp(1, "realToInt\t") | 0wx6f => printOp(1, "floatToInt\t") | 0wx70 => printStream "callFastRTSFtoF" | 0wx71 => printStream "callFastRTSGtoF" | 0wx72 => printStream "callFastRTSFFtoF" | 0wx73 => printStream "callFastRTSFGtoF" | 0wx79 => printStream "realUnordered" | 0wx7a => printStream "floatUnordered" | 0wx7c => (printOp (2, "tail\t"); printOp (2, ",")) | 0wx8f => printStream "callFastRTSRtoR" | 0wx90 => printStream "callFastRTSGtoR" | 0wx96 => printStream "atomicExchAdd" | 0wx99 => printStream "atomicReset" | 0wx9a => printStream "longWToTagged" | 0wx9b => printStream "signedToLongW" | 0wx9c => printStream "unsignedToLongW" | 0wx9d => printStream "realAbs" | 0wx9e => printStream "realNeg" | 0wx9f => printStream "fixedIntToReal" | 0wxaf => printStream "fixedDiv" | 0wxb0 => printStream "fixedMod" | 0wxbc => printStream "wordShiftRArith" | 0wxbe => printStream "lgWordEqual" | 0wxc0 => printStream "lgWordLess" | 0wxc1 => printStream "lgWordLessEq" | 0wxc2 => printStream "lgWordGreater" | 0wxc3 => printStream "lgWordGreaterEq" | 0wxc4 => printStream "lgWordAdd" | 0wxc5 => printStream "lgWordSub" | 0wxc6 => printStream "lgWordMult" | 0wxc7 => printStream "lgWordDiv" | 0wxc8 => printStream "lgWordMod" | 0wxc9 => printStream "lgWordAnd" | 0wxca => printStream "lgWordOr" | 0wxcb => printStream "lgWordXor" | 0wxcc => printStream "lgWordShiftLeft" | 0wxcd => printStream "lgWordShiftRLog" | 0wxce => printStream "lgWordShiftRArith" | 0wxcf => printStream "realEqual" | 0wxd0 => printOp(2, "closureW\t") | 0wxd1 => printStream "realLess" | 0wxd2 => printStream "realLessEq" | 0wxd3 => printStream "realGreater" | 0wxd4 => printStream "realGreaterEq" | 0wxd5 => printStream "realAdd" | 0wxd6 => printStream "realSub" | 0wxd7 => printStream "realMult" | 0wxd8 => printStream "realDiv" 
| 0wxdd => printStream "loadC8" | 0wxde => printStream "loadC16" | 0wxdf => printStream "loadC32" | 0wxe0 => printStream "loadC64" | 0wxe1 => printStream "loadCFloat" | 0wxe2 => printStream "loadCDouble" | 0wxe5 => printStream "storeC8" | 0wxe6 => printStream "storeC16" | 0wxe7 => printStream "storeC32" | 0wxe8 => printStream "storeC64" | 0wxe9 => printStream "storeCFloat" | 0wxea => printStream "storeCDouble" | 0wxf0 => (printDisp(4, "constAddr32_16\t"); printOp (2, ",")) | 0wxf2 => printDisp (4, "jump32\t") | 0wxf3 => printDisp (4, "jump32False\t") | 0wxf4 => printDisp (4, "constAddr32\t") | 0wxf5 => printDisp (4, "setHandler32\t") | 0wxf6 => let (* Have to find out how many items there are. *) val limit = getB (2, !ptr, seg); val () = printOp (2, "case32\t"); val base = !ptr; fun printEntry _ = (printStream "\n\t"; printHex(base + getB(4, !ptr, seg)); ptr := !ptr + 0w4) fun forLoop f i n = if i >= n then () else (f i; forLoop f (i + 0w1) n) in forLoop printEntry 0w0 limit end | 0wxfd => printStream "allocCSpace" | 0wxfe => printStream "freeCSpace" | _ => printStream ("unknown:0xfe 0x" ^ Word8.toString opc) ) | opc => printStream("unknown:0x" ^ Word8.toString opc) end; (* an instruction. *) printStream "\n" end (* main loop *) end (* printCode *) fun codeSize (SimpleCode l) = List.length l | codeSize (LabelCode _) = 0 | codeSize (JumpInstruction{size=ref Size8, ...}) = 2 | codeSize (JumpInstruction{size=ref Size16, ...}) = 3 | codeSize (JumpInstruction{size=ref Size32, ...}) = 6 | codeSize (PushConstant{size=ref Size8, constNum, ...}) = if constNum <= 1 then 2 else 3 | codeSize (PushConstant{size=ref Size16, ...}) = 4 | codeSize (PushConstant{size=ref Size32, isCall=false, ...}) = 8 | codeSize (PushConstant{size=ref Size32, isCall=true, ...}) = 9 | codeSize (PushShort value) = if value <= 0w4 orelse value = 0w10 then 1 else if value < 0w256 then 2 else 3 | codeSize (IndexedCase{labels, size=ref Size32, ...}) = 4 + List.length labels * 4 | codeSize (IndexedCase{labels, size=ref Size16, ...}) = 3 + List.length labels * 2 | codeSize (IndexedCase{labels=_, size=ref Size8, ...}) = raise InternalError "codeSize" | codeSize (LoadLocal w) = if w <= 0w15 then 1 else 2 | codeSize (IndirectLocal{indirect=0w0, localAddr=0w0}) = 1 | codeSize (IndirectLocal{indirect=0w0, ...}) = 2 | codeSize (IndirectLocal{indirect=0w1, ...}) = 2 | codeSize (IndirectLocal _) = 3 | codeSize (UncondTransfer l) = List.length l | codeSize (IsTaggedLocalB _) = 2 | codeSize (JumpOnIsTaggedLocalB{size=ref Size8, ...}) = 3 | codeSize (JumpOnIsTaggedLocalB{size=ref Size16, ...}) = 5 | codeSize (JumpOnIsTaggedLocalB{size=ref Size32, ...}) = 8 | codeSize (JumpNotEqualLocalInd0BB{size=ref Size8, ...}) = 4 | codeSize (JumpNotEqualLocalInd0BB{label, size, localAddr, const}) = codeSize(IndirectLocal{localAddr=localAddr, indirect=0w0}) + codeSize(PushShort(word8ToWord const)) + 1 + codeSize(JumpInstruction{jumpType=JumpFalse, label=label, size=size}) | codeSize (JumpNotEqualLocalConstBB{size=ref Size8, ...}) = 4 | codeSize (JumpNotEqualLocalConstBB {label, size, localAddr, const}) = codeSize(LoadLocal localAddr) + codeSize(PushShort(word8ToWord const)) + 1 + codeSize(JumpInstruction{jumpType=JumpFalse, label=label, size=size}) | codeSize (EnterIntArm64 _) = 16 (* For simplicity we add no-ops before and/or after *) (* General function to process the code. ic is the byte counter within the original code. 
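   The size of each instruction is taken BEFORE foldFn runs, so a pass
   that shrinks a size ref does not disturb the offsets used within
   that same pass.  A trivial illustration (not used by the compiler),
   printing the offset of every instruction:
       val _ = foldCode 0w0 (fn (_, ic) => print (Word.toString ic ^ "\n")) ops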
*) fun foldCode startIc foldFn ops = let fun doFold(oper :: operList, ic) = doFold(operList, (* Get the size BEFORE any possible change. *) ic + Word.fromInt(codeSize oper) before foldFn(oper, ic)) | doFold(_, ic) = ic in doFold(ops, startIc) end (* Process the code, setting the destination of any labels. Return the length of the code. *) fun setLabels(LabelCode(ref labs) :: ops, ic) = (List.app(fn d => d := ic) labs; setLabels(ops, ic)) | setLabels(oper :: ops, ic) = setLabels(ops, ic + Word.fromInt(codeSize oper)) | setLabels([], ic) = ic (* Set the sizes of branches depending on the distance to the destination. *) fun setLabelsAndSizes ops = let (* Set the labels and adjust the sizes, repeating until it never gets smaller*) fun setLabAndSize(ops, lastSize) = let (* Calculate offsets for constants. *) val endIC = Word.andb(lastSize + 0w7, ~ 0w8) val startOfConstants = endIC (* Because the constant area is 8-byte aligned we have to allow for the possibility that the distance between a "load constant" instruction and the target could actually increase. *) val alignment = 0w7 fun adjust(JumpInstruction{size as ref Size32, label=ref lab, ...}, ic) = let val dest = !(hd lab) val diff = if dest <= ic (* N.B. Include infinite loops as backwards. *) then ic - dest (* Backwards - Counts from start of instruction. *) else dest - (ic + 0w6) (* Forwards - Relative to the current end. *) in if diff < 0wx100 then size := Size8 else if diff < 0wx10000 then size := Size16 else () end | adjust(JumpInstruction{size as ref Size16, label=ref lab, ...}, ic) = let val dest = !(hd lab) in if dest <= ic then if ic - dest < 0wx100 then size := Size8 else () else if dest - (ic + 0w3) < 0wx100 then size := Size8 else () end | adjust(IndexedCase{size as ref Size32, labels}, ic) = let val startAddr = ic+0w4 (* Use 16-bit case if all the offsets are 16-bits. *) fun is16bit(ref lab) = let val dest = !(hd lab) in dest > startAddr andalso dest < startAddr+0wx10000 end in if List.all is16bit labels then size := Size16 else () end | adjust(PushConstant{size as ref Size32, constNum, ...}, ic) = let val offset = startOfConstants - (ic + 0w8) (* 8 is ext+opcode+4+2 *) in if constNum >= 0x100 (* Constant numbers >= 256 require full opcode. 
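   The comparisons below also leave alignment bytes of slack: the
   constant area is 8-byte aligned, so when other instructions shrink
   the distance from here to startOfConstants can actually grow by up
   to 7 bytes, and the offset must still fit its field after that.
   Hence a one-byte offset is chosen only when
       offset < 0wx100 - alignment
   i.e. below 0wxf9.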
*) then () else if offset < 0wx100-alignment then size := Size8 else if offset < 0wx10000-alignment then size := Size16 else () end | adjust(PushConstant{size as ref Size16, ...}, ic) = let val offset = startOfConstants - (ic + 0w4) (* 4 is opc+2+1 *) in if offset < 0wx100-alignment then size := Size8 else () end | adjust(JumpOnIsTaggedLocalB{size as ref Size32, label=ref lab, ...}, ic) = let val dest = !(hd lab) val diff = dest - (ic + 0w8) in if diff < 0wx100 then size := Size8 else if diff < 0wx10000 then size := Size16 else () end | adjust(JumpOnIsTaggedLocalB{size as ref Size16, label=ref lab, ...}, ic) = let val dest = !(hd lab) in if dest - (ic + 0w5) < 0wx100 then size := Size8 else () end | adjust(j as JumpNotEqualLocalInd0BB{size as ref Size32, label=ref lab, ...}, ic) = let val dest = !(hd lab) val diff = dest - (ic + Word.fromInt(codeSize j)) in if diff < 0wx100 then size := Size8 else if diff < 0wx10000 then size := Size16 else () end | adjust(j as JumpNotEqualLocalInd0BB{size as ref Size16, label=ref lab, ...}, ic) = let val dest = !(hd lab) in if dest - (ic + Word.fromInt(codeSize j)) < 0wx100 then size := Size8 else () end | adjust(j as JumpNotEqualLocalConstBB{size as ref Size32, label=ref lab, ...}, ic) = let val dest = !(hd lab) val diff = dest - (ic + Word.fromInt(codeSize j)) in if diff < 0wx100 then size := Size8 else if diff < 0wx10000 then size := Size16 else () end | adjust(j as JumpNotEqualLocalConstBB{size as ref Size16, label=ref lab, ...}, ic) = let val dest = !(hd lab) in if dest - (ic + Word.fromInt(codeSize j)) < 0wx100 then size := Size8 else () end | adjust _ = () val _ = foldCode 0w0 adjust ops val nextSize = setLabels(ops, 0w0) in if nextSize < lastSize then setLabAndSize(ops, nextSize) else if nextSize = lastSize then lastSize else raise InternalError "setLabAndSize - size increased" end in setLabAndSize(ops, setLabels(ops, 0w0)) end fun genCode(ops, Code {constVec, ...}) = let (* First pass - set the labels. *) val codeSize = setLabelsAndSizes ops val wordSize = wordSize (* Align to 8 bytes on both 32-bits and 64-bits. *) val endIC = Word.andb(codeSize + 0w7, ~ 0w8) val paddingBytes = List.tabulate(Word.toInt(endIC - codeSize), fn _ => SimpleCode[opcode_const_0]) val endOfCode = endIC div wordSize val startOfConstants = endIC val segSize = endOfCode + Word.fromInt(List.length(! constVec)) + 0w4 val codeVec = byteVecMake segSize val ic = ref 0w0 fun genByte b = byteVecSet(codeVec, !ic, b) before ic := !ic + 0w1 fun genByteCode(SimpleCode bytes, _) = (* Simple code - just generate the bytes. *) List.app genByte bytes | genByteCode(UncondTransfer bytes, _) = List.app genByte bytes | genByteCode(LabelCode _, _) = () | genByteCode(JumpInstruction{label=ref labs, jumpType, size=ref Size32, ...}, ic) = let val dest = !(hd labs) val extOpc = case jumpType of SetHandler => ext_opcode_setHandler32 | JumpFalse => ext_opcode_jump32False | JumpTrue => ext_opcode_jump32True | Jump => ext_opcode_jump32 | JumpBack => ext_opcode_jump32 val diff = dest - (ic + 0w6) in genByte opcode_escape; genByte extOpc; genByte(wordToWord8 diff); (* This may be negative so we must use an arithmetic shift. *) genByte(wordToWord8(diff ~>> 0w8)); genByte(wordToWord8(diff ~>> 0w16)); genByte(wordToWord8(diff ~>> 0w24)) end | genByteCode(JumpInstruction{label=ref labs, jumpType, size=ref Size16, ...}, ic) = let val dest = !(hd labs) in if dest <= ic then (* Jump back. 
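   As assembled here a backward jump's operand counts back from the
   start of the instruction, matching the adjust pass above, so a
   jumpBack16 with operand d at address A transfers to A - d.  The
   InternalError checks below guard the invariants: only a JumpBack
   may go backwards and the displacement must fit in 16 bits.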
*) let val _ = jumpType = JumpBack orelse raise InternalError "genByteCode - back jump" val diff = ic - dest val _ = diff < 0wx10000 orelse raise InternalError "genByteCode - jump range" in genByte opcode_jumpBack16; genByte(wordToWord8 diff); genByte(wordToWord8(diff >> 0w8)) end else let val opc = case jumpType of SetHandler => opcode_setHandler16 | JumpFalse => opcode_jump16False | JumpTrue => opcode_jump16True | Jump => opcode_jump16 | JumpBack => raise InternalError "genByteCode: JumpBack goes forward" val diff = dest - (ic + 0w3) val _ = diff < 0wx10000 orelse raise InternalError "genByteCode - jump range" in genByte opc; genByte(wordToWord8 diff); genByte(wordToWord8(diff >> 0w8)) end end | genByteCode(JumpInstruction{label=ref labs, jumpType, size=ref Size8, ...}, ic) = let val dest = !(hd labs) in if dest <= ic then (* Jump back. *) let val _ = jumpType = JumpBack orelse raise InternalError "genByteCode - back jump" val diff = ic - dest val _ = diff < 0wx100 orelse raise InternalError "genByteCode - jump range" in genByte opcode_jumpBack8; genByte(wordToWord8 diff) end else let val opc = case jumpType of SetHandler => opcode_setHandler | JumpFalse => opcode_jumpFalse | JumpTrue => opcode_jumpTrue | Jump => opcode_jump | JumpBack => raise InternalError "genByteCode: JumpBack goes forward" val diff = dest - (ic + 0w2) val _ = diff < 0wx100 orelse raise InternalError "genByteCode - jump range" in genByte opc; genByte(wordToWord8 diff) end end | genByteCode(PushConstant{ constNum, size=ref Size32, isCall=false, ... }, ic) = let (* Offsets are calculated from the END of the instruction *) val offset = startOfConstants - (ic + 0w8) val constNumW = Word.fromInt constNum in genByte opcode_escape; genByte ext_opcode_constAddr32_16; genByte(wordToWord8 offset); genByte(wordToWord8(offset >> 0w8)); genByte(wordToWord8(offset >> 0w16)); genByte(wordToWord8(offset >> 0w24)); genByte(wordToWord8 constNumW); genByte(wordToWord8(constNumW >> 0w8)) end | genByteCode(PushConstant{ constNum, size=ref Size32, isCall=true, ... }, ic) = ( (* Turn this back into a push of a constant and call-closure. *) genByteCode(PushConstant{ constNum=constNum, size=ref Size32, isCall=false }, ic); genByte opcode_callClosure ) | genByteCode(PushConstant{ constNum, size=ref Size16, isCall, ... }, ic) = let val offset = startOfConstants - (ic + 0w4) val _ = offset < 0wx10000 orelse raise InternalError "genByteCode - constant range" in genByte(if isCall then opcode_callConstAddr16_8 else opcode_constAddr16_8); genByte(wordToWord8 offset); genByte(wordToWord8(offset >> 0w8)); genByte(Word8.fromInt constNum) end | genByteCode(PushConstant{ constNum, size=ref Size8, isCall, ... 
}, ic) = if constNum <= 1 then let val offset = startOfConstants - (ic + 0w2) val _ = offset < 0wx100 orelse raise InternalError "genByteCode - constant range" val opCode = case (isCall, constNum) of (false, 0) => opcode_constAddr8_0 | (false, 1) => opcode_constAddr8_1 | (true, 0) => opcode_callConstAddr8_0 | (true, 1) => opcode_callConstAddr8_1 | _ => raise InternalError "genByteCode: constAddr" in genByte opCode; genByte(wordToWord8 offset) end else let val offset = startOfConstants - (ic + 0w3) val _ = offset < 0wx100 orelse raise InternalError "genByteCode - constant range" in genByte(if isCall then opcode_callConstAddr8_8 else opcode_constAddr8_8); genByte(wordToWord8 offset); genByte(Word8.fromInt constNum) end | genByteCode(PushShort 0w0, _) = genByte opcode_const_0 | genByteCode(PushShort 0w1, _) = genByte opcode_const_1 | genByteCode(PushShort 0w2, _) = genByte opcode_const_2 | genByteCode(PushShort 0w3, _) = genByte opcode_const_3 | genByteCode(PushShort 0w4, _) = genByte opcode_const_4 | genByteCode(PushShort 0w10, _) = genByte opcode_const_10 | genByteCode(PushShort value, _) = if value < 0w256 then (genByte opcode_constIntB; genByte(wordToWord8 value)) else (genByte opcode_constIntW; genByte(wordToWord8 value); genByte(wordToWord8(value >> 0w8))) | genByteCode(IndexedCase{labels, size=ref Size32, ...}, ic) = let val nCases = List.length labels val () = genByte opcode_escape val () = genByte ext_opcode_case32 val () = genByte(Word8.fromInt nCases) val () = genByte(Word8.fromInt (nCases div 256)) val startOffset = ic+0w4 (* Offsets are relative to here. *) fun putLabel(ref labs) = let val dest = !(hd labs) val diff = dest - startOffset val _ = dest > startOffset orelse raise InternalError "genByteCode - indexed case" in genByte(wordToWord8 diff); genByte(wordToWord8(diff >> 0w8)); genByte(wordToWord8(diff >> 0w16)); genByte(wordToWord8(diff >> 0w24)) end in List.app putLabel labels end | genByteCode(IndexedCase{labels, size=ref Size16, ...}, ic) = let val nCases = List.length labels val () = genByte(opcode_case16) val () = genByte(Word8.fromInt nCases) val () = genByte(Word8.fromInt (nCases div 256)) val startOffset = ic+0w3 (* Offsets are relative to here. 
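As a sketch (assuming two cases) the bytes are: opcode_case16, nCases(lo), nCases(hi), off1(lo), off1(hi), off2(lo), off2(hi), where each 16-bit offset is counted forward from startOffset, the first byte after the count.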
*) fun putLabel(ref labs) = let val dest = !(hd labs) val diff = dest - startOffset val _ = dest > startOffset orelse raise InternalError "genByteCode - indexed case" val _ = diff < 0wx10000 orelse raise InternalError "genByteCode - indexed case" in genByte(wordToWord8 diff); genByte(wordToWord8(diff >> 0w8)) end in List.app putLabel labels end | genByteCode(IndexedCase{size=ref Size8, ...}, _) = raise InternalError "genByteCode - IndexedCase byte" | genByteCode(LoadLocal 0w0, _) = genByte opcode_local_0 | genByteCode(LoadLocal 0w1, _) = genByte opcode_local_1 | genByteCode(LoadLocal 0w2, _) = genByte opcode_local_2 | genByteCode(LoadLocal 0w3, _) = genByte opcode_local_3 | genByteCode(LoadLocal 0w4, _) = genByte opcode_local_4 | genByteCode(LoadLocal 0w5, _) = genByte opcode_local_5 | genByteCode(LoadLocal 0w6, _) = genByte opcode_local_6 | genByteCode(LoadLocal 0w7, _) = genByte opcode_local_7 | genByteCode(LoadLocal 0w8, _) = genByte opcode_local_8 | genByteCode(LoadLocal 0w9, _) = genByte opcode_local_9 | genByteCode(LoadLocal 0w10, _) = genByte opcode_local_10 | genByteCode(LoadLocal 0w11, _) = genByte opcode_local_11 | genByteCode(LoadLocal 0w12, _) = genByte opcode_local_12 | genByteCode(LoadLocal 0w13, _) = genByte opcode_local_13 | genByteCode(LoadLocal 0w14, _) = genByte opcode_local_14 | genByteCode(LoadLocal 0w15, _) = genByte opcode_local_15 | genByteCode(LoadLocal w, _) = (genByte opcode_localB; genByte w) | genByteCode(IndirectLocal{localAddr=0w0, indirect=0w0}, _) = genByte opcode_indirect0Local0 | genByteCode(IndirectLocal{localAddr, indirect=0w0}, _) = (genByte opcode_indirectLocalB0; genByte localAddr) | genByteCode(IndirectLocal{localAddr, indirect=0w1}, _) = (genByte opcode_indirectLocalB1; genByte localAddr) | genByteCode(IndirectLocal{localAddr, indirect}, _) = (genByte opcode_indirectLocalBB; genByte localAddr; genByte indirect) | genByteCode(IsTaggedLocalB addr, _) = (genByte opcode_isTaggedLocalB; genByte addr) | genByteCode(JumpOnIsTaggedLocalB {label=ref labs, size=ref Size8, localAddr}, ic) = let val dest = !(hd labs) val diff = dest - (ic + 0w3) in genByte opcode_jumpTaggedLocal; genByte localAddr; genByte(wordToWord8 diff) end | genByteCode(JumpOnIsTaggedLocalB {label, size, localAddr}, ic) = ( (* Turn this back into the original sequence. *) genByteCode(IsTaggedLocalB localAddr, ic); genByteCode(JumpInstruction{jumpType=JumpTrue, label=label, size=size}, ic+0w2) ) | genByteCode(JumpNotEqualLocalInd0BB {label=ref labs, size=ref Size8, localAddr, const}, ic) = let val dest = !(hd labs) val diff = dest - (ic + 0w4) in genByte opcode_jumpNEqLocalInd; genByte localAddr; genByte const; genByte(wordToWord8 diff) end | genByteCode(JumpNotEqualLocalInd0BB {label, size, localAddr, const}, ic) = (* Turn this back into the original sequence. *) (foldCode ic genByteCode [IndirectLocal{localAddr=localAddr, indirect=0w0}, PushShort(word8ToWord const), SimpleCode[opcode_equalWord], JumpInstruction{jumpType=JumpFalse, label=label, size=size}]; ()) | genByteCode(JumpNotEqualLocalConstBB {label=ref labs, size=ref Size8, localAddr, const}, ic) = let val dest = !(hd labs) val diff = dest - (ic + 0w4) in genByte opcode_jumpNEqLocal; genByte localAddr; genByte const; genByte(wordToWord8 diff) end | genByteCode(JumpNotEqualLocalConstBB {label, size, localAddr, const}, ic) = (* Turn this back into the original sequence. 
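That is, the sequence below: load the local, push the constant, compare with equalWord and branch with jumpFalse. The fused instruction only exists with a byte-sized offset, so larger spans fall back to this expansion.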
*) (foldCode ic genByteCode [LoadLocal localAddr, PushShort(word8ToWord const), SimpleCode[opcode_equalWord], JumpInstruction{jumpType=JumpFalse, label=label, size=size}]; ()) | genByteCode(EnterIntArm64 b, ic) = let (* The machine code is 12 bytes that must be 32-bit aligned. There is then a single byte that indicates the type of enter-int instruction. We may need up to three bytes to get the alignment. It is simpler to define this as a fixed-length (16-byte) instruction and add no-ops afterwards if necessary. *) val bytesBefore = if Word.andb(ic, 0w3) = 0w0 then 0w0 else 0w4-Word.andb(ic, 0w3) val byteSequence = List.tabulate(Word.toInt bytesBefore, fn _ => opcode_noop) @ [0wxe9, 0wx03, 0wx1e, 0wxaa, 0wx50, 0wx03, 0wx40, 0wxf9, 0wx00, 0wx02, 0wx3f, 0wxd6, b] @ List.tabulate(3 - Word.toInt bytesBefore, fn _ => opcode_noop) in List.app genByte byteSequence end in foldCode 0w0 genByteCode (ops @ paddingBytes); (codeVec (* Return the completed code. *), endIC (* And the size. *)) end fun setLong (value, addrs, seg) = let val wordLength = wordSize fun putBytes(value, a, seg, i) = if i = wordLength then () else ( byteVecSet(seg, if not isBigEndian then a+i else a+wordLength-i-0w1, Word8.fromInt(value mod 256)); putBytes(value div 256, a, seg, i+0w1) ) in putBytes(value, addrs, seg, 0w0) end (* Peephole optimisation. *) local fun peepHole([], _, output) = List.rev output | peepHole(LabelCode lab1 :: (instrs as LabelCode lab2 :: _), exited, output) = ( (* Consecutive labels. Merge these, discarding the first. *) lab2 := !lab1 @ !lab2; peepHole(instrs, exited, output) ) (* A label followed by an unconditional branch. Forward the original label. Although JumpBack is also unconditional we don't forward those because we don't have a conditional backwards jump. *) | peepHole((LabelCode lab1) :: (jump as JumpInstruction{jumpType=Jump, label=lab2, ...}) :: tl, exited, output) = ( lab2 := !lab1 @ !lab2; (* Leave the jump in the stream and leave "exited" unchanged. This will now be unreachable if we had previously exited but we need to take the jump if we hadn't. *) peepHole(jump :: tl, exited, output) ) (* Discard everything after an unconditional transfer until the next label. *) | peepHole((label as LabelCode _) :: tl, _, output) = peepHole(tl, false, label::output) | peepHole(_ :: tl, true, output) = peepHole(tl, true, output) | peepHole((jump as JumpInstruction{jumpType=Jump, ...}) :: tl, _, output) = peepHole(tl, true, jump :: output) (* Return, raise-exception and tail-call. *) | peepHole((uncond as UncondTransfer _) :: tl, _, output) = peepHole(tl, true, uncond :: output) (* A conditional branch round an unconditional branch. Replace by a conditional branch with the sense reversed. 
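As an illustrative sketch: jumpFalse L1; jump L2; L1: ... ==> jumpTrue L2; L1: ... and symmetrically with the senses exchanged.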
*) | peepHole((cond as JumpInstruction{jumpType=JumpFalse, label=lab1, ...}) :: (uncond as JumpInstruction{jumpType=Jump, label=lab2, size}) :: (tail as LabelCode lab3 :: _), _, output) = if lab1 = lab3 then peepHole(tail, false, JumpInstruction{jumpType=JumpTrue, label=lab2, size=size} :: output) else peepHole(uncond :: tail, false, cond :: output) | peepHole((cond as JumpInstruction{jumpType=JumpTrue, label=lab1, ...}) :: (uncond as JumpInstruction{jumpType=Jump, label=lab2, size}) :: (tail as LabelCode lab3 :: _), _, output) = if lab1 = lab3 then peepHole(tail, false, JumpInstruction{jumpType=JumpFalse, label=lab2, size=size} :: output) else peepHole(uncond :: tail, false, cond :: output) | peepHole(IsTaggedLocalB addr :: JumpInstruction{jumpType=JumpTrue, label, size} :: tail, _, output) = peepHole(tail, false, JumpOnIsTaggedLocalB {label=label, size=size, localAddr=addr} :: output) | peepHole((indLocal as IndirectLocal{localAddr, indirect=0w0}) :: (instrs as PushShort const :: SimpleCode[0wxa0(*opcode_equalWord*)] :: JumpInstruction{jumpType=JumpFalse, label, size} :: tail), _, output) = if const < 0w256 then peepHole(tail, false, JumpNotEqualLocalInd0BB {label=label, size=size, localAddr=localAddr, const=wordToWord8 const} :: output) else peepHole(instrs, false, indLocal :: output) | peepHole((load as LoadLocal localAddr) :: (instrs as PushShort const :: SimpleCode[0wxa0(*opcode_equalWord*)] :: JumpInstruction{jumpType=JumpFalse, label, size} :: tail), _, output) = if const < 0w256 then peepHole(tail, false, JumpNotEqualLocalConstBB {label=label, size=size, localAddr=localAddr, const=wordToWord8 const} :: output) else peepHole(instrs, false, load :: output) | peepHole(hd::tl, exited, output) = peepHole(tl, exited, hd::output) in fun optimise code = peepHole(code, false, []) end (* Generate the code sequence to enter the interpreter when this code is called or returned to or an exception is raised. This is only required when bootstrapping a native code compiler. *) fun genEnterInt(_, Code { enterIntMode = 0 (* None *), ...}) = [] | genEnterInt(b, Code { enterIntMode = 1 (* X86_32 *), ...}) = [SimpleCode[0wxff, 0wx55, 0wx24, b]] | genEnterInt(b, Code { enterIntMode = 2 (* X86_64 *), ...}) = [SimpleCode[0wxff, 0wx55, 0wx48, b]] | genEnterInt(b, Code { enterIntMode = 3 (* X86_32_64 *), ...}) = [SimpleCode[0wxff, 0wx55, 0wx48, b]] | genEnterInt(b, Code { enterIntMode = 4 (* ARM_64 *), ...}) = [EnterIntArm64 b] | genEnterInt _ = raise InternalError "genEnterInt: unknown architecture value" (* Adds the constants onto the code, and copies the code into a new segment *) fun copyCode {code as Code{ printAssemblyCode, printStream, procName, constVec, stage1Code, ...}, maxStack, numberOfArguments, resultClosure} = let val cvec = code local val revCode = optimise(List.rev(!stage1Code)) (* Add a stack check. This is only needed if the function needs more than 128 words since the call and tail functions check for this much. *) in val codeList = if maxStack < 128 then revCode else SimpleCode[opcode_stackSize16, Word8.fromInt maxStack, Word8.fromInt(maxStack div 256)] :: revCode end (* Add an enterInt if necessary *) (* If we need enter-int code it must go first. *) val enterInt = genEnterInt(Word8.fromInt numberOfArguments + 0wx80, cvec) val (byteVec, endIC) = genCode(enterInt @ codeList, cvec) val wordLength = wordSize (* +3 for profile count, function name and constants count *) val numOfConst = List.length(! 
constVec) val endOfCode = endIC div wordLength val segSize = endOfCode + Word.fromInt numOfConst + 0w4 val firstConstant = endIC + wordLength * 0w3 (* Add 3 for no of consts, fn name and profile count. *) (* Put in the number of constants. This must go in before we actually put in any constants. *) local val lastWord = (segSize - 0w1) * wordLength in val () = setLong(numOfConst + 2, endIC, byteVec) (* Set the last word of the code to the (negative) byte offset of the start of the code area from the end of this word. *) val () = setLong((numOfConst + 3) * ~ (Word.toInt wordLength), lastWord, byteVec) end (* Now we've filled in all the size info we need to convert the segment into a proper code segment before it's safe to put in any ML values. *) val codeVec = byteVecToCodeVec(byteVec, resultClosure) local val name : string = procName val nameWord : machineWord = toMachineWord name in val () = codeVecPutWord (codeVec, endOfCode+0w1, nameWord) end (* Profile ref. A byte ref used by the profiler in the RTS. *) local val v = RunCall.allocateByteMemory(0w1, Word.fromLargeWord(Word8.toLargeWord(Word8.orb(F_mutable, F_bytes)))) fun clear 0w0 = () | clear i = (assignByte(v, i-0w1, 0w0); clear (i-0w1)) val () = clear(wordSize) in val () = codeVecPutWord (codeVec, endOfCode+0w2, toMachineWord v) end (* and then copy the constants from the constant list. *) local fun setConstant(value, num) = let val constAddr = (firstConstant div wordLength) + num in codeVecPutWord (codeVec, constAddr, value); num+0w1 end in val _ = List.foldl setConstant 0w0 (!constVec) end in if printAssemblyCode then (* print out the code *) (printCode (codeVec, procName, endIC, printStream); printStream"\n") else (); codeVecLock(codeVec, resultClosure) end (* copyCode *) fun addItemToList(item, Code{stage1Code, ...}) = stage1Code := item :: !stage1Code val genOpcode = addItemToList fun putBranchInstruction(brOp, label, cvec) = addItemToList(JumpInstruction{label=label, jumpType=brOp, size = ref Size32}, cvec) fun setLabel(label, cvec) = addItemToList(LabelCode label, cvec) fun createLabel () = ref [ref 0w0] local fun genOpc(opc, cvec) = addItemToList(SimpleCode [opc], cvec) and genExtOpc(opc, cvec) = addItemToList(SimpleCode [opcode_escape, opc], cvec) and genOpcByte(opc, arg1, cvec) = if 0 <= arg1 andalso arg1 < 256 then addItemToList(SimpleCode [opc, Word8.fromInt arg1], cvec) else raise InternalError "genOpcByte" and genExtOpcByte(opc, arg1, cvec) = if 0 <= arg1 andalso arg1 < 256 then addItemToList(SimpleCode [opcode_escape, opc, Word8.fromInt arg1], cvec) else raise InternalError "genExtOpcByte" and genExtOpcWord(opc, arg1, cvec) = if 0 <= arg1 andalso arg1 < 65536 then addItemToList(SimpleCode[opcode_escape, opc, Word8.fromInt arg1, Word8.fromInt (arg1 div 256)], cvec) else raise InternalError "genExtOpcWord" open IEEEReal fun encodeRound TO_NEAREST = 0 | encodeRound TO_NEGINF = 1 | encodeRound TO_POSINF = 2 | encodeRound TO_ZERO = 3 in fun genRaiseEx cvec = addItemToList(UncondTransfer [opcode_raiseEx], cvec) fun genLock cvec = genOpc (opcode_lock, cvec) fun genLdexc cvec = genOpc (opcode_ldexc, cvec) fun genPushHandler cvec = genOpc (opcode_pushHandler, cvec) fun genRTSCallFast(0, cvec) = genOpc (opcode_callFastRTS0, cvec) | genRTSCallFast(1, cvec) = genOpc (opcode_callFastRTS1, cvec) | genRTSCallFast(2, cvec) = genOpc (opcode_callFastRTS2, cvec) | genRTSCallFast(3, cvec) = genOpc (opcode_callFastRTS3, cvec) | genRTSCallFast(4, cvec) = genOpc (opcode_callFastRTS4, cvec) | genRTSCallFast(5, cvec) = genOpc 
(opcode_callFastRTS5, cvec) | genRTSCallFast(_, _) = raise InternalError "genRTSCallFast" fun genContainer (size, cvec) = if size < 256 then genOpcByte(opcode_containerB, size, cvec) else genExtOpcWord(ext_opcode_containerW, size, cvec) fun genCase (nCases, cvec) = let val labels = List.tabulate(nCases, fn _ => createLabel()) in addItemToList(IndexedCase{labels=labels, size=ref Size32}, cvec); labels end (* For the moment don't try to merge stack resets. *) fun resetStack(0, _, _) = () | resetStack(1, true, cvec) = addItemToList(SimpleCode[opcode_resetR_1], cvec) | resetStack(2, true, cvec) = addItemToList(SimpleCode[opcode_resetR_2], cvec) | resetStack(3, true, cvec) = addItemToList(SimpleCode[opcode_resetR_3], cvec) | resetStack(offset, true, cvec) = if offset < 0 then raise InternalError "resetStack" else if offset > 255 then genExtOpcWord(ext_opcode_resetR_w, offset, cvec) else genOpcByte(opcode_resetRB, offset, cvec) | resetStack(1, false, cvec) = addItemToList(SimpleCode[opcode_reset_1], cvec) | resetStack(2, false, cvec) = addItemToList(SimpleCode[opcode_reset_2], cvec) | resetStack(offset, false, cvec) = if offset < 0 then raise InternalError "resetStack" else if offset > 255 then genExtOpcWord(ext_opcode_resetW, offset, cvec) else genOpcByte(opcode_resetB, offset, cvec) fun genCallClosure(Code{stage1Code as ref(PushConstant{constNum, size, isCall=false} :: tail), ...}) = stage1Code := PushConstant{constNum=constNum, size=size, isCall=true} :: tail | genCallClosure(Code{stage1Code as ref(LoadLocal w :: tail), ...}) = stage1Code := SimpleCode [opcode_callLocalB, w] :: tail | genCallClosure(Code{stage1Code, ...}) = stage1Code := SimpleCode [opcode_callClosure] :: !stage1Code fun genTailCall (toslide, slideby, cvec) = if toslide < 256 andalso slideby < 256 then (* General byte case *) addItemToList(UncondTransfer[opcode_tailbb, Word8.fromInt toslide, Word8.fromInt slideby], cvec) else (* General case.
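Both counts are emitted as 16-bit little-endian values after the escape prefix: opcode_escape, ext_opcode_tail, toslide(lo), toslide(hi), slideby(lo), slideby(hi).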
*) addItemToList( UncondTransfer[opcode_escape, ext_opcode_tail, Word8.fromInt toslide, Word8.fromInt(toslide div 256), Word8.fromInt slideby, Word8.fromInt (slideby div 256)], cvec) fun pushConst (value : machineWord, cvec) = if isShort value andalso toShort value < 0w32768 then addItemToList(PushShort(toShort value), cvec) else (* address or large short *) addItemToList(PushConstant{constNum = addConstToVec(value, cvec), size=ref Size32, isCall=false}, cvec) fun genRTSCallFastRealtoReal cvec = genExtOpc (ext_opcode_callFastRTSRtoR, cvec) and genRTSCallFastRealRealtoReal cvec = genExtOpc (ext_opcode_callFastRTSRRtoR, cvec) and genRTSCallFastGeneraltoReal cvec = genExtOpc (ext_opcode_callFastRTSGtoR, cvec) and genRTSCallFastRealGeneraltoReal cvec = genExtOpc (ext_opcode_callFastRTSRGtoR, cvec) and genRTSCallFastFloattoFloat cvec = genExtOpc (ext_opcode_callFastRTSFtoF, cvec) and genRTSCallFastFloatFloattoFloat cvec = genExtOpc (ext_opcode_callFastRTSFFtoF, cvec) and genRTSCallFastGeneraltoFloat cvec = genExtOpc (ext_opcode_callFastRTSGtoF, cvec) and genRTSCallFastFloatGeneraltoFloat cvec = genExtOpc (ext_opcode_callFastRTSFGtoF, cvec) - fun genDoubleToFloat(SOME rnding, cvec) = genExtOpcByte(ext_opcode_realToFloat, encodeRound rnding, cvec) - | genDoubleToFloat(NONE, cvec) = genExtOpcByte(ext_opcode_realToFloat, 5, cvec) + fun genDoubleToFloat cvec = genExtOpcByte(ext_opcode_realToFloat, 5, cvec) and genRealToInt(rnding, cvec) = genExtOpcByte(ext_opcode_realToInt, encodeRound rnding, cvec) and genFloatToInt(rnding, cvec) = genExtOpcByte(ext_opcode_floatToInt, encodeRound rnding, cvec) fun genEqualWordConst(w, cvec) = (pushConst(toMachineWord w, cvec); genOpc(opcode_equalWord, cvec)) fun genIsTagged(Code{stage1Code as ref(LoadLocal addr :: tail), ...}) = stage1Code := IsTaggedLocalB addr :: tail | genIsTagged cvec = genOpc(opcode_isTagged, cvec) fun genIndirectSimple(0, cvec) = genOpc(opcode_indirect_0, cvec) | genIndirectSimple(1, cvec) = genOpc(opcode_indirect_1, cvec) | genIndirectSimple(2, cvec) = genOpc(opcode_indirect_2, cvec) | genIndirectSimple(3, cvec) = genOpc(opcode_indirect_3, cvec) | genIndirectSimple(4, cvec) = genOpc(opcode_indirect_4, cvec) | genIndirectSimple(5, cvec) = genOpc(opcode_indirect_5, cvec) | genIndirectSimple(arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_indirectB, arg1, cvec) else genExtOpcWord(ext_opcode_indirectW, arg1, cvec) fun genIndirectContainer(arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_indirectContainerB, arg1, cvec) else genExtOpcWord(ext_opcode_indirectContainerW, arg1, cvec) fun genMoveToContainer (arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_moveToContainerB, arg1, cvec) else genExtOpcWord(ext_opcode_moveToContainerW, arg1, cvec) fun genMoveToMutClosure (arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_moveToMutClosureB, arg1, cvec) else genExtOpcWord(ext_opcode_moveToMutClosureW, arg1, cvec) fun genSetStackVal (arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_setStackValB, arg1, cvec) else genExtOpcWord(ext_opcode_setStackValW, arg1, cvec) fun genTuple (2, cvec) = genOpc(opcode_tuple_2, cvec) | genTuple (3, cvec) = genOpc(opcode_tuple_3, cvec) | genTuple (4, cvec) = genOpc(opcode_tuple_4, cvec) | genTuple (arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_tupleB, arg1, cvec) else genExtOpcWord(ext_opcode_tupleW, arg1, cvec) fun genAllocMutableClosure(closureSize, cvec) = if closureSize < 256 then genOpcByte(opcode_allocMutClosureB, closureSize, cvec) else genExtOpcWord(ext_opcode_allocMutClosureW, closureSize, cvec) fun 
genClosure (arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_closureB, arg1, cvec) else genExtOpcWord(ext_opcode_closureW, arg1, cvec) fun genLocal (arg1, cvec) = if 0 <= arg1 andalso arg1 < 256 then addItemToList(LoadLocal(Word8.fromInt arg1), cvec) else addItemToList(SimpleCode[opcode_localW, Word8.fromInt arg1, Word8.fromInt(arg1 div 256)], cvec) fun genIndirectClosure{ addr, item, code=cvec } = if addr < 256 andalso item < 256 then ( case item of 0 => genOpcByte(opcode_indirectClosureB0, addr, cvec) | 1 => genOpcByte(opcode_indirectClosureB1, addr, cvec) | 2 => genOpcByte(opcode_indirectClosureB2, addr, cvec) | _ => addItemToList(SimpleCode[opcode_indirectClosureBB, Word8.fromInt addr, Word8.fromInt item], cvec) ) else ( genLocal (addr, cvec); addItemToList(SimpleCode[opcode_escape, ext_opcode_indirectClosureW, Word8.fromInt item, Word8.fromInt (item div 256)], cvec) ) end fun genReturn(1, cvec) = addItemToList(UncondTransfer[opcode_return_1], cvec) | genReturn(2, cvec) = addItemToList(UncondTransfer[opcode_return_2], cvec) | genReturn(3, cvec) = addItemToList(UncondTransfer[opcode_return_3], cvec) | genReturn(arg1, cvec) = addItemToList(UncondTransfer( if 0 <= arg1 andalso arg1 <= 255 then [opcode_returnB, Word8.fromInt arg1] else [opcode_returnW, Word8.fromInt arg1, Word8.fromInt(arg1 div 256)]), cvec) fun genIndirect (arg1, cvec as Code{stage1Code as ref(LoadLocal w :: tail), ...}) = if 0 <= arg1 andalso arg1 <= 255 then stage1Code := IndirectLocal{localAddr=w, indirect=Word8.fromInt arg1} :: tail else genIndirectSimple(arg1, cvec) | genIndirect (arg1, cvec) = genIndirectSimple(arg1, cvec) fun genEnterIntCatch(code as Code{stage1Code, ...}) = stage1Code := genEnterInt(0wxff, code) @ !stage1Code and genEnterIntCall(code as Code{stage1Code, ...}, args) = stage1Code := genEnterInt(Word8.fromInt args, code) @ !stage1Code val opcode_notBoolean = SimpleCode [opcode_notBoolean] val opcode_cellLength = SimpleCode [opcode_cellLength] and opcode_cellFlags = SimpleCode [opcode_cellFlags] and opcode_clearMutable = SimpleCode [opcode_clearMutable] and opcode_atomicExchAdd = SimpleCode [opcode_escape, ext_opcode_atomicExchAdd] and opcode_atomicReset = SimpleCode [opcode_escape, ext_opcode_atomicReset] and opcode_longWToTagged = SimpleCode [opcode_escape, ext_opcode_longWToTagged] and opcode_signedToLongW = SimpleCode [opcode_escape, ext_opcode_signedToLongW] and opcode_unsignedToLongW = SimpleCode [opcode_escape, ext_opcode_unsignedToLongW] and opcode_realAbs = SimpleCode [opcode_escape, ext_opcode_realAbs] and opcode_realNeg = SimpleCode [opcode_escape, ext_opcode_realNeg] and opcode_fixedIntToReal = SimpleCode [opcode_escape, ext_opcode_fixedIntToReal] and opcode_fixedIntToFloat = SimpleCode [opcode_escape, ext_opcode_fixedIntToFloat] and opcode_floatToReal = SimpleCode [opcode_escape, ext_opcode_floatToReal] val opcode_equalWord = SimpleCode [opcode_equalWord] and opcode_lessSigned = SimpleCode [opcode_lessSigned] and opcode_lessUnsigned = SimpleCode [opcode_lessUnsigned] and opcode_lessEqSigned = SimpleCode [opcode_lessEqSigned] and opcode_lessEqUnsigned = SimpleCode [opcode_lessEqUnsigned] and opcode_greaterSigned = SimpleCode [opcode_greaterSigned] and opcode_greaterUnsigned = SimpleCode [opcode_greaterUnsigned] and opcode_greaterEqSigned = SimpleCode [opcode_greaterEqSigned] and opcode_greaterEqUnsigned = SimpleCode [opcode_greaterEqUnsigned] val opcode_fixedAdd = SimpleCode [opcode_fixedAdd] val opcode_fixedSub = SimpleCode [opcode_fixedSub] val opcode_fixedMult = SimpleCode 
[opcode_fixedMult] val opcode_fixedQuot = SimpleCode [opcode_fixedQuot] val opcode_fixedRem = SimpleCode [opcode_fixedRem] val opcode_fixedDiv = SimpleCode [opcode_escape, ext_opcode_fixedDiv] val opcode_fixedMod = SimpleCode [opcode_escape, ext_opcode_fixedMod] val opcode_wordAdd = SimpleCode [opcode_wordAdd] val opcode_wordSub = SimpleCode [opcode_wordSub] val opcode_wordMult = SimpleCode [opcode_wordMult] val opcode_wordDiv = SimpleCode [opcode_wordDiv] val opcode_wordMod = SimpleCode [opcode_wordMod] val opcode_wordAnd = SimpleCode [opcode_wordAnd] val opcode_wordOr = SimpleCode [opcode_wordOr] val opcode_wordXor = SimpleCode [opcode_wordXor] val opcode_wordShiftLeft = SimpleCode [opcode_wordShiftLeft] val opcode_wordShiftRLog = SimpleCode [opcode_wordShiftRLog] val opcode_wordShiftRArith = SimpleCode [opcode_escape, ext_opcode_wordShiftRArith] val opcode_allocByteMem = SimpleCode [opcode_allocByteMem] val opcode_lgWordEqual = SimpleCode [opcode_escape, ext_opcode_lgWordEqual] val opcode_lgWordLess = SimpleCode [opcode_escape, ext_opcode_lgWordLess] val opcode_lgWordLessEq = SimpleCode [opcode_escape, ext_opcode_lgWordLessEq] val opcode_lgWordGreater = SimpleCode [opcode_escape, ext_opcode_lgWordGreater] val opcode_lgWordGreaterEq = SimpleCode [opcode_escape, ext_opcode_lgWordGreaterEq] val opcode_lgWordAdd = SimpleCode [opcode_escape, ext_opcode_lgWordAdd] val opcode_lgWordSub = SimpleCode [opcode_escape, ext_opcode_lgWordSub] val opcode_lgWordMult = SimpleCode [opcode_escape, ext_opcode_lgWordMult] val opcode_lgWordDiv = SimpleCode [opcode_escape, ext_opcode_lgWordDiv] val opcode_lgWordMod = SimpleCode [opcode_escape, ext_opcode_lgWordMod] val opcode_lgWordAnd = SimpleCode [opcode_escape, ext_opcode_lgWordAnd] val opcode_lgWordOr = SimpleCode [opcode_escape, ext_opcode_lgWordOr] val opcode_lgWordXor = SimpleCode [opcode_escape, ext_opcode_lgWordXor] val opcode_lgWordShiftLeft = SimpleCode [opcode_escape, ext_opcode_lgWordShiftLeft] val opcode_lgWordShiftRLog = SimpleCode [opcode_escape, ext_opcode_lgWordShiftRLog] val opcode_lgWordShiftRArith = SimpleCode [opcode_escape, ext_opcode_lgWordShiftRArith] val opcode_realEqual = SimpleCode [opcode_escape, ext_opcode_realEqual] val opcode_realLess = SimpleCode [opcode_escape, ext_opcode_realLess] val opcode_realLessEq = SimpleCode [opcode_escape, ext_opcode_realLessEq] val opcode_realGreater = SimpleCode [opcode_escape, ext_opcode_realGreater] val opcode_realGreaterEq = SimpleCode [opcode_escape, ext_opcode_realGreaterEq] val opcode_realUnordered = SimpleCode [opcode_escape, ext_opcode_realUnordered] val opcode_realAdd = SimpleCode [opcode_escape, ext_opcode_realAdd] val opcode_realSub = SimpleCode [opcode_escape, ext_opcode_realSub] val opcode_realMult = SimpleCode [opcode_escape, ext_opcode_realMult] val opcode_realDiv = SimpleCode [opcode_escape, ext_opcode_realDiv] and opcode_floatAbs = SimpleCode [opcode_escape, ext_opcode_floatAbs] and opcode_floatNeg = SimpleCode [opcode_escape, ext_opcode_floatNeg] val opcode_floatEqual = SimpleCode [opcode_escape, ext_opcode_floatEqual] val opcode_floatLess = SimpleCode [opcode_escape, ext_opcode_floatLess] val opcode_floatLessEq = SimpleCode [opcode_escape, ext_opcode_floatLessEq] val opcode_floatGreater = SimpleCode [opcode_escape, ext_opcode_floatGreater] val opcode_floatGreaterEq = SimpleCode [opcode_escape, ext_opcode_floatGreaterEq] val opcode_floatUnordered = SimpleCode [opcode_escape, ext_opcode_floatUnordered] val opcode_floatAdd = SimpleCode [opcode_escape, ext_opcode_floatAdd] val 
opcode_floatSub = SimpleCode [opcode_escape, ext_opcode_floatSub] val opcode_floatMult = SimpleCode [opcode_escape, ext_opcode_floatMult] val opcode_floatDiv = SimpleCode [opcode_escape, ext_opcode_floatDiv] val opcode_getThreadId = SimpleCode [opcode_getThreadId] val opcode_allocWordMemory = SimpleCode [opcode_allocWordMemory] val opcode_alloc_ref = SimpleCode [opcode_alloc_ref] val opcode_loadMLWord = SimpleCode [opcode_loadMLWord] val opcode_loadMLByte = SimpleCode [opcode_loadMLByte] val opcode_loadC8 = SimpleCode [opcode_escape, ext_opcode_loadC8] val opcode_loadC16 = SimpleCode [opcode_escape, ext_opcode_loadC16] val opcode_loadC32 = SimpleCode [opcode_escape, ext_opcode_loadC32] val opcode_loadC64 = SimpleCode [opcode_escape, ext_opcode_loadC64] val opcode_loadCFloat = SimpleCode [opcode_escape, ext_opcode_loadCFloat] val opcode_loadCDouble = SimpleCode [opcode_escape, ext_opcode_loadCDouble] val opcode_loadUntagged = SimpleCode [opcode_loadUntagged] val opcode_storeMLWord = SimpleCode [opcode_storeMLWord] val opcode_storeMLByte = SimpleCode [opcode_storeMLByte] val opcode_storeC8 = SimpleCode [opcode_escape, ext_opcode_storeC8] val opcode_storeC16 = SimpleCode [opcode_escape, ext_opcode_storeC16] val opcode_storeC32 = SimpleCode [opcode_escape, ext_opcode_storeC32] val opcode_storeC64 = SimpleCode [opcode_escape, ext_opcode_storeC64] val opcode_storeCFloat = SimpleCode [opcode_escape, ext_opcode_storeCFloat] val opcode_storeCDouble = SimpleCode [opcode_escape, ext_opcode_storeCDouble] val opcode_storeUntagged = SimpleCode [opcode_storeUntagged] val opcode_blockMoveWord = SimpleCode [opcode_blockMoveWord] val opcode_blockMoveByte = SimpleCode [opcode_blockMoveByte] val opcode_blockEqualByte = SimpleCode [opcode_blockEqualByte] val opcode_blockCompareByte = SimpleCode [opcode_blockCompareByte] val opcode_deleteHandler = SimpleCode [opcode_deleteHandler] val opcode_allocCSpace = SimpleCode [opcode_escape, ext_opcode_allocCSpace] val opcode_freeCSpace = SimpleCode [opcode_escape, ext_opcode_freeCSpace] val opcode_arbAdd = SimpleCode [opcode_arbAdd] val opcode_arbSubtract = SimpleCode [opcode_arbSubtract] val opcode_arbMultiply = SimpleCode [opcode_arbMultiply] structure Sharing = struct type code = code type opcode = opcode type labels = labels type closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONSSIG.sml b/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONSSIG.sml index bb022bb4..076c5649 100644 --- a/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONSSIG.sml +++ b/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONSSIG.sml @@ -1,224 +1,224 @@ (* Copyright (c) 2016-18, 2020-21 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) signature INTCODECONSSIG = sig type machineWord = Address.machineWord type address = Address.address type code type opcode type labels type closureRef val opcode_notBoolean: opcode val opcode_cellLength: opcode and opcode_cellFlags: opcode and opcode_clearMutable: opcode and opcode_atomicExchAdd: opcode and opcode_atomicReset: opcode and opcode_longWToTagged: opcode and opcode_signedToLongW: opcode and opcode_unsignedToLongW: opcode and opcode_realAbs: opcode and opcode_realNeg: opcode and opcode_fixedIntToReal: opcode and opcode_fixedIntToFloat: opcode and opcode_floatToReal: opcode and opcode_floatAbs: opcode and opcode_floatNeg: opcode val opcode_equalWord: opcode and opcode_lessSigned: opcode and opcode_lessUnsigned: opcode and opcode_lessEqSigned: opcode and opcode_lessEqUnsigned: opcode and opcode_greaterSigned: opcode and opcode_greaterUnsigned: opcode and opcode_greaterEqSigned: opcode and opcode_greaterEqUnsigned: opcode val opcode_fixedAdd: opcode val opcode_fixedSub: opcode val opcode_fixedMult: opcode val opcode_fixedQuot: opcode val opcode_fixedRem: opcode val opcode_fixedDiv: opcode val opcode_fixedMod: opcode val opcode_wordAdd: opcode val opcode_wordSub: opcode val opcode_wordMult: opcode val opcode_wordDiv: opcode val opcode_wordMod: opcode val opcode_wordAnd: opcode val opcode_wordOr: opcode val opcode_wordXor: opcode val opcode_wordShiftLeft: opcode val opcode_wordShiftRLog: opcode val opcode_wordShiftRArith: opcode val opcode_allocByteMem: opcode val opcode_lgWordEqual: opcode val opcode_lgWordLess: opcode val opcode_lgWordLessEq: opcode val opcode_lgWordGreater: opcode val opcode_lgWordGreaterEq: opcode val opcode_lgWordAdd: opcode val opcode_lgWordSub: opcode val opcode_lgWordMult: opcode val opcode_lgWordDiv: opcode val opcode_lgWordMod: opcode val opcode_lgWordAnd: opcode val opcode_lgWordOr: opcode val opcode_lgWordXor: opcode val opcode_lgWordShiftLeft: opcode val opcode_lgWordShiftRLog: opcode val opcode_lgWordShiftRArith: opcode val opcode_realEqual: opcode val opcode_realLess: opcode val opcode_realLessEq: opcode val opcode_realGreater: opcode val opcode_realGreaterEq: opcode val opcode_realUnordered: opcode val opcode_realAdd: opcode val opcode_realSub: opcode val opcode_realMult: opcode val opcode_realDiv: opcode val opcode_floatEqual: opcode val opcode_floatLess: opcode val opcode_floatLessEq: opcode val opcode_floatGreater: opcode val opcode_floatGreaterEq: opcode val opcode_floatUnordered: opcode val opcode_floatAdd: opcode val opcode_floatSub: opcode val opcode_floatMult: opcode val opcode_floatDiv: opcode val opcode_getThreadId: opcode val opcode_allocWordMemory: opcode val opcode_alloc_ref: opcode val opcode_loadMLWord: opcode val opcode_loadMLByte: opcode val opcode_loadC8: opcode val opcode_loadC16: opcode val opcode_loadC32: opcode val opcode_loadC64: opcode val opcode_loadCFloat: opcode val opcode_loadCDouble: opcode val opcode_loadUntagged: opcode val opcode_storeMLWord: opcode val opcode_storeMLByte: opcode val opcode_storeC8: opcode val opcode_storeC16: opcode val opcode_storeC32: opcode val opcode_storeC64: opcode val opcode_storeCFloat: opcode val opcode_storeCDouble: opcode val opcode_storeUntagged: opcode val opcode_blockMoveWord: opcode val opcode_blockMoveByte: opcode val opcode_blockEqualByte: opcode val opcode_blockCompareByte: 
opcode val opcode_deleteHandler: opcode val opcode_allocCSpace: opcode val opcode_freeCSpace: opcode val opcode_arbAdd: opcode val opcode_arbSubtract: opcode val opcode_arbMultiply: opcode val codeCreate: string * Universal.universal list -> code (* makes the initial segment. *) (* GEN- routines all put a value at the instruction counter and add an appropriate amount to it. *) (* gen... - put instructions and their operands. *) val genCallClosure : code -> unit val genRaiseEx : code -> unit val genLock : code -> unit val genLdexc : code -> unit val genPushHandler : code -> unit val genReturn : int * code -> unit val genLocal : int * code -> unit val genIndirect : int * code -> unit val genSetStackVal : int * code -> unit val genCase : int * code -> labels list val genTuple : int * code -> unit val genTailCall : int * int * code -> unit val genIndirectClosure: { addr: int, item: int, code: code } -> unit and genIndirectContainer: int * code -> unit and genMoveToContainer: int * code -> unit and genMoveToMutClosure: int * code -> unit and genClosure: int * code -> unit - val genDoubleToFloat: IEEEReal.rounding_mode option * code -> unit + val genDoubleToFloat: code -> unit and genRealToInt: IEEEReal.rounding_mode * code -> unit and genFloatToInt: IEEEReal.rounding_mode * code -> unit val genAllocMutableClosure: int * code -> unit val genRTSCallFast: int * code -> unit val genRTSCallFastRealtoReal: code -> unit val genRTSCallFastRealRealtoReal: code -> unit val genRTSCallFastGeneraltoReal: code -> unit val genRTSCallFastRealGeneraltoReal: code -> unit val genRTSCallFastFloattoFloat: code -> unit val genRTSCallFastFloatFloattoFloat: code -> unit val genRTSCallFastGeneraltoFloat: code -> unit val genRTSCallFastFloatGeneraltoFloat: code -> unit val genOpcode: opcode * code -> unit (* genEnter instructions are only needed when machine-code routines can call interpreted routines or vice-versa. The enterInt instruction causes the interpreter to be entered and the argument indicates the reason. *) val genEnterIntCatch : code -> unit val genEnterIntCall : code * int -> unit (* pushConst - Generates code to push a constant. *) val pushConst : machineWord * code -> unit (* Create a container on the stack *) val genContainer : int * code -> unit (* copyCode - Finish up after compiling a function. *) val copyCode : {code: code, maxStack: int, numberOfArguments: int, resultClosure: closureRef } -> unit (* putBranchInstruction puts in an instruction which involves a forward reference. *) datatype jumpTypes = Jump | JumpBack | JumpFalse | JumpTrue | SetHandler val putBranchInstruction: jumpTypes * labels * code -> unit val createLabel: unit -> labels (* Define the position of a label. *) val setLabel: labels * code -> unit val resetStack: int * bool * code -> unit (* Set a pending reset *) val genEqualWordConst: word * code -> unit val genIsTagged: code -> unit structure Sharing: sig type code = code type opcode = opcode type labels = labels type closureRef = closureRef end end ; diff --git a/mlsource/MLCompiler/CodeTree/ByteCode/INTGCODE.ML b/mlsource/MLCompiler/CodeTree/ByteCode/INTGCODE.ML index 6aa19d4b..c58b0db8 100644 --- a/mlsource/MLCompiler/CodeTree/ByteCode/INTGCODE.ML +++ b/mlsource/MLCompiler/CodeTree/ByteCode/INTGCODE.ML @@ -1,1234 +1,1234 @@ (* Copyright (c) 2000 Cambridge University Technical Services Limited Further development copyright David C.J. 
Matthews 2016-18,2020-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Title: Generate interpretable code for Poly system from the code tree. Author: Dave Matthews, Cambridge University Computer Laboratory Copyright Cambridge University 1985 *) (* This generates byte-code that is interpreted by the run-time system. It is now used as a fall-back to allow Poly/ML to run on non-X86 architectures. Early versions were used as a porting aid while a native code-generator was being developed and the "enter-int" instructions that were needed for that have been retained although they no longer actually generate code. *) functor INTGCODE ( structure CODECONS : INTCODECONSSIG structure BACKENDTREE: BackendIntermediateCodeSig structure CODE_ARRAY: CODEARRAYSIG sharing CODECONS.Sharing = BACKENDTREE.Sharing = CODE_ARRAY.Sharing ) : GENCODESIG = struct open CODECONS open Address open BACKENDTREE open Misc open CODE_ARRAY val word0 = toMachineWord 0; val DummyValue : machineWord = word0; (* used as result of "raise e" etc. *) type caseForm = { cases : (backendIC * word) list, test : backendIC, caseType: caseType, default : backendIC } (* Where the result, if any, should go *) datatype whereto = NoResult (* discard result *) | ToStack (* Need a result but it can stay on the pseudo-stack *); (* Are we at the end of the function. *) datatype tail = EndOfProc | NotEnd (* Code generate a function or global declaration *) fun codegen (pt, cvec, resultClosure, numOfArgs, localCount, parameters) = let datatype decEntry = StackAddr of int | Empty val decVec = Array.array (localCount, Empty) (* Count of number of items on the stack. *) val realstackptr = ref 1 (* The closure ptr is already there *) (* Maximum size of the stack. *) val maxStack = ref 1 (* Push a value onto the stack. *) fun incsp () = ( realstackptr := !realstackptr + 1; if !realstackptr > !maxStack then maxStack := !realstackptr else () ) (* An entry has been removed from the stack. *) fun decsp () = realstackptr := !realstackptr - 1; fun pushLocalStackValue addr = ( genLocal(!realstackptr + addr, cvec); incsp() ) (* Loads a local, argument or closure value; translating local stack addresses to real stack offsets. *) fun locaddr(BICLoadArgument locn) = pushLocalStackValue (numOfArgs-locn) | locaddr(BICLoadLocal locn) = ( (* positive address - on the stack. *) case Array.sub (decVec, locn) of StackAddr n => pushLocalStackValue (~ n) | _ => (* Should be on the stack, not a function. *) raise InternalError "locaddr: bad stack address" ) | locaddr(BICLoadClosure locn) = (* closure-pointer relative *) ( genIndirectClosure{addr = !realstackptr-1, item=locn, code=cvec}; incsp() ) | locaddr BICLoadRecursive = pushLocalStackValue ~1 (* The closure itself - first value on the stack. *) (* generates code from the tree *) fun gencde (pt : backendIC, whereto : whereto, tailKind : tail, loopAddr) : unit = let (* Save the stack pointer value here. We may want to reset the stack. 
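The value saved here is compared with the final realstackptr by the adjustment code at the end of gencde, which must leave exactly one item on the stack for ToStack and none for NoResult.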
*) val oldsp = !realstackptr; (* Operations on ML memory always have the base as an ML address. Word operations are always word aligned. The higher level will have extracted any constant offset and scaled it if necessary. That's helpful for the X86 but not for the interpreter. We have to turn them back into indexes. *) fun genMLAddress({base, index, offset}, scale) = ( gencde (base, ToStack, NotEnd, loopAddr); offset mod scale = 0 orelse raise InternalError "genMLAddress"; case (index, offset div scale) of (NONE, soffset) => (pushConst (toMachineWord soffset, cvec); incsp()) | (SOME indexVal, 0) => gencde (indexVal, ToStack, NotEnd, loopAddr) | (SOME indexVal, soffset) => ( gencde (indexVal, ToStack, NotEnd, loopAddr); pushConst (toMachineWord soffset, cvec); genOpcode(opcode_wordAdd, cvec) ) ) (* Load the address, index value and offset for non-byte operations. Because the offset has already been scaled by the size of the operand we have to load the index and offset separately. *) fun genCAddress{base, index, offset} = ( gencde (base, ToStack, NotEnd, loopAddr); case index of NONE => (pushConst (toMachineWord 0, cvec); incsp()) | SOME indexVal => gencde (indexVal, ToStack, NotEnd, loopAddr); pushConst (toMachineWord offset, cvec); incsp() ) val () = case pt of BICEval evl => genEval (evl, tailKind) | BICExtract ext => (* This may just be being used to discard a value which isn't used on this branch. *) if whereto = NoResult then () else locaddr ext | BICField {base, offset} => (gencde (base, ToStack, NotEnd, loopAddr); genIndirect (offset, cvec)) | BICLoadContainer {base, offset} => (gencde (base, ToStack, NotEnd, loopAddr); genIndirectContainer (offset, cvec)) | BICLambda lam => genProc (lam, false, fn () => ()) | BICConstnt(w, _) => let val () = pushConst (w, cvec); in incsp () end | BICCond (testPart, thenPart, elsePart) => genCond (testPart, thenPart, elsePart, whereto, tailKind, loopAddr) | BICNewenv(decls, exp) => let (* Processes a list of entries. *) (* Mutually recursive declarations. May be either lambdas or constants. Recurse down the list pushing the addresses of the closure vectors, then unwind the recursion and fill them in. *) fun genMutualDecs [] = () | genMutualDecs ({lambda, addr, ...} :: otherDecs) = genProc (lambda, true, fn() => ( Array.update (decVec, addr, StackAddr (! realstackptr)); genMutualDecs (otherDecs) )) fun codeDecls(BICRecDecs dl) = genMutualDecs dl | codeDecls(BICDecContainer{size, addr}) = ( (* If this is a container we have to process it here otherwise it will be removed in the stack adjustment code. *) genContainer(size, cvec); (* Push the address of this container. *) realstackptr := !realstackptr + size + 1; (* Pushes N words plus the address. *) Array.update (decVec, addr, StackAddr(!realstackptr)) ) | codeDecls(BICDeclar{value, addr, ...}) = ( gencde (value, ToStack, NotEnd, loopAddr); Array.update (decVec, addr, StackAddr(!realstackptr)) ) | codeDecls(BICNullBinding exp) = gencde (exp, NoResult, NotEnd, loopAddr) in List.app codeDecls decls; gencde (exp, whereto, tailKind, loopAddr) end | BICBeginLoop {loop=body, arguments} => (* Execute the body which will contain at least one Loop instruction. There will also be path(s) which don't contain Loops and these will drop through. *) let val args = List.map #1 arguments (* Evaluate each of the arguments, pushing the result onto the stack. 
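Each argument's resulting stack position is recorded (argIndexList below) so that a later Loop instruction can evaluate the new values and then overwrite the originals in place before jumping back.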
*) fun genLoopArg ({addr, value, ...}) = ( gencde (value, ToStack, NotEnd, loopAddr); Array.update (decVec, addr, StackAddr (!realstackptr)); !realstackptr (* Return the posn on the stack. *) ) val argIndexList = map genLoopArg args; val startSp = ! realstackptr; (* Remember the current top of stack. *) val startLoop = createLabel () val () = setLabel(startLoop, cvec) (* Start of loop *) in (* Process the body, passing the jump-back address down for the Loop instruction(s). *) gencde (body, whereto, tailKind, SOME(startLoop, startSp, argIndexList)) (* Leave the arguments on the stack. They can be cleared later if needed. *) end | BICLoop argList => (* Jump back to the enclosing BeginLoop. *) let val (startLoop, startSp, argIndexList) = case loopAddr of SOME l => l | NONE => raise InternalError "No BeginLoop for Loop instr" (* Evaluate the arguments. First push them to the stack because evaluating an argument may depend on the current value of others. Only when we've evaluated all of them can we overwrite the original argument positions. *) fun loadArgs ([], []) = !realstackptr - startSp (* The offset of all the args. *) | loadArgs (arg:: argList, _ :: argIndexList) = let (* Evaluate all the arguments. *) val () = gencde (arg, ToStack, NotEnd, NONE); val argOffset = loadArgs(argList, argIndexList); in genSetStackVal(argOffset, cvec); (* Copy the arg over. *) decsp(); (* The argument has now been popped. *) argOffset end | loadArgs _ = raise InternalError "loadArgs: Mismatched arguments"; val _: int = loadArgs(List.map #1 argList, argIndexList) in if !realstackptr <> startSp then resetStack (!realstackptr - startSp, false, cvec) (* Remove any local variables. *) else (); (* Jump back to the start of the loop. *) putBranchInstruction(JumpBack, startLoop, cvec) end | BICRaise exp => ( gencde (exp, ToStack, NotEnd, loopAddr); genRaiseEx cvec ) | BICHandle {exp, handler, exPacketAddr} => let (* Save old handler *) val () = genPushHandler cvec val () = incsp () val handlerLabel = createLabel() val () = putBranchInstruction (SetHandler, handlerLabel, cvec) val () = incsp() (* Code generate the body; "NotEnd" because we have to come back to remove the handler; "ToStack" because delHandler needs a result to carry down. *) val () = gencde (exp, ToStack, NotEnd, loopAddr) (* Now get out of the handler and restore the old one. *) val () = genOpcode(opcode_deleteHandler, cvec) val skipHandler = createLabel() val () = putBranchInstruction (Jump, skipHandler, cvec) (* Now process the handler itself. First we have to reset the stack. Note that we have to use "ToStack" again to be consistent with the stack-handling in the body-part. If we actually wanted "NoResult", the stack adjustment code at the end of gencde will take care of this. This means that I don't want to do any clever "end-of-function" optimisation either. SPF 6/1/97 *) val () = realstackptr := oldsp val () = setLabel (handlerLabel, cvec) (* If we were executing machine code we must re-enter the interpreter. *) val () = genEnterIntCatch cvec (* Push the exception packet and set the address. *) val () = genLdexc cvec val () = incsp () val () = Array.update (decVec, exPacketAddr, StackAddr(!realstackptr)) val () = gencde (handler, ToStack, NotEnd, loopAddr) (* Have to remove the exception packet. 
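It still sits beneath the handler's result, which is why we use the retaining (true) form of resetStack: it discards entries while keeping the value on top of the stack.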
*) val () = resetStack(1, true, cvec) val () = decsp() (* Finally fix up the jump around the handler *) val () = setLabel (skipHandler, cvec) in () end | BICCase ({cases, test, default, firstIndex, ...}) => let val () = gencde (test, ToStack, NotEnd, loopAddr) (* Label to jump to at the end of each case. *) val exitJump = createLabel() val () = if firstIndex = 0w0 then () else ( (* Subtract lower limit. Don't check for overflow. Instead allow a large value to wrap around and check in "case" instruction. *) pushConst(toMachineWord firstIndex, cvec); genOpcode(opcode_wordSub, cvec) ) (* Generate the case instruction followed by the table of jumps. *) val nCases = List.length cases val caseLabels = genCase (nCases, cvec) val () = decsp () (* The default case, if any, follows the case statement. *) (* If we have a jump to the default set it to jump here. *) local fun fixDefault(NONE, defCase) = setLabel(defCase, cvec) | fixDefault(SOME _, _) = () in val () = ListPair.appEq fixDefault (cases, caseLabels) end val () = gencde (default, whereto, tailKind, loopAddr); fun genCases(SOME body, label) = ( (* First exit from the previous case or the default if this is the first. *) putBranchInstruction(Jump, exitJump, cvec); (* Remove the result - the last case will leave it. *) case whereto of ToStack => decsp () | NoResult => (); (* Fix up the jump to come here. *) setLabel(label, cvec); gencde (body, whereto, tailKind, loopAddr) ) | genCases(NONE, _) = () val () = ListPair.appEq genCases (cases, caseLabels) (* Finally set the exit jump to come here. *) val () = setLabel (exitJump, cvec) in () end | BICTuple recList => let val size = List.length recList in (* Move the fields into the vector. *) List.app(fn v => gencde (v, ToStack, NotEnd, loopAddr)) recList; genTuple (size, cvec); realstackptr := !realstackptr - (size - 1) end | BICSetContainer{container, tuple, filter} => (* Copy the contents of a tuple into a container. If the tuple is a Tuple instruction we can avoid generating the tuple and then unpacking it and simply copy the fields that make up the tuple directly into the container. *) ( case tuple of BICTuple cl => (* Simply set the container from the values. *) let (* Load the address of the container. *) val _ = gencde (container, ToStack, NotEnd, loopAddr); fun setValues([], _, _) = () | setValues(v::tl, sourceOffset, destOffset) = if sourceOffset < BoolVector.length filter andalso BoolVector.sub(filter, sourceOffset) then ( gencde (v, ToStack, NotEnd, loopAddr); (* Move the entry into the container. This instruction pops the value to be moved but not the destination. *) genMoveToContainer(destOffset, cvec); decsp(); setValues(tl, sourceOffset+1, destOffset+1) ) else setValues(tl, sourceOffset+1, destOffset) in setValues(cl, 0, 0) (* The container address is still on the stack. *) end | _ => let (* General case. *) (* First the target tuple, then the container. *) val () = gencde (tuple, ToStack, NotEnd, loopAddr) val () = gencde (container, ToStack, NotEnd, loopAddr) val last = BoolVector.foldli(fn (i, true, _) => i | (_, false, n) => n) ~1 filter fun copy (sourceOffset, destOffset) = if BoolVector.sub(filter, sourceOffset) then ( (* Duplicate the tuple address. *) genLocal(1, cvec); genIndirect(sourceOffset, cvec); genMoveToContainer(destOffset, cvec); if sourceOffset = last then () else copy (sourceOffset+1, destOffset+1) ) else copy(sourceOffset+1, destOffset) in copy (0, 0) (* The container and tuple addresses are still on the stack. *) end ) | BICTagTest { test, tag, ...
} => ( gencde (test, ToStack, NotEnd, loopAddr); genEqualWordConst(tag, cvec) ) | BICNullary {oper=BuiltIns.GetCurrentThreadId} => ( genOpcode(opcode_getThreadId, cvec); incsp() ) | BICNullary {oper=BuiltIns.CheckRTSException} => ( (* Do nothing. This is done in the RTS call. *) ) | BICNullary {oper=BuiltIns.CPUPause} => ( (* Do nothing. It's really only a hint. *) ) | BICUnary { oper, arg1 } => let open BuiltIns val () = gencde (arg1, ToStack, NotEnd, loopAddr) in case oper of NotBoolean => genOpcode(opcode_notBoolean, cvec) | IsTaggedValue => genIsTagged cvec | MemoryCellLength => genOpcode(opcode_cellLength, cvec) | MemoryCellFlags => genOpcode(opcode_cellFlags, cvec) | ClearMutableFlag => genOpcode(opcode_clearMutable, cvec) | AtomicReset => genOpcode(opcode_atomicReset, cvec) | LongWordToTagged => genOpcode(opcode_longWToTagged, cvec) | SignedToLongWord => genOpcode(opcode_signedToLongW, cvec) | UnsignedToLongWord => genOpcode(opcode_unsignedToLongW, cvec) | RealAbs PrecDouble => genOpcode(opcode_realAbs, cvec) | RealNeg PrecDouble => genOpcode(opcode_realNeg, cvec) | RealFixedInt PrecDouble => genOpcode(opcode_fixedIntToReal, cvec) | RealAbs PrecSingle => genOpcode(opcode_floatAbs, cvec) | RealNeg PrecSingle => genOpcode(opcode_floatNeg, cvec) | RealFixedInt PrecSingle => genOpcode(opcode_fixedIntToFloat, cvec) | FloatToDouble => genOpcode(opcode_floatToReal, cvec) - | DoubleToFloat rnding => genDoubleToFloat(rnding, cvec) + | DoubleToFloat => genDoubleToFloat cvec | RealToInt (PrecDouble, rnding) => genRealToInt(rnding, cvec) | RealToInt (PrecSingle, rnding) => genFloatToInt(rnding, cvec) | TouchAddress => resetStack(1, false, cvec) (* Discard this *) | AllocCStack => genOpcode(opcode_allocCSpace, cvec) end | BICBinary { oper=BuiltIns.WordComparison{test=BuiltIns.TestEqual, ...}, arg1, arg2=BICConstnt(w, _) } => let val () = gencde (arg1, ToStack, NotEnd, loopAddr) in genEqualWordConst(toShort w, cvec) end | BICBinary { oper=BuiltIns.WordComparison{test=BuiltIns.TestEqual, ...}, arg1=BICConstnt(w, _), arg2 } => let val () = gencde (arg2, ToStack, NotEnd, loopAddr) in genEqualWordConst(toShort w, cvec) end | BICBinary { oper, arg1, arg2 } => let open BuiltIns val () = gencde (arg1, ToStack, NotEnd, loopAddr) val () = gencde (arg2, ToStack, NotEnd, loopAddr) in case oper of WordComparison{test=TestEqual, ...} => genOpcode(opcode_equalWord, cvec) | WordComparison{test=TestLess, isSigned=true} => genOpcode(opcode_lessSigned, cvec) | WordComparison{test=TestLessEqual, isSigned=true} => genOpcode(opcode_lessEqSigned, cvec) | WordComparison{test=TestGreater, isSigned=true} => genOpcode(opcode_greaterSigned, cvec) | WordComparison{test=TestGreaterEqual, isSigned=true} => genOpcode(opcode_greaterEqSigned, cvec) | WordComparison{test=TestLess, isSigned=false} => genOpcode(opcode_lessUnsigned, cvec) | WordComparison{test=TestLessEqual, isSigned=false} => genOpcode(opcode_lessEqUnsigned, cvec) | WordComparison{test=TestGreater, isSigned=false} => genOpcode(opcode_greaterUnsigned, cvec) | WordComparison{test=TestGreaterEqual, isSigned=false} => genOpcode(opcode_greaterEqUnsigned, cvec) | WordComparison{test=TestUnordered, ...} => raise InternalError "WordComparison: TestUnordered" | PointerEq => genOpcode(opcode_equalWord, cvec) | FixedPrecisionArith ArithAdd => genOpcode(opcode_fixedAdd, cvec) | FixedPrecisionArith ArithSub => genOpcode(opcode_fixedSub, cvec) | FixedPrecisionArith ArithMult => genOpcode(opcode_fixedMult, cvec) | FixedPrecisionArith ArithQuot => genOpcode(opcode_fixedQuot, cvec) 
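(* Each alternative here consumes the two arguments pushed above and pushes a single result; for example an unsigned a < b arrives as WordComparison{test=TestLess, isSigned=false} and emits just opcode_lessUnsigned. The decsp() at the end of this case accounts for the net pop. *)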
| FixedPrecisionArith ArithRem => genOpcode(opcode_fixedRem, cvec) | FixedPrecisionArith ArithDiv => raise InternalError "TODO: FixedPrecisionArith ArithDiv" | FixedPrecisionArith ArithMod => raise InternalError "TODO: FixedPrecisionArith ArithMod" | WordArith ArithAdd => genOpcode(opcode_wordAdd, cvec) | WordArith ArithSub => genOpcode(opcode_wordSub, cvec) | WordArith ArithMult => genOpcode(opcode_wordMult, cvec) | WordArith ArithDiv => genOpcode(opcode_wordDiv, cvec) | WordArith ArithMod => genOpcode(opcode_wordMod, cvec) | WordArith _ => raise InternalError "WordArith - unimplemented instruction" | WordLogical LogicalAnd => genOpcode(opcode_wordAnd, cvec) | WordLogical LogicalOr => genOpcode(opcode_wordOr, cvec) | WordLogical LogicalXor => genOpcode(opcode_wordXor, cvec) | WordShift ShiftLeft => genOpcode(opcode_wordShiftLeft, cvec) | WordShift ShiftRightLogical => genOpcode(opcode_wordShiftRLog, cvec) | WordShift ShiftRightArithmetic => genOpcode(opcode_wordShiftRArith, cvec) | AllocateByteMemory => genOpcode(opcode_allocByteMem, cvec) | LargeWordComparison TestEqual => genOpcode(opcode_lgWordEqual, cvec) | LargeWordComparison TestLess => genOpcode(opcode_lgWordLess, cvec) | LargeWordComparison TestLessEqual => genOpcode(opcode_lgWordLessEq, cvec) | LargeWordComparison TestGreater => genOpcode(opcode_lgWordGreater, cvec) | LargeWordComparison TestGreaterEqual => genOpcode(opcode_lgWordGreaterEq, cvec) | LargeWordComparison TestUnordered => raise InternalError "LargeWordComparison: TestUnordered" | LargeWordArith ArithAdd => genOpcode(opcode_lgWordAdd, cvec) | LargeWordArith ArithSub => genOpcode(opcode_lgWordSub, cvec) | LargeWordArith ArithMult => genOpcode(opcode_lgWordMult, cvec) | LargeWordArith ArithDiv => genOpcode(opcode_lgWordDiv, cvec) | LargeWordArith ArithMod => genOpcode(opcode_lgWordMod, cvec) | LargeWordArith _ => raise InternalError "LargeWordArith - unimplemented instruction" | LargeWordLogical LogicalAnd => genOpcode(opcode_lgWordAnd, cvec) | LargeWordLogical LogicalOr => genOpcode(opcode_lgWordOr, cvec) | LargeWordLogical LogicalXor => genOpcode(opcode_lgWordXor, cvec) | LargeWordShift ShiftLeft => genOpcode(opcode_lgWordShiftLeft, cvec) | LargeWordShift ShiftRightLogical => genOpcode(opcode_lgWordShiftRLog, cvec) | LargeWordShift ShiftRightArithmetic => genOpcode(opcode_lgWordShiftRArith, cvec) | RealComparison (TestEqual, PrecDouble) => genOpcode(opcode_realEqual, cvec) | RealComparison (TestLess, PrecDouble) => genOpcode(opcode_realLess, cvec) | RealComparison (TestLessEqual, PrecDouble) => genOpcode(opcode_realLessEq, cvec) | RealComparison (TestGreater, PrecDouble) => genOpcode(opcode_realGreater, cvec) | RealComparison (TestGreaterEqual, PrecDouble) => genOpcode(opcode_realGreaterEq, cvec) | RealComparison (TestUnordered, PrecDouble) => genOpcode(opcode_realUnordered, cvec) | RealComparison (TestEqual, PrecSingle) => genOpcode(opcode_floatEqual, cvec) | RealComparison (TestLess, PrecSingle) => genOpcode(opcode_floatLess, cvec) | RealComparison (TestLessEqual, PrecSingle) => genOpcode(opcode_floatLessEq, cvec) | RealComparison (TestGreater, PrecSingle) => genOpcode(opcode_floatGreater, cvec) | RealComparison (TestGreaterEqual, PrecSingle) => genOpcode(opcode_floatGreaterEq, cvec) | RealComparison (TestUnordered, PrecSingle) => genOpcode(opcode_floatUnordered, cvec) | RealArith (ArithAdd, PrecDouble) => genOpcode(opcode_realAdd, cvec) | RealArith (ArithSub, PrecDouble) => genOpcode(opcode_realSub, cvec) | RealArith (ArithMult, PrecDouble) => 
genOpcode(opcode_realMult, cvec) | RealArith (ArithDiv, PrecDouble) => genOpcode(opcode_realDiv, cvec) | RealArith (ArithAdd, PrecSingle) => genOpcode(opcode_floatAdd, cvec) | RealArith (ArithSub, PrecSingle) => genOpcode(opcode_floatSub, cvec) | RealArith (ArithMult, PrecSingle) => genOpcode(opcode_floatMult, cvec) | RealArith (ArithDiv, PrecSingle) => genOpcode(opcode_floatDiv, cvec) | RealArith _ => raise InternalError "RealArith - unimplemented instruction" | FreeCStack => genOpcode(opcode_freeCSpace, cvec) | AtomicExchangeAdd => genOpcode(opcode_atomicExchAdd, cvec) ; decsp() (* Removes one item from the stack. *) end | BICAllocateWordMemory {numWords as BICConstnt(length, _), flags as BICConstnt(flagByte, _), initial } => if isShort length andalso toShort length = 0w1 andalso isShort flagByte andalso toShort flagByte = 0wx40 then (* This is a very common case. *) ( gencde (initial, ToStack, NotEnd, loopAddr); genOpcode(opcode_alloc_ref, cvec) ) else let val () = gencde (numWords, ToStack, NotEnd, loopAddr) val () = gencde (flags, ToStack, NotEnd, loopAddr) val () = gencde (initial, ToStack, NotEnd, loopAddr) in genOpcode(opcode_allocWordMemory, cvec); decsp(); decsp() end | BICAllocateWordMemory { numWords, flags, initial } => let val () = gencde (numWords, ToStack, NotEnd, loopAddr) val () = gencde (flags, ToStack, NotEnd, loopAddr) val () = gencde (initial, ToStack, NotEnd, loopAddr) in genOpcode(opcode_allocWordMemory, cvec); decsp(); decsp() end | BICLoadOperation { kind=LoadStoreMLWord _, address={base, index=NONE, offset}} => ( (* If the index is a constant, frequently zero, we can use indirection. The offset is a byte count so has to be divided by the word size but it should always be an exact multiple. *) gencde (base, ToStack, NotEnd, loopAddr); offset mod Word.toInt wordSize = 0 orelse raise InternalError "gencde: BICLoadOperation - not word multiple"; genIndirect (offset div Word.toInt wordSize, cvec) ) | BICLoadOperation { kind=LoadStoreMLWord _, address} => ( genMLAddress(address, Word.toInt wordSize); genOpcode(opcode_loadMLWord, cvec); decsp() ) | BICLoadOperation { kind=LoadStoreMLByte _, address} => ( genMLAddress(address, 1); genOpcode(opcode_loadMLByte, cvec); decsp() ) | BICLoadOperation { kind=LoadStoreC8, address} => ( genCAddress address; genOpcode(opcode_loadC8, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC16, address} => ( genCAddress address; genOpcode(opcode_loadC16, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC32, address} => ( genCAddress address; genOpcode(opcode_loadC32, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC64, address} => ( genCAddress address; genOpcode(opcode_loadC64, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreCFloat, address} => ( genCAddress address; genOpcode(opcode_loadCFloat, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreCDouble, address} => ( genCAddress address; genOpcode(opcode_loadCDouble, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreUntaggedUnsigned, address} => ( genMLAddress(address, Word.toInt wordSize); genOpcode(opcode_loadUntagged, cvec); decsp() ) | BICStoreOperation { kind=LoadStoreMLWord _, address, value } => ( genMLAddress(address, Word.toInt wordSize); gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeMLWord, cvec); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreMLByte _, address, value } => ( genMLAddress(address, 1); gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeMLByte, cvec); 
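(* Stack bookkeeping, inferred from the decsp calls below: an ML address occupies two stack slots (base and index) and a C address three, so once the single result has been pushed each load or store needs one decsp per surplus slot. *)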
decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC8, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC8, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC16, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC16, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC32, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC32, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC64, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC64, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreCFloat, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeCFloat, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreCDouble, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeCDouble, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreUntaggedUnsigned, address, value} => ( genMLAddress(address, Word.toInt wordSize); gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeUntagged, cvec); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpMove{isByteMove=true}, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, 1); genMLAddress(destRight, 1); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockMoveByte, cvec); decsp(); decsp(); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpMove{isByteMove=false}, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, Word.toInt wordSize); genMLAddress(destRight, Word.toInt wordSize); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockMoveWord, cvec); decsp(); decsp(); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpEqualByte, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, 1); genMLAddress(destRight, 1); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockEqualByte, cvec); decsp(); decsp(); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpCompareByte, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, 1); genMLAddress(destRight, 1); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockCompareByte, cvec); decsp(); decsp(); decsp(); decsp() ) | BICArbitrary { oper, arg1, arg2, ... } => let open BuiltIns val () = gencde (arg1, ToStack, NotEnd, loopAddr) val () = gencde (arg2, ToStack, NotEnd, loopAddr) in case oper of ArithAdd => genOpcode(opcode_arbAdd, cvec) | ArithSub => genOpcode(opcode_arbSubtract, cvec) | ArithMult => genOpcode(opcode_arbMultiply, cvec) | _ => raise InternalError "Unknown arbitrary precision operation"; decsp() (* Removes one item from the stack. *) end in (* body of gencde *) (* This ensures that there is precisely one item on the stack if whereto = ToStack and no items if whereto = NoResult. There are two points to note carefully here: (1) Negative stack adjustments are legal if we have exited. This is because matchFailFn can cut the stack back too far for its immediately enclosing expression. This is harmless because the code actually exits that expression. (2) A stack adjustment of ~1 is legal if we're generating a declaration in "ToStack" mode, because not all declarations actually generate the dummy value that we expect. 
This used to be handled in resetStack itself, but it's more transparent to do it here. (In addition, there was a bug in resetStack - it accumulated the stack resets, but didn't correctly accumulate these "~1" dummy value pushes.) It's all much better now. SPF 9/1/97 *) case whereto of ToStack => let val newsp = oldsp + 1; val adjustment = !realstackptr - newsp val () = if adjustment = 0 then () else if adjustment < ~1 then raise InternalError ("gencde: bad adjustment " ^ Int.toString adjustment) (* Hack for declarations that should push values, but don't *) else if adjustment = ~1 then pushConst (DummyValue, cvec) else resetStack (adjustment, true, cvec) in realstackptr := newsp end | NoResult => let val adjustment = !realstackptr - oldsp val () = if adjustment = 0 then () else if adjustment < 0 then raise InternalError ("gencde: bad adjustment " ^ Int.toString adjustment) else resetStack (adjustment, false, cvec) in realstackptr := oldsp end end (* gencde *) (* doNext is only used for mutually recursive functions where a function may not be able to fill in its closure if it does not have all the remaining declarations. *) (* TODO: This always creates the closure on the heap even when makeClosure is false. *) and genProc ({ closure=[], localCount, body, argTypes, name, ...}: bicLambdaForm, mutualDecs, doNext: unit -> unit) : unit = let (* Create a one word item for the closure. This is returned for recursive references and filled in with the address of the code when we've finished. *) val closure = makeConstantClosure() val newCode : code = codeCreate(name, parameters); (* Code-gen function. No non-local references. *) val () = codegen (body, newCode, closure, List.length argTypes, localCount, parameters); val () = pushConst(closureAsAddress closure, cvec); val () = incsp(); in if mutualDecs then doNext () else () end | genProc ({ localCount, body, name, argTypes, closure, ...}, mutualDecs, doNext) = let (* Full closure required. *) val resClosure = makeConstantClosure() val newCode = codeCreate (name, parameters) (* Code-gen function. *) val () = codegen (body, newCode, resClosure, List.length argTypes, localCount, parameters) val closureVars = List.length closure (* Size excluding the code address *) in if mutualDecs then let (* Have to make the closure now and fill it in later. *) val () = pushConst(toMachineWord resClosure, cvec) val () = genAllocMutableClosure(closureVars, cvec) val () = incsp () val entryAddr : int = !realstackptr val () = doNext () (* Any mutually recursive functions. *) (* Push the address of the vector - if we have processed other closures the vector will no longer be on the top of the stack. *) val () = pushLocalStackValue (~ entryAddr) (* Load items for the closure. *) fun loadItems ([], _) = () | loadItems (v :: vs, addr : int) = let (* Generate an item and move it into the closure. *) val () = gencde (BICExtract v, ToStack, NotEnd, NONE) (* The closure "address" excludes the code address. *) val () = genMoveToMutClosure(addr, cvec) val () = decsp () in loadItems (vs, addr + 1) end val () = loadItems (closure, 0) val () = genLock cvec (* Lock it. *) (* Remove the extra reference. *) val () = resetStack (1, false, cvec) in realstackptr := !realstackptr - 1 end else let (* Put it on the stack. 
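(* With no mutual recursion the closure can be built at once: push the code address as a constant, push each free variable with BICExtract and let genClosure combine them into the closure cell. *)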
*) val () = pushConst (toMachineWord resClosure, cvec) val () = incsp () val () = List.app (fn pt => gencde (BICExtract pt, ToStack, NotEnd, NONE)) closure val () = genClosure (closureVars, cvec) in realstackptr := !realstackptr - closureVars end end and genCond (testCode, thenCode, elseCode, whereto, tailKind, loopAddr) = let (* andalso and orelse are turned into conditionals with constants. Convert this into a series of tests. *) fun genTest(BICConstnt(w, _), jumpOn, targetLabel) = let val cVal = case toShort w of 0w0 => false | 0w1 => true | _ => raise InternalError "genTest" in if cVal = jumpOn then putBranchInstruction (Jump, targetLabel, cvec) else () end | genTest(BICUnary { oper=BuiltIns.NotBoolean, arg1 }, jumpOn, targetLabel) = genTest(arg1, not jumpOn, targetLabel) | genTest(BICCond (testPart, thenPart, elsePart), jumpOn, targetLabel) = let val toElse = createLabel() and exitJump = createLabel() in genTest(testPart, false, toElse); genTest(thenPart, jumpOn, targetLabel); putBranchInstruction (Jump, exitJump, cvec); setLabel (toElse, cvec); genTest(elsePart, jumpOn, targetLabel); setLabel (exitJump, cvec) end | genTest(testCode, jumpOn, targetLabel) = ( gencde (testCode, ToStack, NotEnd, loopAddr); putBranchInstruction(if jumpOn then JumpTrue else JumpFalse, targetLabel, cvec); decsp() (* conditional branch pops a value. *) ) val toElse = createLabel() and exitJump = createLabel() val () = genTest(testCode, false, toElse) val () = gencde (thenCode, whereto, tailKind, loopAddr) (* Get rid of the result from the stack. If there is a result then the ``else-part'' will push it. *) val () = case whereto of ToStack => decsp () | NoResult => () val () = putBranchInstruction (Jump, exitJump, cvec) (* start of "else part" *) val () = setLabel (toElse, cvec) val () = gencde (elseCode, whereto, tailKind, loopAddr) val () = setLabel (exitJump, cvec) in () end (* genCond *) and genEval (eval, tailKind : tail) : unit = let val argList : backendIC list = List.map #1 (#argList eval) val argsToPass : int = List.length argList; (* Load arguments *) fun loadArgs [] = () | loadArgs (v :: vs) = let (* Push each expression onto the stack. *) val () = gencde(v, ToStack, NotEnd, NONE) in loadArgs vs end; (* Called after the args and the closure to call have been pushed onto the stack. *) fun callClosure () : unit = case tailKind of NotEnd => (* Normal call. *) genCallClosure cvec | EndOfProc => (* Tail recursive call. *) let (* Get the return address onto the top of the stack. *) val () = pushLocalStackValue 0 (* Slide the return address, closure and args over the old closure, return address and args, and reset the stack. Then jump to the closure. *) val () = genTailCall(argsToPass + 2, !realstackptr - 1 + (numOfArgs - argsToPass), cvec); (* It's "-1" not "-2", because we didn't bump the realstackptr when we pushed the return address. SPF 3/1/97 *) in () end (* Have to guarantee that the expression to return the function is evaluated before the arguments. *) (* Returns true if evaluating it later is safe. *) fun safeToLeave (BICConstnt _) = true | safeToLeave (BICLambda _) = true | safeToLeave (BICExtract _) = true | safeToLeave (BICField {base, ...}) = safeToLeave base | safeToLeave (BICLoadContainer {base, ...}) = safeToLeave base | safeToLeave _ = false val () = if (case argList of [] => true | _ => safeToLeave (#function eval)) then let (* Can load the args first. 
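(* This is safe because safeToLeave accepts only constants, lambdas, variables and field selections from these, none of which can raise an exception or have a side-effect when evaluated. *)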
*) val () = loadArgs argList in gencde (#function eval, ToStack, NotEnd, NONE) end else let (* The expression for the function is too complicated to risk leaving. It might have a side-effect and we must ensure that any side-effects it has are done before the arguments are loaded. *) val () = gencde(#function eval, ToStack, NotEnd, NONE); val () = loadArgs(argList); (* Load the function again. *) val () = genLocal(argsToPass, cvec); in incsp () end val () = callClosure () (* Call the function. *) (* Make sure we interpret when we return from the call *) val () = genEnterIntCall (cvec, argsToPass) in (* body of genEval *) realstackptr := !realstackptr - argsToPass (* Args popped by caller. *) end (* Generate the function. *) (* Assume we always want a result. There is otherwise a problem if the called routine returns a result of type void (i.e. no result) but the caller wants a result (e.g. the identity function). *) val () = gencde (pt, ToStack, EndOfProc, NONE) val () = genReturn (numOfArgs, cvec); in (* body of codegen *) (* Having code-generated the body of the function, it is copied into a new data segment. *) copyCode{code = cvec, maxStack = !maxStack, resultClosure=resultClosure, numberOfArguments=numOfArgs} end (* codegen *); fun gencodeLambda({ name, body, argTypes, localCount, ...}:bicLambdaForm, parameters, closure) = let (* make the code buffer for the new function. *) val newCode : code = codeCreate (name, parameters) (* This function must have no non-local references. *) in codegen (body, newCode, closure, List.length argTypes, localCount, parameters) end local val makeEntryPoint: string -> machineWord = RunCall.rtsCallFull1 "PolyCreateEntryPointObject" fun rtsCall makeCall (entryName: string, numOfArgs, debugArgs: Universal.universal list): machineWord = let open Address val cvec = codeCreate (entryName, debugArgs) val entryPointAddr = makeEntryPoint entryName (* Each argument is at the same offset, essentially we're just shifting them *) fun genLocals 0 = () | genLocals n = (genLocal(numOfArgs +1, cvec); genLocals (n-1)) val () = genLocals numOfArgs val () = pushConst(entryPointAddr, cvec) val () = makeCall(numOfArgs, cvec) val () = genReturn (numOfArgs, cvec) val closure = makeConstantClosure() val () = copyCode{code=cvec, maxStack=numOfArgs+1, numberOfArguments=numOfArgs, resultClosure=closure} in closureAsAddress closure end in structure Foreign = struct val rtsCallFast = rtsCall genRTSCallFast fun rtsCallFastRealtoReal(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastRealtoReal c) (entryName, 1, debugArgs) and rtsCallFastRealRealtoReal(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastRealRealtoReal c) (entryName, 2, debugArgs) and rtsCallFastGeneraltoReal(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastGeneraltoReal c) (entryName, 1, debugArgs) and rtsCallFastRealGeneraltoReal(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastRealGeneraltoReal c) (entryName, 2, debugArgs) fun rtsCallFastFloattoFloat(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastFloattoFloat c) (entryName, 1, debugArgs) and rtsCallFastFloatFloattoFloat(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastFloatFloattoFloat c) (entryName, 2, debugArgs) and rtsCallFastGeneraltoFloat(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastGeneraltoFloat c) (entryName, 1, debugArgs) and rtsCallFastFloatGeneraltoFloat(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastFloatGeneraltoFloat c) (entryName, 2, debugArgs) type abi = int (* 
This must match the type in Foreign.LowLevel. Once this is bootstrapped we could use that type but note that this is the type we use within the compiler and we build Foreign.LowLevel AFTER compiling this. *) datatype cTypeForm = CTypeFloatingPt | CTypePointer | CTypeSignedInt | CTypeUnsignedInt | CTypeStruct of cType list | CTypeVoid withtype cType = { typeForm: cTypeForm, align: word, size: word } val abiList: unit -> (string * abi) list = RunCall.rtsCallFull0 "PolyInterpretedGetAbiList" type cif = Foreign.Memory.voidStar val createCIF: abi * cType * cType list -> cif= RunCall.rtsCallFull3 "PolyInterpretedCreateCIF" val callCFunction: cif * LargeWord.word * LargeWord.word * LargeWord.word -> unit = RunCall.rtsCallFull4 "PolyInterpretedCallFunction" (* foreignCall returns a function that actually calls the foreign function. *) fun foreignCall(abi, argTypes, resultType) = let val memocif = Foreign.Memory.memoise (fn () => createCIF(abi, resultType, argTypes)) () val closure = makeConstantClosure() (* For compatibility with the native code version we have to construct a function that takes three arguments rather than a single triple. *) val bodyCode = BICEval{function=BICConstnt(toMachineWord callCFunction, []), argList=[ (BICTuple[ BICEval{ function=BICConstnt(toMachineWord memocif, []), argList=[(BICConstnt(toMachineWord 0, []), GeneralType)], (* Unit. *) resultType=GeneralType }, BICExtract(BICLoadArgument 0), BICExtract(BICLoadArgument 2), BICExtract(BICLoadArgument 1)], GeneralType) ], resultType=GeneralType} val lambdaCode = { body = bodyCode, name = "foreignCall", closure=[], argTypes=[GeneralType, GeneralType, GeneralType], resultType = GeneralType, localCount=0, heapClosure=false} val () = gencodeLambda(lambdaCode, [], closure) in closureAsAddress closure end fun buildCallBack((*abi*) _, (*argTypes*) _, (*resultType*)_) = let fun buildClosure ((*mlFun*)_: LargeWord.word*LargeWord.word -> unit) = (* The result is the SysWord.word holding the C function. *) raise Foreign.Foreign "foreignCall not implemented" in Address.toMachineWord buildClosure end end end structure Sharing = struct open BACKENDTREE.Sharing type closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/CODETREE_FUNCTIONS.sml b/mlsource/MLCompiler/CodeTree/CODETREE_FUNCTIONS.sml index a697ed4d..0ae5d7dc 100644 --- a/mlsource/MLCompiler/CodeTree/CODETREE_FUNCTIONS.sml +++ b/mlsource/MLCompiler/CodeTree/CODETREE_FUNCTIONS.sml @@ -1,625 +1,625 @@ (* Copyright (c) 2012,13,16,18-21 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Miscellaneous construction and operation functions on the code-tree. 
*) functor CODETREE_FUNCTIONS( structure BASECODETREE: BaseCodeTreeSig structure STRONGLY: sig val stronglyConnectedComponents: {nodeAddress: 'a -> int, arcs: 'a -> int list } -> 'a list -> 'a list list end ) : CodetreeFunctionsSig = struct open BASECODETREE open STRONGLY open Address exception InternalError = Misc.InternalError fun mkEnv([], exp) = exp | mkEnv(decs, exp) = Newenv(decs, exp) val word0 = toMachineWord 0 and word1 = toMachineWord 1 val False = word0 and True = word1 val F_mutable_words : Word8.word = Word8.orb (F_words, F_mutable) val CodeFalse = Constnt(False, []) and CodeTrue = Constnt(True, []) and CodeZero = Constnt(word0, []) (* Properties of code. This indicates the extent to which the code has side-effects (i.e. where even if the result is unused the code still needs to be produced) or is applicative (i.e. where its value depends only arguments and can safely be reordered). *) (* The RTS has a table of properties for RTS functions. The 103 call returns these Or-ed into the register mask. *) val PROPWORD_NORAISE = 0wx40000000 and PROPWORD_NOUPDATE = 0wx20000000 and PROPWORD_NODEREF = 0wx10000000 (* Since RTS calls are being eliminated leave residual versions of these. *) fun earlyRtsCall _ = false and sideEffectFreeRTSCall _ = false local infix orb andb val op orb = Word.orb and op andb = Word.andb val noSideEffect = PROPWORD_NORAISE orb PROPWORD_NOUPDATE val applicative = noSideEffect orb PROPWORD_NODEREF in fun codeProps (Lambda _) = applicative | codeProps (Constnt _) = applicative | codeProps (Extract _) = applicative | codeProps (TagTest{ test, ... }) = codeProps test | codeProps (Cond(i, t, e)) = codeProps i andb codeProps t andb codeProps e | codeProps (Newenv(decs, exp)) = List.foldl (fn (d, r) => bindingProps d andb r) (codeProps exp) decs | codeProps (Handle { exp, handler, ... }) = (* A handler processes all the exceptions in the body *) (codeProps exp orb PROPWORD_NORAISE) andb codeProps handler | codeProps (Tuple { fields, ...}) = testList fields | codeProps (Indirect{base, ...}) = codeProps base (* A built-in function may be side-effect free. This can occur if we have, for example, "if exp1 orelse exp2" where exp2 can be reduced to "true", typically because it's inside an inline function and some of the arguments to the function are constants. This then gets converted to (exp1; true) and we can eliminate exp1 if it is simply a comparison. *) | codeProps (Unary{oper, arg1}) = let open BuiltIns val operProps = case oper of NotBoolean => applicative | IsTaggedValue => applicative | MemoryCellLength => applicative (* MemoryCellFlags could return a different result if a mutable cell was locked. *) | MemoryCellFlags => applicative | ClearMutableFlag => Word.orb(PROPWORD_NODEREF, PROPWORD_NORAISE) | AtomicReset => Word.orb(PROPWORD_NODEREF, PROPWORD_NORAISE) | LongWordToTagged => applicative | SignedToLongWord => applicative | UnsignedToLongWord => applicative | RealAbs _ => applicative (* Does not depend on rounding setting. *) | RealNeg _ => applicative (* Does not depend on rounding setting. *) (* If we float a 64-bit int to a 64-bit floating point value we may lose precision so this depends on the current rounding mode. *) | RealFixedInt _ => Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) | FloatToDouble => applicative - (* The rounding mode is set explicitly. *) - | DoubleToFloat _ => applicative + (* This also depends on the current rounding mode. 
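(* Omitting PROPWORD_NODEREF stops the conversion from being reordered past code that changes the rounding mode; it still neither updates the store nor raises an exception. *)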
*) + | DoubleToFloat => Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) (* May raise the overflow exception *) | RealToInt _ => PROPWORD_NOUPDATE orb PROPWORD_NODEREF | TouchAddress => PROPWORD_NORAISE (* Treat as updating a notional reference count. *) | AllocCStack => PROPWORD_NORAISE in operProps andb codeProps arg1 end | codeProps (Binary{oper, arg1, arg2}) = let open BuiltIns val mayRaise = PROPWORD_NOUPDATE orb PROPWORD_NODEREF val operProps = case oper of WordComparison _ => applicative | FixedPrecisionArith _ => mayRaise | WordArith _ => applicative (* Quot and Rem don't raise exceptions - zero checking is done before. *) | WordLogical _ => applicative | WordShift _ => applicative | AllocateByteMemory => Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) (* Allocation returns a different value on each call. *) | LargeWordComparison _ => applicative | LargeWordArith _ => applicative (* Quot and Rem don't raise exceptions - zero checking is done before. *) | LargeWordLogical _ => applicative | LargeWordShift _ => applicative | RealComparison _ => applicative (* Real arithmetic operations depend on the current rounding setting. *) | RealArith _ => Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) | FreeCStack => PROPWORD_NORAISE orb PROPWORD_NODEREF | PointerEq => applicative | AtomicExchangeAdd => PROPWORD_NORAISE in operProps andb codeProps arg1 andb codeProps arg2 end | codeProps (Nullary{oper=BuiltIns.GetCurrentThreadId}) = Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) | codeProps (Nullary{oper=BuiltIns.CheckRTSException}) = PROPWORD_NOUPDATE (* Although Pause does not affect the store directly it does have observable effects. *) | codeProps (Nullary{oper=BuiltIns.CPUPause}) = PROPWORD_NORAISE | codeProps (Arbitrary{shortCond, arg1, arg2, longCall, ...}) = (* Arbitrary precision operations are applicative but the longCall is a function call. It should never have a side-effect so it might be better to remove it. *) codeProps shortCond andb codeProps arg1 andb codeProps arg2 andb codeProps longCall | codeProps (AllocateWordMemory {numWords, flags, initial}) = let val operProps = Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) in operProps andb codeProps numWords andb codeProps flags andb codeProps initial end | codeProps (Eval _) = 0w0 | codeProps(Raise exp) = codeProps exp andb (Word.notb PROPWORD_NORAISE) (* Treat these as unsafe at least for the moment. 
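(* A property word of 0w0 offers no guarantees at all: the expression may raise an exception, update the store and depend on its contents. *)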
*) | codeProps(BeginLoop _) = 0w0 | codeProps(Loop _) = 0w0 | codeProps (SetContainer _) = 0w0 | codeProps (LoadOperation {address, kind}) = let val operProps = case kind of LoadStoreMLWord {isImmutable=true} => applicative | LoadStoreMLByte {isImmutable=true} => applicative | _ => Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) in operProps andb addressProps address end | codeProps (StoreOperation {address, value, ...}) = Word.orb(PROPWORD_NODEREF, PROPWORD_NORAISE) andb addressProps address andb codeProps value | codeProps (BlockOperation {kind, sourceLeft, destRight, length}) = let val operProps = case kind of BlockOpMove _ => PROPWORD_NORAISE | BlockOpEqualByte => applicative | BlockOpCompareByte => applicative in operProps andb addressProps sourceLeft andb addressProps destRight andb codeProps length end and testList t = List.foldl(fn (c, r) => codeProps c andb r) applicative t and bindingProps(Declar{value, ...}) = codeProps value | bindingProps(RecDecs _) = applicative (* These should all be lambdas *) | bindingProps(NullBinding c) = codeProps c | bindingProps(Container{setter, ...}) = codeProps setter and addressProps{base, index=NONE, ...} = codeProps base | addressProps{base, index=SOME index, ...} = codeProps base andb codeProps index (* sideEffectFree - does not raise an exception or make an assignment. *) fun sideEffectFree c = (codeProps c andb noSideEffect) = noSideEffect (* reorderable - does not raise an exception or access a reference. *) and reorderable c = codeProps c = applicative end (* Return the inline property if it is set. *) fun findInline [] = EnvSpecNone | findInline (h::t) = if Universal.tagIs CodeTags.inlineCodeTag h then Universal.tagProject CodeTags.inlineCodeTag h else findInline t (* Makes a constant value from an expression which is known to be constant but may involve inline functions, tuples etc. *) fun makeConstVal (cVal:codetree) = let fun makeVal (c as Constnt _) = c (* should just be a tuple *) (* Get a vector, copy the entries into it and return it as a constant. *) | makeVal (Tuple {fields= [], ...}) = CodeZero (* should have been optimised already! *) | makeVal (Tuple {fields, ...}) = let val tupleSize = List.length fields val vec : address = allocWordData(Word.fromInt tupleSize, F_mutable_words, word0) val fieldCode = map makeVal fields fun copyToVec ([], _) = [] | copyToVec (Constnt(w, prop) :: t, locn) = ( assignWord (vec, locn, w); prop :: copyToVec (t, locn + 0w1) ) | copyToVec _ = raise InternalError "not constant" val props = copyToVec(fieldCode, 0w0) (* If any of the constants have properties create a tuple property for the result. *) val tupleProps = if List.all null props then [] else let (* We also need to construct an EnvSpecTuple property because findInline does not look at tuple properties. 
*) val inlineProps = map findInline props val inlineProp = if List.all (fn EnvSpecNone => true | _ => false) inlineProps then [] else let fun tupleEntry n = (EnvGenConst(loadWord(vec, Word.fromInt n), List.nth(props, n)), List.nth(inlineProps, n)) in [Universal.tagInject CodeTags.inlineCodeTag (EnvSpecTuple(tupleSize, tupleEntry))] end in Universal.tagInject CodeTags.tupleTag props :: inlineProp end in lock vec; Constnt(toMachineWord vec, tupleProps) end | makeVal _ = raise InternalError "makeVal - not constant or tuple" in makeVal cVal end local fun allConsts [] = true | allConsts (Constnt _ :: t) = allConsts t | allConsts _ = false fun mkRecord isVar xp = let val tuple = Tuple{fields = xp, isVariant = isVar } in if allConsts xp then (* Make it now. *) makeConstVal tuple else tuple end; in val mkTuple = mkRecord false and mkDatatype = mkRecord true end (* Set the inline property. If the property is already present it is replaced. If the property we are setting is EnvSpecNone no property is set. *) fun setInline p (h::t) = if Universal.tagIs CodeTags.inlineCodeTag h then setInline p t else h :: setInline p t | setInline EnvSpecNone [] = [] | setInline p [] = [Universal.tagInject CodeTags.inlineCodeTag p] (* These are very frequently used and it might be worth making special bindings for values such as 0, 1, 2, 3 etc to reduce garbage. *) fun checkNonZero n = if n < 0 then raise InternalError "mkLoadxx: argument negative" else n val mkLoadLocal = Extract o LoadLocal o checkNonZero and mkLoadArgument = Extract o LoadArgument o checkNonZero and mkLoadClosure = Extract o LoadClosure o checkNonZero (* Set the container to the fields of the record. Try to push this down as far as possible. *) fun mkSetContainer(container, Cond(ifpt, thenpt, elsept), filter) = Cond(ifpt, mkSetContainer(container, thenpt, filter), mkSetContainer(container, elsept, filter)) | mkSetContainer(container, Newenv(decs, exp), filter) = Newenv(decs, mkSetContainer(container, exp, filter)) | mkSetContainer(_, r as Raise _, _) = r (* We may well have the situation where one branch of an "if" raises an exception. We can simply raise the exception on that branch. *) | mkSetContainer(container, Handle {exp, handler, exPacketAddr}, filter) = Handle{exp=mkSetContainer(container, exp, filter), handler=mkSetContainer(container, handler, filter), exPacketAddr = exPacketAddr} | mkSetContainer(container, tuple, filter) = SetContainer{container = container, tuple = tuple, filter = filter } local val except: exn = InternalError "Invalid load encountered in compiler" (* Exception value to use for invalid cases. We put this in the code but it should never actually be executed. *) val raiseError = Raise (Constnt (toMachineWord except, [])) in (* Look for an entry in a tuple. Used in both the optimiser and in mkInd. *) fun findEntryInBlock (Tuple { fields, isVariant, ...}, offset, isVar) = ( isVariant = isVar orelse raise InternalError "findEntryInBlock: tuple/datatype mismatch"; if offset < List.length fields then List.nth(fields, offset) (* This can arise if we're processing a branch of a case discriminating on a datatype which won't actually match at run-time. e.g. Tests/Succeed/Test030. *) else if isVar then raiseError else raise InternalError "findEntryInBlock: invalid address" ) | findEntryInBlock (Constnt (b, props), offset, isVar) = let (* Find the tuple property if it is present and extract the field props. 
*) val fieldProps = case List.find(Universal.tagIs CodeTags.tupleTag) props of NONE => [] | SOME p => List.nth(Universal.tagProject CodeTags.tupleTag p, offset) in case findInline props of EnvSpecTuple(_, env) => (* Do the selection now. This is especially useful if we have a global structure *) (* At the moment at least we assume that we can get all the properties from the tuple selection. *) ( case env offset of (EnvGenConst(w, p), inl) => Constnt(w, setInline inl p) (* The general value from selecting a field from a constant tuple must be a constant. *) | _ => raise InternalError "findEntryInBlock: not constant" ) | _ => (* The ML compiler may generate loads from invalid addresses as a result of a val binding to a constant which has the wrong shape. e.g. val a :: b = nil It will always result in a Bind exception being generated before the invalid load, but we have to be careful that the optimiser does not fall over. *) if isShort b orelse not (Address.isWords (toAddress b)) orelse Address.length (toAddress b) <= Word.fromInt offset then if isVar then raiseError else raise InternalError "findEntryInBlock: invalid address" else Constnt (loadWord (toAddress b, Word.fromInt offset), fieldProps) end | findEntryInBlock(base, offset, isVar) = Indirect {base = base, offset = offset, indKind = if isVar then IndVariant else IndTuple} (* anything else *) end (* Exported indirect load operation i.e. load a field from a tuple. We can't use findEntryInBlock in every case since that discards unused entries in a tuple and at this point we haven't checked that the unused entries don't have side-effects/raise exceptions e.g. #1 (1, raise Fail "bad") *) local fun mkIndirect isVar (addr, base as Constnt _) = findEntryInBlock(base, addr, isVar) | mkIndirect isVar (addr, base) = Indirect {base = base, offset = addr, indKind = if isVar then IndVariant else IndTuple} in val mkInd = mkIndirect false and mkVarField = mkIndirect true end fun mkIndContainer(addr, base) = Indirect{offset=addr, base=base, indKind=IndContainer} (* Create a tuple from a container. *) fun mkTupleFromContainer(addr, size) = Tuple{fields = List.tabulate(size, fn n => mkIndContainer(n, mkLoadLocal addr)), isVariant = false} (* Get the value from the code. *) fun evalue (Constnt(c, _)) = SOME c | evalue _ = NONE (* This is really to simplify the change from mkEnv taking a codetree list to taking a codeBinding list * code. This extracts the last entry which must be a NullBinding and packages the declarations with it. *) fun decSequenceWithFinalExp decs = let fun splitLast _ [] = raise InternalError "decSequenceWithFinalExp: empty" | splitLast decs [NullBinding exp] = (List.rev decs, exp) | splitLast _ [_] = raise InternalError "decSequenceWithFinalExp: last is not a NullDec" | splitLast decs (hd::tl) = splitLast (hd:: decs) tl in mkEnv(splitLast [] decs) end local type node = { addr: int, lambda: lambdaForm, use: codeUse list } fun nodeAddress({addr, ...}: node) = addr and arcs({lambda={closure, ...}, ...}: node) = List.foldl(fn (LoadLocal addr, l) => addr :: l | (_, l) => l) [] closure in val stronglyConnected = stronglyConnectedComponents{nodeAddress=nodeAddress, arcs=arcs} end (* In general any mutually recursive declaration can refer to any other. It's better to partition the recursive declarations into strongly connected components i.e. those that actually refer to each other. *) fun partitionMutualBindings(RecDecs rlist) = let val processed = stronglyConnected rlist (* Convert the result. 
Note that stronglyConnectedComponents returns the dependencies in the reverse order, i.e. if X depends on Y but not the other way round then X will appear before Y in the list. We need to reverse it so that X goes after Y. *) fun rebuild ([{lambda, addr, use}], tl) = Declar{addr=addr, use=use, value=Lambda lambda} :: tl | rebuild (multiple, tl) = RecDecs multiple :: tl in List.foldl rebuild [] processed end (* This is only intended for RecDecs but it's simpler to handle all bindings. *) | partitionMutualBindings other = [other] (* Functions to help in building a closure. *) datatype createClosure = Closure of (loadForm * int) list ref fun makeClosure() = Closure(ref []) (* Function to build a closure. Items are added to the closure if they are not already there. *) fun addToClosure (Closure closureList) (ext: loadForm): loadForm = case (List.find (fn (l, _) => l = ext) (!closureList), ! closureList) of (SOME(_, n), _) => (* Already there *) LoadClosure n | (NONE, []) => (* Not there - first *) (closureList := [(ext, 0)]; LoadClosure 0) | (NONE, cl as (_, n) :: _) => (closureList := (ext, n+1) :: cl; LoadClosure(n+1)) fun extractClosure(Closure (ref closureList)) = List.foldl (fn ((ext, _), l) => ext :: l) [] closureList datatype inlineTest = TooBig | NonRecursive | TailRecursive of bool vector | NonTailRecursive of bool vector fun evaluateInlining(function, numArgs, maxInlineSize) = let (* This checks for the possibility of inlining a function. It sees if it is small enough according to some rough estimate of the cost and it also looks for recursive uses of the function. Typically if the function is small enough to inline there will be only one recursive use but we consider the possibility of more than one. If the only uses are tail recursive we can replace the recursive calls by a Loop with a BeginLoop outside it. If there are non-tail recursive calls we may be able to lift out arguments that are unchanged. For example, for fun map f [] = [] | map f (a::b) = f a :: map f b it may be worth lifting out f and generating specific mapping functions for each application. *) val hasRecursiveCall = ref false (* Set to true if there is a recursive call. *) val allTail = ref true (* Set to false if there is a non-tail recursive call. *) (* An element of this is set to false if the actual value is anything other than the original argument. At the end we are then left with the arguments that are unchanged. *) val argMod = Array.array(numArgs, true) infix 6 -- (* Subtract y from x but return 0 rather than a negative number. *) fun x -- y = if x >= y then x-y else 0 (* Check for the code size and also recursive references. N.B. We assume in hasLoop that tail recursion applies only with Cond, Newenv and Handler. *) fun checkUse _ (_, 0, _) = 0 (* The function is too big to inline. *) | checkUse isMain (Newenv(decs, exp), cl, isTail) = let fun checkBind (Declar{value, ...}, cl) = checkUse isMain(value, cl, false) | checkBind (RecDecs decs, cl) = List.foldl(fn ({lambda, ...}, n) => checkUse isMain (Lambda lambda, n, false)) cl decs | checkBind (NullBinding c, cl) = checkUse isMain (c, cl, false) | checkBind (Container{setter, ...}, cl) = checkUse isMain(setter, cl -- 1, false) in checkUse isMain (exp, List.foldl checkBind cl decs, isTail) end | checkUse _ (Constnt(w, _), cl, _) = if isShort w then cl else cl -- 1 (* A recursive reference in any context other than a call prevents any inlining. 
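(* For example, if the function escapes by being stored in a data structure or passed as an argument we cannot rewrite the recursion as a loop, so the remaining size is forced to zero, which reports TooBig. *)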
*) | checkUse true (Extract LoadRecursive, _, _) = 0 | checkUse _ (Extract _, cl, _) = cl -- 1 | checkUse isMain (Indirect{base, ...}, cl, _) = checkUse isMain (base, cl -- 1, false) | checkUse _ (Lambda {body, argTypes, closure, ...}, cl, _) = (* For the moment, any recursive use in an inner function prevents inlining. *) if List.exists (fn LoadRecursive => true | _ => false) closure then 0 else checkUse false (body, cl -- (List.length argTypes + List.length closure), false) | checkUse true (Eval{function = Extract LoadRecursive, argList, ...}, cl, isTail) = let (* If the actual argument is anything but the original argument then the corresponding entry in the array is set to false. *) fun testArg((exp, _), n) = ( if (case exp of Extract(LoadArgument a) => n = a | _ => false) then () else Array.update(argMod, n, false); n+1 ) in List.foldl testArg 0 argList; hasRecursiveCall := true; if isTail then () else allTail := false; List.foldl(fn ((e, _), n) => checkUse true (e, n, false)) (cl--3) argList end | checkUse isMain (Eval{function, argList, ...}, cl, _) = checkUse isMain (function, List.foldl(fn ((e, _), n) => checkUse isMain (e, n, false)) (cl--2) argList, false) | checkUse _ (Nullary _, cl, _) = cl -- 1 | checkUse isMain (Unary{arg1, ...}, cl, _) = checkUse isMain (arg1, cl -- 1, false) | checkUse isMain (Binary{arg1, arg2, ...}, cl, _) = checkUseList isMain ([arg1, arg2], cl -- 1) | checkUse isMain (Arbitrary{arg1, arg2, ...}, cl, _) = checkUseList isMain ([arg1, arg2], cl -- 4) | checkUse isMain (AllocateWordMemory {numWords, flags, initial}, cl, _) = checkUseList isMain ([numWords, flags, initial], cl -- 1) | checkUse isMain (Cond(i, t, e), cl, isTail) = checkUse isMain (i, checkUse isMain (t, checkUse isMain (e, cl -- 2, isTail), isTail), false) | checkUse isMain (BeginLoop { loop, arguments, ...}, cl, _) = checkUse isMain (loop, List.foldl (fn (({value, ...}, _), n) => checkUse isMain (value, n, false)) cl arguments, false) | checkUse isMain (Loop args, cl, _) = List.foldl(fn ((e, _), n) => checkUse isMain (e, n, false)) cl args | checkUse isMain (Raise c, cl, _) = checkUse isMain (c, cl -- 1, false) | checkUse isMain (Handle {exp, handler, ...}, cl, isTail) = checkUse isMain (exp, checkUse isMain (handler, cl, isTail), false) | checkUse isMain (Tuple{ fields, ...}, cl, _) = checkUseList isMain (fields, cl) | checkUse isMain (SetContainer{container, tuple = Tuple { fields, ...}, ...}, cl, _) = (* This can be optimised *) checkUse isMain (container, checkUseList isMain (fields, cl), false) | checkUse isMain (SetContainer{container, tuple, filter}, cl, _) = checkUse isMain (container, checkUse isMain (tuple, cl -- (BoolVector.length filter), false), false) | checkUse isMain (TagTest{test, ...}, cl, _) = checkUse isMain (test, cl -- 1, false) | checkUse isMain (LoadOperation{address, ...}, cl, _) = checkUseAddress isMain (address, cl -- 1) | checkUse isMain (StoreOperation{address, value, ...}, cl, _) = checkUse isMain (value, checkUseAddress isMain (address, cl -- 1), false) | checkUse isMain (BlockOperation{sourceLeft, destRight, length, ...}, cl, _) = checkUse isMain (length, checkUseAddress isMain (destRight, checkUseAddress isMain (sourceLeft, cl -- 1)), false) and checkUseList isMain (elems, cl) = List.foldl(fn (e, n) => checkUse isMain (e, n, false)) cl elems and checkUseAddress isMain ({base, index=NONE, ...}, cl) = checkUse isMain (base, cl, false) | checkUseAddress isMain ({base, index=SOME index, ...}, cl) = checkUseList isMain ([base, index], cl) val costLeft = checkUse 
true (function, maxInlineSize, true) in if costLeft = 0 then TooBig else if not (! hasRecursiveCall) then NonRecursive else if ! allTail then TailRecursive(Array.vector argMod) else NonTailRecursive(Array.vector argMod) end structure Sharing = struct type codetree = codetree and codeBinding = codeBinding and loadForm = loadForm and createClosure = createClosure and envSpecial = envSpecial end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML index e6ad38db..3823fbbf 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML @@ -1,4089 +1,4066 @@ (* Copyright David C. J. Matthews 2016-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86CodetreeToICode( structure BACKENDTREE: BackendIntermediateCodeSig structure ICODE: ICodeSig structure DEBUG: DEBUG structure X86FOREIGN: FOREIGNCALLSIG structure ICODETRANSFORM: X86ICODETRANSFORMSIG structure CODE_ARRAY: CODEARRAYSIG sharing ICODE.Sharing = ICODETRANSFORM.Sharing = CODE_ARRAY.Sharing ): GENCODESIG = struct open BACKENDTREE open Address open ICODE open CODE_ARRAY exception InternalError = Misc.InternalError local val regs = case targetArch of Native32Bit => [eax, ebx] | Native64Bit => [eax, ebx, r8, r9, r10] | ObjectId32Bit => [eax, esi, r8, r9, r10] val fpResult = case targetArch of Native32Bit => FPReg fp0 | _ => XMMReg xmm0 val fpArgRegs = case targetArch of Native32Bit => [] | _ => [xmm0, xmm1, xmm2] in val generalArgRegs = List.map GenReg regs val floatingPtArgRegs = List.map XMMReg fpArgRegs fun resultReg GeneralType = GenReg eax | resultReg DoubleFloatType = fpResult | resultReg SingleFloatType = fpResult end (* tag a short constant *) fun tag c = 2 * c + 1 (* shift a short constant, but don't set tag bit *) fun semitag c = 2 * c (* Reverse a list and append the second. This is used a lot when converting between the reverse and forward list versions. e.g. codeToICode and codeToICodeRev *) fun revApp([], l) = l | revApp(hd :: tl, l) = revApp(tl, hd :: l) datatype blockStruct = BlockSimple of x86ICode | BlockExit of x86ICode | BlockLabel of int | BlockFlow of controlFlow | BlockBegin of { regArgs: (preg * reg) list, stackArgs: stackLocn list } | BlockRaiseAndHandle of x86ICode * int | BlockOptionalHandle of {call: x86ICode, handler: int, label: int } local open RunCall val F_mutable_bytes = Word.fromLargeWord(Word8.toLargeWord(Word8.orb (F_mutable, F_bytes))) fun makeRealConst l = let val r = allocateByteMemory(0wx8 div bytesPerWord, F_mutable_bytes) fun setBytes([], _) = () | setBytes(hd::tl, n) = (storeByte(r, n, hd); setBytes(tl, n+0wx1)) val () = setBytes(l, 0w0) val () = clearMutableBit r in r end in (* These are floating point constants used to change and mask the sign bit. 
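(* The byte lists are little-endian: for a double the sign is the top bit of the eighth byte, so realSignBit is 0x8000000000000000 and realAbsMask is its complement 0x7FFFFFFFFFFFFFFF; the float constants set or mask the top bit of the fourth byte in the same way. *)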
*) val realSignBit: machineWord = makeRealConst [0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx80] and realAbsMask: machineWord = makeRealConst [0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wx7f] and floatSignBit: machineWord = makeRealConst [0wx00, 0wx00, 0wx00, 0wx80, 0wx00, 0wx00, 0wx00, 0wx00] and floatAbsMask: machineWord = makeRealConst [0wxff, 0wxff, 0wxff, 0wx7f, 0wx00, 0wx00, 0wx00, 0wx00] end datatype commutative = Commutative | NonCommutative (* Check that a large-word constant looks right and get the value as a large int*) fun largeWordConstant value = if isShort value then raise InternalError "largeWordConstant: invalid" else let val addr = toAddress value in if length addr <> nativeWordSize div wordSize orelse flags addr <> F_bytes then raise InternalError "largeWordConstant: invalid" else (); LargeWord.toLargeInt(RunCall.unsafeCast addr) end fun codeFunctionToX86({body, localCount, name, argTypes, resultType=fnResultType, closure, ...}:bicLambdaForm, debugSwitches, resultClosure) = let (* Pseudo-registers are allocated sequentially and the properties added to the list. *) val pregCounter = ref 0 val pregPropList = ref [] fun newPReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropGeneral :: !pregPropList in PReg regNo end and newUReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropUntagged :: !pregPropList in PReg regNo end and newStackLoc size = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropStack size :: !pregPropList in StackLoc{size=size, rno=regNo} end and newMergeReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropMultiple :: !pregPropList in PReg regNo end datatype locationValue = NoLocation | PregLocation of preg | ContainerLocation of { container: stackLocn, stackOffset: int } val locToPregArray = Array.array(localCount, NoLocation) val labelCounter = ref 1 (* Start at 1. Zero is used for the root. *) fun newLabel() = !labelCounter before labelCounter := !labelCounter + 1 val ccRefCounter = ref 0 fun newCCRef() = CcRef(!ccRefCounter) before ccRefCounter := !ccRefCounter + 1 fun constantAsArgument value = if isShort value then IntegerConstant(tag(Word.toLargeIntX(toShort value))) else AddressConstant value (* Create the branch condition from the test, isSigned and jumpOn values. (In)equality tests are the same for signed and unsigned values. 
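(* For example, TestLess maps to JL when signed and JB (below) when unsigned; jumpOn=false selects the negated condition (JGE or JNB) so the caller can branch on the failing case and fall through otherwise. *)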
*) local open BuiltIns in fun testAsBranch(TestEqual, _, true) = JE | testAsBranch(TestEqual, _, false) = JNE (* Signed tests *) | testAsBranch(TestLess, true, true) = JL | testAsBranch(TestLess, true, false) = JGE | testAsBranch(TestLessEqual, true, true) = JLE | testAsBranch(TestLessEqual, true, false) = JG | testAsBranch(TestGreater, true, true) = JG | testAsBranch(TestGreater, true, false) = JLE | testAsBranch(TestGreaterEqual, true, true) = JGE | testAsBranch(TestGreaterEqual, true, false) = JL (* Unsigned tests *) | testAsBranch(TestLess, false, true) = JB | testAsBranch(TestLess, false, false) = JNB | testAsBranch(TestLessEqual, false, true) = JNA | testAsBranch(TestLessEqual, false, false) = JA | testAsBranch(TestGreater, false, true) = JA | testAsBranch(TestGreater, false, false) = JNA | testAsBranch(TestGreaterEqual, false, true) = JNB | testAsBranch(TestGreaterEqual, false, false) = JB | testAsBranch(TestUnordered, _, _) = raise InternalError "TestUnordered" (* Switch the direction of a test if we turn c op x into x op c. *) fun leftRightTest TestEqual = TestEqual | leftRightTest TestLess = TestGreater | leftRightTest TestLessEqual = TestGreaterEqual | leftRightTest TestGreater = TestLess | leftRightTest TestGreaterEqual = TestLessEqual | leftRightTest TestUnordered = TestUnordered end (* Overflow check. This raises Overflow if the overflow bit is set in the cc. This generates a single block for the function unless there is a handler. As well as reducing the size of the code this also means that overflow checks are generally JO instructions to the end of the code. Since the default branch prediction is not to take forward jumps this should improve prefetching on the normal, non-overflow, path. *) fun checkOverflow ({currHandler=NONE, overflowBlock=ref(SOME overFlowLab), ...}) ccRef = (* It's already been set and there's no surrounding handler - use this. *) let val noOverflowLab = newLabel() in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel noOverflowLab ] end | checkOverflow ({currHandler=NONE, overflowBlock, ...}) ccRef = let (* *) val overFlowLab = newLabel() and noOverflowLab = newLabel() val packetReg = newPReg() val () = overflowBlock := SOME overFlowLab in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel overFlowLab, BlockSimple(LoadArgument{source=AddressConstant(toMachineWord(Overflow)), dest=packetReg, kind=movePolyWord}), BlockExit(RaiseExceptionPacket{packetReg=packetReg}), BlockLabel noOverflowLab ] end | checkOverflow ({currHandler=SOME h, ...}) ccRef = let val overFlowLab = newLabel() and noOverflowLab = newLabel() val packetReg = newPReg() in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel overFlowLab, BlockSimple(LoadArgument{source=AddressConstant(toMachineWord(Overflow)), dest=packetReg, kind=movePolyWord}), BlockRaiseAndHandle(RaiseExceptionPacket{packetReg=packetReg}, h), BlockLabel noOverflowLab ] end fun setAndRestoreRounding (rndMode, doWithRounding) = let open IEEEReal val savedRnd = newUReg() and setRnd = newUReg() in case fpMode of FPModeX87 => [BlockSimple(GetX87ControlReg{dest=savedRnd})] @ (* Set the appropriate bits in the control word. *) (case rndMode of TO_NEAREST => (* The bits need to be zero - just mask them. 
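(* The X87 rounding control is bits 10-11 of the control word (00 nearest, 01 down, 10 up, 11 truncate), hence the 0xf3ff mask and the 0x400, 0x800 and 0xc00 settings; the SSE2 MXCSR uses the same two-bit encoding at bits 13-14, giving 0xffff9fff, 0x2000, 0x4000 and 0x6000. *)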
*) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32})] | TO_NEGINF => let val wrk = newUReg() in (* Mask the bits and set to 01 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x400, ccRef=newCCRef(), opSize=OpSize32})] end | TO_POSINF => let val wrk = newUReg() in (* Mask the bits and set to 10 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x800, ccRef=newCCRef(), opSize=OpSize32})] end | TO_ZERO => (* The bits need to be one - just set them. *) [BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xc00, ccRef=newCCRef(), opSize=OpSize32})]) @ [BlockSimple(SetX87ControlReg{source=setRnd})] @ doWithRounding() @ (* Restore the original rounding. *) [BlockSimple(SetX87ControlReg{source=savedRnd})] | FPModeSSE2 => [BlockSimple(GetSSE2ControlReg{dest=savedRnd})] @ (* Set the appropriate bits in the control word. *) (case rndMode of TO_NEAREST => (* The bits need to be zero - just mask them. *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32})] | TO_NEGINF => let val wrk = newUReg() in (* Mask the bits and set to 01 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x2000, ccRef=newCCRef(), opSize=OpSize32})] end | TO_POSINF => let val wrk = newUReg() in (* Mask the bits and set to 10 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x4000, ccRef=newCCRef(), opSize=OpSize32})] end | TO_ZERO => (* The bits need to be one - just set them. *) [BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x6000, ccRef=newCCRef(), opSize=OpSize32})]) @ [BlockSimple(SetSSE2ControlReg{source=setRnd})] @ doWithRounding() @ [BlockSimple(SetSSE2ControlReg{source=savedRnd})] end (* Put a floating point value into a box or tag it so the value can be held in a general register. *) fun boxOrTagReal(srcReg, destReg, precision) = if precision = BuiltIns.PrecDouble orelse wordSize <> 0w8 then let open BuiltIns val boxFloat = case (fpMode, precision) of (FPModeX87, PrecDouble) => BoxX87Double | (FPModeX87, PrecSingle) => BoxX87Float | (FPModeSSE2, PrecDouble) => BoxSSE2Double | (FPModeSSE2, PrecSingle) => BoxSSE2Float in [BlockSimple(BoxValue{boxKind=boxFloat, source=srcReg, dest=destReg, saveRegs=[]})] end else [BlockSimple(TagFloat{source=srcReg, dest=destReg})] (* Indicate that the base address is actually an object index where appropriate. *) val memIndexOrObject = case targetArch of ObjectId32Bit => ObjectIndex | _ => NoMemIndex (* Generally we have an offset in words and no index register. 
*) fun wordOffsetAddress(offset, baseReg: preg): argument = MemoryLocation{offset=offset*Word.toInt wordSize, base=baseReg, index=memIndexOrObject, cache=NONE} (* The large-word operations all work on the value within the box pointed at by the register. We generate all large-word operations using this even where the X86 instruction requires a register. This allows the next level to optimise cases of cascaded instructions and avoid creating boxes for intermediate values. *) fun wordAt reg = wordOffsetAddress(0, reg) val returnAddressEntry = newStackLoc 1 datatype argLoc = ArgInReg of { realReg: reg, argReg: preg } | ArgOnStack of { stackOffset: int, stackReg: stackLocn } (* Pseudo-regs for the result, the closure and the args that were passed in real regs. *) val resultTarget = newPReg() val closureRegAddr = newPReg() (* Create a map for the arguments indicating their register or stack location. *) local (* Select the appropriate argument register depending on the argument type. *) fun argTypesToArgEntries([], _, _, _) = ([], [], [], []) | argTypesToArgEntries(DoubleFloatType :: tl, gRegs, fpReg :: fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val pRegArg = newPReg() and uRegArg = newUReg() in (ArgInReg{realReg=fpReg, argReg=pRegArg} :: argTypes, boxOrTagReal(uRegArg, pRegArg, BuiltIns.PrecDouble) @ argCode, (uRegArg, fpReg) :: argRegs, stackArgs) end | argTypesToArgEntries(SingleFloatType :: tl, gRegs, fpReg :: fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val pRegArg = newPReg() and uRegArg = newUReg() in (ArgInReg{realReg=fpReg, argReg=pRegArg} :: argTypes, boxOrTagReal(uRegArg, pRegArg, BuiltIns.PrecSingle) @ argCode, (uRegArg, fpReg) :: argRegs, stackArgs) end | argTypesToArgEntries(_ :: tl, gReg :: gRegs, fpRegs, n) = (* This deals with general arguments but also with extra floating point arguments. They are boxed as usual. *) let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val argReg=newPReg() in (ArgInReg{realReg=gReg, argReg=argReg} :: argTypes, argCode, (argReg, gReg) :: argRegs, stackArgs) end | argTypesToArgEntries(_ :: tl, [], fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, [], fpRegs, n-1) val stackLoc = newStackLoc 1 in (ArgOnStack {stackOffset=n, stackReg = stackLoc } :: argTypes, argCode, argRegs, stackLoc :: stackArgs) end val (argEntries, argCode, argRegs, stackArguments) = argTypesToArgEntries(argTypes, generalArgRegs, floatingPtArgRegs, List.length argTypes) val clReg = case closure of [] => [] | _ => [(closureRegAddr, GenReg edx)] in val argumentVector = Vector.fromList argEntries (* Start code for the function. *) val beginInstructions = argCode @ [BlockBegin{regArgs=clReg @ argRegs, stackArgs=stackArguments @ [returnAddressEntry]}] (* The number of arguments on the stack. Needed in return instrs and tail calls. *) val currentStackArgs = List.length stackArguments end (* The return instruction. This can be added on to various tails but there is always one at the end anyway. 
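(* Floating point results were boxed, or tagged for single precision with a 64-bit word size, when the body produced them, so they are unboxed here into the real floating point register before the return. *)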
*) fun returnInstruction({stackPtr, ...}, target, tailCode) = let val (returnCode, resReg) = case fnResultType of GeneralType => ([], target) | DoubleFloatType => let val resReg = newUReg() in ([BlockSimple(LoadArgument{source=wordAt target, dest=resReg, kind=MoveDouble})], resReg) end | SingleFloatType => let val resReg = newUReg() val unpack = if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument target, dest=resReg, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt target, dest=resReg, kind=MoveFloat}) in ([unpack], resReg) end in BlockExit(ReturnResultFromFunction{resultReg=resReg, realReg=resultReg fnResultType, numStackArgs=currentStackArgs}) :: returnCode @ (if stackPtr <> 0 then BlockSimple(ResetStackPtr{numWords=stackPtr, preserveCC=false}) :: tailCode else tailCode) end (* This controls what codeAsArgument returns. Different instructions have different requirements. If an option is set to false the value is instead loaded into a new preg. "const32s" means that it will fit into 32-bits. Any constant satisfies that on X86/32 but on the X86/64 we don't allow addresses because we can't be sure whether they will fit or not. *) type allowedArgument = { anyConstant: bool, const32s: bool, memAddr: bool, existingPreg: bool } val allowInMemMove = (* We can move a 32-bit constant into memory but not a long constant. *) { anyConstant=false, const32s=true, memAddr=false, existingPreg=true } and allowInPReg = { anyConstant=false, const32s=false, memAddr=false, existingPreg=true } (* AllowDefer can be used to ensure that any side-effects are done before something else but otherwise we only evaluate afterwards. *) and allowDefer = { anyConstant=true, const32s=true, memAddr=true, existingPreg=true } datatype destination = SpecificPReg of preg | NoResult | Allowed of allowedArgument (* Context type. *) type context = { loopArgs: (preg list * int * int) option, stackPtr: int, currHandler: int option, overflowBlock: int option ref } (* If a preg has been provided, use that, otherwise generate a new one. *) fun asTarget(SpecificPReg preg) = preg | asTarget NoResult = newPReg() | asTarget(Allowed _) = newPReg() fun moveIfNotAllowed(NoResult, code, arg) = (code, arg, false) | moveIfNotAllowed(Allowed{anyConstant=true, ...}, code, arg as AddressConstant _) = (code, arg, false) | moveIfNotAllowed(Allowed{anyConstant=true, ...}, code, arg as IntegerConstant _) = (code, arg, false) | moveIfNotAllowed(dest as Allowed{const32s=true, ...}, code, arg as IntegerConstant value) = (* This is allowed if the value is within 32-bits *) if is32bit value then (code, arg, false) else moveToTarget(dest, code, arg) | moveIfNotAllowed(dest as Allowed{const32s=true, ...}, code, arg as AddressConstant _) = if targetArch = Native32Bit then (code, arg, false) (* We can store the address directly *) else moveToTarget(dest, code, arg) | moveIfNotAllowed(Allowed{existingPreg=true, ...}, code, arg as RegisterArgument(PReg _)) = (code, arg, false) | moveIfNotAllowed(Allowed{memAddr=true, ...}, code, arg as MemoryLocation _) = (code, arg, false) | moveIfNotAllowed(dest, code, arg) = moveToTarget(dest, code, arg) and moveToTarget(dest, code, arg) = let val target = asTarget dest val moveSize = case arg of AddressConstant _ => movePolyWord | MemoryLocation _ => movePolyWord | _ => moveNativeWord in (code @ [BlockSimple(LoadArgument{source=arg, dest=target, kind=moveSize})], RegisterArgument target, false) end (* Create a bool result from a test by returning true or false. 
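The generated code, read forwards, branches on the condition to trueLab or falseLab; trueLab loads tagged 1 into the merge register and falseLab tagged 0, and both jump to mergeLab where the value is moved to the target.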
*) fun makeBoolResultRev(condition, ccRef, target, testCode) = let val trueLab = newLabel() and falseLab = newLabel() and mergeLab = newLabel() val mergeReg = newMergeReg() in BlockSimple(LoadArgument{dest=target, source=RegisterArgument mergeReg, kind=Move32Bit}) :: BlockLabel mergeLab :: BlockFlow(Unconditional mergeLab) :: BlockSimple(LoadArgument{dest=mergeReg, source=IntegerConstant(tag 0), kind=Move32Bit}) :: BlockLabel falseLab :: BlockFlow(Unconditional mergeLab) :: BlockSimple(LoadArgument{dest=mergeReg, source=IntegerConstant(tag 1), kind=Move32Bit}) :: BlockLabel trueLab :: BlockFlow(Conditional{ ccRef=ccRef, condition=condition, trueJump=trueLab, falseJump=falseLab }) :: testCode end fun moveIfNotAllowedRev(NoResult, code, arg) = (code, arg, false) | moveIfNotAllowedRev(Allowed{anyConstant=true, ...}, code, arg as AddressConstant _) = (code, arg, false) | moveIfNotAllowedRev(Allowed{anyConstant=true, ...}, code, arg as IntegerConstant _) = (code, arg, false) | moveIfNotAllowedRev(dest as Allowed{const32s=true, ...}, code, arg as IntegerConstant value) = (* This is allowed if the value is within 32-bits *) if is32bit value then (code, arg, false) else moveToTargetRev(dest, code, arg) | moveIfNotAllowedRev(dest as Allowed{const32s=true, ...}, code, arg as AddressConstant _) = if targetArch = Native32Bit then (code, arg, false) else moveToTargetRev(dest, code, arg) | moveIfNotAllowedRev(Allowed{existingPreg=true, ...}, code, arg as RegisterArgument(PReg _)) = (code, arg, false) | moveIfNotAllowedRev(Allowed{memAddr=true, ...}, code, arg as MemoryLocation _) = (code, arg, false) | moveIfNotAllowedRev(dest, code, arg) = moveToTargetRev(dest, code, arg) and moveToTargetRev(dest, code, arg) = let val target = asTarget dest val moveSize = case arg of AddressConstant _ => movePolyWord | MemoryLocation _ => movePolyWord | _ => moveNativeWord in (BlockSimple(LoadArgument{source=arg, dest=target, kind=moveSize}) :: code, RegisterArgument target, false) end (* Use a move if there's no offset or index. We could use an add if there's no index. *) and loadAddress{base, offset=0, index=NoMemIndex, dest} = LoadArgument{source=RegisterArgument base, dest=dest, kind=movePolyWord} | loadAddress{base, offset, index, dest} = LoadEffectiveAddress{base=SOME base, offset=offset, dest=dest, index=index, opSize=nativeWordOpSize} and codeToICodeTarget(instr, context: context, isTail, target) = (* This is really for backwards compatibility. *) let val (code, _, _) = codeToICode(instr, context, isTail, SpecificPReg target) in code end and codeToPReg(instr, context) = let (* Many instructions require an argument in a register. If it's already in a register use that rather than creating a new one. *) val (code, result, _) = codeToICode(instr, context, false, Allowed allowInPReg) val preg = case result of RegisterArgument pr => pr | _ => raise InternalError "codeToPReg" in (code, preg) end and codeToPRegRev(instr, context, tailCode) = let (* Many instructions require an argument in a register. If it's already in a register use that rather than creating a new one. *) val (code, result, _) = codeToICodeRev(instr, context, false, Allowed allowInPReg, tailCode) val preg = case result of RegisterArgument pr => pr | _ => raise InternalError "codeToPRegRev" in (code, preg) end and codeToICode(instr, context, isTail, destination) = let val (code, dest, haveExited) = codeToICodeRev(instr, context, isTail, destination, []) in (List.rev code, dest, haveExited) end (* Main function to turn the codetree into ICode. 
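It takes the expression, the context, a flag indicating a tail position, the required destination and the code built so far, and returns the extended (reversed) code, the location of the result and a flag showing whether the code has exited.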
Optimisation is generally left to later passes. This does detect tail recursion. This builds the result up in reverse order. There was an allocation hotspot in loadFields in the BICTuple case which was eliminated by building the list in reverse and then reversing the result. It seems better to build the list in reverse generally but for the moment there are too many special cases to do everything. *) and codeToICodeRev(BICNewenv (bindings, exp), context: context as {stackPtr=initialSp, ...} , isTail, destination, tailCode) = let (* Process a list of bindings. We need to accumulate the space used by any containers and reset the stack pointer at the end if necessary. *) fun doBindings([], context, tailCode) = (tailCode, context) | doBindings(BICDeclar{value=BICExtract(BICLoadLocal l), addr, ...} :: decs, context, tailCode) = let (* Giving a new name to an existing entry. This should have been removed at a higher level but it doesn't always seem to be. In particular we must treat this specially if it's a container. *) val original = Array.sub(locToPregArray, l) val () = Array.update(locToPregArray, addr, original) in doBindings(decs, context, tailCode) end | doBindings(BICDeclar{value, addr, ...} :: decs, context, tailCode) = let val (code, dest) = codeToPRegRev(value, context, tailCode) val () = Array.update(locToPregArray, addr, PregLocation dest) in doBindings(decs, context, code) end | doBindings(BICRecDecs [{lambda, addr, ...}] :: decs, context, tailCode) = (* We shouldn't have single entries in RecDecs but it seems to occur at the moment. *) let val dest = newPReg() val (code, _, _) = codeToICodeRev(BICLambda lambda, context, false, SpecificPReg dest, tailCode) val () = Array.update(locToPregArray, addr, PregLocation dest) in doBindings(decs, context, code) end | doBindings(BICRecDecs recDecs :: decs, context, tailCode) = let val destRegs = map (fn _ => newPReg()) recDecs (* First build the closures as mutable cells containing zeros. Set the entry in the address table to the register containing the address. *) fun makeClosure({lambda={closure, ...}, addr, ...}, dest, c) = let val () = Array.update(locToPregArray, addr, PregLocation dest) val sizeClosure = List.length closure + (if targetArch = ObjectId32Bit then 2 else 1) open Address fun clear n = if n = sizeClosure then [BlockSimple(AllocateMemoryOperation{size=sizeClosure, flags=if targetArch = ObjectId32Bit then Word8.orb(F_mutable, F_closure) else F_mutable, dest=dest, saveRegs=[]})] else (clear (n+1) @ [BlockSimple( StoreArgument{source=IntegerConstant(tag 0), base=dest, offset=n*Word.toInt wordSize, index=memIndexOrObject, kind=movePolyWord, isMutable=false})]) in c @ clear 0 @ [BlockSimple InitialisationComplete] end val allocClosures = ListPair.foldlEq makeClosure [] (recDecs, destRegs) fun setClosure({lambda as {closure, ...}, ...}, dest, l) = let val clResult = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, clResult) (* Basically the same as tuple except we load the address of the closure we've made. *) fun loadFields([], _) = [] | loadFields(f :: rest, n) = let val (code, source, _) = codeToICode(BICExtract f, context, false, Allowed allowInMemMove) val storeValue = [BlockSimple(StoreArgument{ source=source, base=dest, offset=n*Word.toInt wordSize, index=memIndexOrObject, kind=movePolyWord, isMutable=false })] in code @ storeValue @ loadFields(rest, n+1) end val setCodeAddress = if targetArch = ObjectId32Bit then let (* We can't get the code address until run time. 
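On 32-in-64 the code address is a 64-bit value in the first two words of the closure, so we load it from the constant closure we have just compiled and store it into the cell being built.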
*) val codeReg = newUReg() val closureReg = newPReg() in map BlockSimple [ LoadArgument{ source=AddressConstant(toMachineWord clResult), dest=closureReg, kind=movePolyWord}, LoadArgument{ source=MemoryLocation{offset=0, base=closureReg, index=ObjectIndex, cache=NONE}, dest=codeReg, kind=Move64Bit}, StoreArgument{ source=RegisterArgument codeReg, offset=0, base=dest, index=ObjectIndex, kind=moveNativeWord, isMutable=false} ] end else let val codeAddr = codeAddressFromClosure clResult val (code, source, _) = moveIfNotAllowed(Allowed allowInMemMove, [], AddressConstant codeAddr) in code @ [BlockSimple( StoreArgument{ source=source, base=dest, offset=0, index=NoMemIndex, kind=movePolyWord, isMutable=false })] end val setFields = setCodeAddress @ loadFields(closure, if targetArch = ObjectId32Bit then 2 else 1) in l @ setFields @ [BlockSimple(LockMutable{addr=dest})] end val setClosures = ListPair.foldlEq setClosure [] (recDecs, destRegs) val code = List.rev(allocClosures @ setClosures) in doBindings(decs, context, code @ tailCode) end | doBindings(BICNullBinding exp :: decs, context, tailCode) = let val (code, _, _) = codeToICodeRev(exp, context, false, NoResult, tailCode) (* And discard result. *) in doBindings(decs, context, code) end | doBindings(BICDecContainer{ addr, size } :: decs, {loopArgs, stackPtr, currHandler, overflowBlock}, tailCode) = let val containerReg = newStackLoc size val () = Array.update(locToPregArray, addr, ContainerLocation{container=containerReg, stackOffset=stackPtr+size}) in doBindings(decs, {loopArgs=loopArgs, stackPtr=stackPtr+size, currHandler=currHandler, overflowBlock=overflowBlock}, BlockSimple(ReserveContainer{size=size, container=containerReg}) :: tailCode) end val (codeBindings, resContext as {stackPtr=finalSp, ...}) = doBindings(bindings, context, tailCode) (* If we have had a container we'll need to reset the stack *) in if initialSp <> finalSp then let val _ = finalSp >= initialSp orelse raise InternalError "codeToICode - stack ptr" val bodyReg = newPReg() and resultReg = asTarget destination val (codeExp, result, haveExited) = codeToICodeRev(exp, resContext, isTail, SpecificPReg bodyReg, codeBindings) val afterAdjustSp = if haveExited then codeExp else BlockSimple(LoadArgument{source=result, dest=resultReg, kind=movePolyWord}) :: BlockSimple(ResetStackPtr{numWords=finalSp-initialSp, preserveCC=false}) :: codeExp in (afterAdjustSp, RegisterArgument resultReg, haveExited) end else codeToICodeRev(exp, resContext, isTail, destination, codeBindings) end | codeToICodeRev(BICConstnt(value, _), _, _, destination, tailCode) = moveIfNotAllowedRev(destination, tailCode, constantAsArgument value) | codeToICodeRev(BICExtract(BICLoadLocal l), {stackPtr, ...}, _, destination, tailCode) = ( case Array.sub(locToPregArray, l) of NoLocation => raise InternalError "codeToICodeRev - local unset" | PregLocation preg => moveIfNotAllowedRev(destination, tailCode, RegisterArgument preg) | ContainerLocation{container, stackOffset} => (* This always returns a ContainerAddr whatever the "allowed". *) (tailCode, ContainerAddr{container=container, stackOffset=stackPtr-stackOffset}, false) ) | codeToICodeRev(BICExtract(BICLoadArgument a), {stackPtr, ...}, _, destination, tailCode) = ( case Vector.sub(argumentVector, a) of ArgInReg{argReg, ...} => (* It was originally in a register. It's now in a preg. *) moveIfNotAllowedRev(destination, tailCode, RegisterArgument argReg) | ArgOnStack{stackOffset, stackReg} => (* Pushed before call. 
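The argument is still in its stack slot and is found by adding the current stack-pointer adjustment to its original offset.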
*) let val target = asTarget destination in (BlockSimple(LoadArgument{ source=StackLocation{wordOffset=stackOffset+stackPtr, container=stackReg, field=0, cache=NONE}, dest=target, kind=moveNativeWord}) :: tailCode, RegisterArgument target, false) end ) | codeToICodeRev(BICExtract(BICLoadClosure c), _, _, destination, tailCode) = let (* Add the number of words for the code address. This is 1 in native but 2 in 32-in-64. *) val offset = case targetArch of ObjectId32Bit => c+2 | _ => c+1 in if c >= List.length closure then raise InternalError "BICExtract: closure" else (); (* N.B. We need to add one to the closure entry because zero is the code address. *) moveIfNotAllowedRev(destination, tailCode, wordOffsetAddress(offset, closureRegAddr)) end | codeToICodeRev(BICExtract BICLoadRecursive, _, _, destination, tailCode) = (* If the closure is empty we must use the constant. We can't guarantee that the caller will actually load the closure register if it knows the closure is empty. *) moveIfNotAllowedRev(destination, tailCode, case closure of [] => AddressConstant(closureAsAddress resultClosure) | _ => RegisterArgument closureRegAddr) | codeToICodeRev(BICField{base, offset}, context, _, destination, tailCode) = let val (codeBase, baseEntry, _) = codeToICodeRev(base, context, false, Allowed allowInPReg, tailCode) in (* This should not be used with a container. *) case baseEntry of RegisterArgument baseR => moveIfNotAllowedRev(destination, codeBase, wordOffsetAddress(offset, baseR)) | _ => raise InternalError "codeToICodeRev-BICField" end | codeToICodeRev(BICLoadContainer{base, offset}, context, _, destination, tailCode) = let val (codeBase, baseEntry, _) = codeToICodeRev(base, context, false, Allowed allowInPReg, tailCode) val multiplier = Word.toInt(nativeWordSize div wordSize) in (* If this is a local container we extract the field. *) case baseEntry of RegisterArgument baseR => moveIfNotAllowedRev(destination, codeBase, wordOffsetAddress(offset*multiplier, baseR)) | ContainerAddr{container, stackOffset} => let val target = asTarget destination val finalOffset = stackOffset+offset val _ = finalOffset >= 0 orelse raise InternalError "offset" in (BlockSimple(LoadArgument{ source=StackLocation{wordOffset=finalOffset, container=container, field=offset, cache=NONE}, dest=target, kind=moveNativeWord}) :: tailCode, RegisterArgument target, false) end | _ => raise InternalError "codeToICodeRev-BICField" end | codeToICodeRev(BICEval{function, argList, resultType, ...}, context as { currHandler, ...}, isTail, destination, tailCode) = let val target = asTarget destination (* Create pregs for the closure and each argument. *) val clPReg = newPReg() (* If we have a constant closure we can go directly to the entry point. If the closure is a single word we don't need to load the closure register. *) val (functionCode, closureEntry, callKind) = case function of BICConstnt(addr, _) => let val addrAsAddr = toAddress addr (* If this is a closure we're still compiling we can't get the code address. However if this is directly recursive we can use the recursive convention. *) in if wordEq(closureAsAddress resultClosure, addr) then (tailCode, [], Recursive) else if flags addrAsAddr <> Address.F_words andalso flags addrAsAddr <> Address.F_closure then (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], FullCall) else if targetArch = ObjectId32Bit then (* We can't actually load the code address here. 
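The constant is a closure object whose first 64 bits are the code address. If it contains nothing but the code address (length 2 in 32-bit words) we can call it as a constant without loading the closure register; otherwise the closure register has to be loaded as well.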
*) let val addrLength = length addrAsAddr val _ = addrLength >= 0w1 orelse raise InternalError "BICEval address" val _ = flags addrAsAddr = Address.F_closure orelse raise InternalError "BICEval address not a closure" in if addrLength = 0w2 then (tailCode, [], ConstantCode addr) else (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], ConstantCode addr) end else (* Native 32 or 64-bits. *) let val addrLength = length addrAsAddr val _ = addrLength >= 0w1 orelse raise InternalError "BICEval address" val codeAddr = loadWord(addrAsAddr, 0w0) val _ = isCode (toAddress codeAddr) orelse raise InternalError "BICEval address not code" in if addrLength = 0w1 then (tailCode, [], ConstantCode codeAddr) else (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], ConstantCode codeAddr) end end | BICExtract BICLoadRecursive => ( (* If the closure is empty we don't need to load rdx *) case closure of [] => (tailCode, [], Recursive) | _ => (BlockSimple(LoadArgument {source=RegisterArgument closureRegAddr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], Recursive) ) | function => (* General case. *) (#1 (codeToICodeRev(function, context, false, SpecificPReg clPReg, tailCode)), [(RegisterArgument clPReg, GenReg edx)], FullCall) (* Optimise arguments. We have to be careful with tail-recursive functions because they need to save any stack arguments that could be overwritten. This is complicated because we overwrite the stack before loading the register arguments. In some circumstances it could be safe but for the moment leave it. This should be safe in the new code-transform but not the old codeICode. Currently we don't allow memory arguments at all. There's the potential for problems later. Memory arguments could possibly lead to aliasing of the stack if the memory actually refers to a container on the stack. That would mess up the code that ensures that stack arguments are stored in the right order. *) (* We don't allow long constants in stack arguments to a tail-recursive call because we may use a memory move to set them. We also don't allow them in 32-in-64 because we can't push an address constant. *) val allowInStackArg = Allowed {anyConstant=not isTail andalso targetArch <> ObjectId32Bit, const32s=true, memAddr=false, existingPreg=not isTail } and allowInRegArg = Allowed {anyConstant=true, const32s=true, memAddr=false, existingPreg=not isTail } (* Load the first arguments into registers and the rest to the stack. *) fun loadArgs ([], _, _, tailCode) = (tailCode, [], []) | loadArgs ((arg, DoubleFloatType) :: args, gRegs, fpReg :: fpRegs, tailCode) = let (* Floating point register argument. *) val (c, r) = codeToPRegRev(arg, context, tailCode) val r1 = newUReg() val c1 = BlockSimple(LoadArgument{source=wordAt r, dest=r1, kind=MoveDouble}) :: c val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c1) in (code, (RegisterArgument r1, fpReg) :: regArgs, stackArgs) end | loadArgs ((arg, SingleFloatType) :: args, gRegs, fpReg :: fpRegs, tailCode) = let (* Floating point register argument. 
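On 64-bit targets the single precision value is tagged within the word and is untagged straight into the register; on 32-bit targets it is boxed and has to be loaded.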
*) val (c, r) = codeToPRegRev(arg, context, tailCode) val r1 = newUReg() val c1 = if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument r, dest=r1, cache=NONE}) :: c else BlockSimple(LoadArgument{source=wordAt r, dest=r1, kind=MoveFloat}) :: c val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c1) in (code, (RegisterArgument r1, fpReg) :: regArgs, stackArgs) end | loadArgs ((arg, _) :: args, gReg::gRegs, fpRegs, tailCode) = let (* General register argument. *) val (c, r, _) = codeToICodeRev(arg, context, false, allowInRegArg, tailCode) val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c) in (code, (r, gReg) :: regArgs, stackArgs) end | loadArgs ((arg, _) :: args, [], fpRegs, tailCode) = let (* Stack argument. *) val (c, r, _) = codeToICodeRev(arg, context, false, allowInStackArg, tailCode) val (code, regArgs, stackArgs) = loadArgs(args, [], fpRegs, c) in (code, regArgs, r :: stackArgs) end val (codeArgs, regArgs, stackArgs) = loadArgs(argList, generalArgRegs, floatingPtArgRegs, functionCode) (* If this is at the end of the function and the result types are the same we can use a tail-recursive call. *) val tailCall = isTail andalso resultType = fnResultType val callCode = if tailCall then let val {stackPtr, ...} = context (* The number of arguments currently on the stack. *) val currentStackArgCount = currentStackArgs val newStackArgCount = List.length stackArgs (* The offset of the first argument or the return address if there are no stack arguments. N.B. We actually have currentStackArgCount+1 items on the stack including the return address. Offsets can be negative. *) val stackOffset = stackPtr val firstArgumentAddr = currentStackArgCount fun makeStackArgs([], _) = [] | makeStackArgs(arg::args, offset) = {src=arg, stack=offset} :: makeStackArgs(args, offset-1) val stackArgs = makeStackArgs(stackArgs, firstArgumentAddr) (* The stack adjustment needed to compensate for any items that have been pushed and the differences in the number of arguments. May be positive or negative. This is also the destination address of the return address so when we enter the new function the return address will be the first item on the stack. *) val stackAdjust = firstArgumentAddr - newStackArgCount (* Add an entry for the return address to the stack arguments. *) val returnEntry = {src=StackLocation{wordOffset=stackPtr, container=returnAddressEntry, field=0, cache=NONE}, stack=stackAdjust} (* Because we're storing into the stack we may be overwriting values we want. If the source of any value is a stack location below the current stack pointer we load it except in the special case where the destination is the same as the source (which is often the case with the return address). 
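For example (illustrative): a tail call from a function with three stack arguments to one with a single stack argument gives stackAdjust = 2, so the return address is stored at slot 2 and the argument at slot 3, leaving the return address as the first item on the stack when the destination function is entered.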
*) local fun loadArgs [] = ([], []) | loadArgs (arg :: rest) = let val (loadCode, loadedArgs) = loadArgs rest in case arg of {src as StackLocation{wordOffset, ...}, stack} => if wordOffset = stack+stackOffset (* Same location *) orelse stack+stackOffset < 0 (* Storing above current top of stack *) orelse stackOffset+wordOffset > ~ stackAdjust (* Above the last argument *) then (loadCode, arg :: loadedArgs) else let val preg = newPReg() in (BlockSimple(LoadArgument{source=src, dest=preg, kind=moveNativeWord}) :: loadCode, {src=RegisterArgument preg, stack=stack} :: loadedArgs) end | _ => (loadCode, arg :: loadedArgs) end in val (loadStackArgs, loadedStackArgs) = loadArgs(returnEntry :: stackArgs) end in BlockExit(TailRecursiveCall{regArgs=closureEntry @ regArgs, stackArgs=loadedStackArgs, stackAdjust = stackAdjust, currStackSize=stackOffset, callKind=callKind, workReg=newPReg()}) :: loadStackArgs @ codeArgs end else let val (moveResult, resReg) = case resultType of GeneralType => ([], target) | DoubleFloatType => let val fpRegDest = newUReg() in (boxOrTagReal(fpRegDest, target, BuiltIns.PrecDouble), fpRegDest) end | SingleFloatType => let val fpRegDest = newUReg() in (boxOrTagReal(fpRegDest, target, BuiltIns.PrecSingle), fpRegDest) end val call = FunctionCall{regArgs=closureEntry @ regArgs, stackArgs=stackArgs, dest=resReg, realDest=resultReg resultType, callKind=callKind, saveRegs=[]} val callBlock = case currHandler of NONE => BlockSimple call :: codeArgs | SOME h => BlockOptionalHandle{call=call, handler=h, label=newLabel()} :: codeArgs in moveResult @ callBlock end in (callCode, RegisterArgument target, tailCall (* We've exited if this was a tail jump *)) end | codeToICodeRev(BICNullary{oper=BuiltIns.GetCurrentThreadId}, _, _, destination, tailCode) = (* Get the ID of the current thread. *) let val target = asTarget destination in (BlockSimple(LoadMemReg{offset=memRegThreadSelf, dest=target, kind=movePolyWord}) :: tailCode, RegisterArgument target, false) end | codeToICodeRev(BICNullary{oper=BuiltIns.CheckRTSException}, { currHandler, ...}, _, destination, tailCode) = let (* Raise an exception in ML if the last RTS call set the exception packet. *) val haveException = newLabel() and noException = newLabel() val ccRef = newCCRef() val testReg = newPReg() val raiseCode = RaiseExceptionPacket{packetReg=testReg} val code = BlockLabel noException :: (case currHandler of NONE => BlockExit raiseCode | SOME h => BlockRaiseAndHandle(raiseCode, h)) :: BlockLabel haveException :: BlockFlow(Conditional{ ccRef=ccRef, condition=JNE, trueJump=haveException, falseJump=noException }) :: BlockSimple(CompareLiteral{arg1=RegisterArgument testReg, arg2=tag 0, opSize=polyWordOpSize, ccRef=ccRef}) :: BlockSimple(LoadMemReg{offset=memRegExceptionPacket, dest=testReg, kind=movePolyWord}) :: tailCode in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeRev(BICNullary{oper=BuiltIns.CPUPause}, _, _, destination, tailCode) = (* Pause during spinlock phase of mutex locking. 
*) moveIfNotAllowedRev(destination, BlockSimple PauseCPU :: tailCode, (* Unit result *) IntegerConstant(tag 0)) | codeToICodeRev(BICUnary instr, context, isTail, destination, tailCode) = codeToICodeUnaryRev(instr, context, isTail, destination, tailCode) | codeToICodeRev(BICBinary instr, context, isTail, destination, tailCode) = codeToICodeBinaryRev(instr, context, isTail, destination, tailCode) | codeToICodeRev(BICArbitrary{oper, shortCond, arg1, arg2, longCall}, context, _, destination, tailCode) = let val startLong = newLabel() and resultLabel = newLabel() val target = asTarget destination val condResult = newMergeReg() (* Overflow check - if there's an overflow jump to the long precision case. *) fun jumpOnOverflow ccRef = let val noOverFlow = newLabel() in [BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=startLong, falseJump=noOverFlow }), BlockLabel noOverFlow] end val (longCode, _, _) = codeToICode(longCall, context, false, SpecificPReg condResult) (* We could use a tail jump here if this is a tail. *) val (code, dest, haveExited) = ( (* Test the tag bits and skip to the long case if either is clear. *) List.rev(codeConditionRev(shortCond, context, false, startLong, [])) @ (* Try evaluating as fixed precision and jump if we get an overflow. *) codeFixedPrecisionArith(oper, arg1, arg2, context, condResult, jumpOnOverflow) @ (* If we haven't had an overflow jump to the result. *) [BlockFlow(Unconditional resultLabel), (* If we need to use the full long-precision call we come here. *) BlockLabel startLong] @ longCode @ [BlockLabel resultLabel, BlockSimple(LoadArgument{source=RegisterArgument condResult, dest=target, kind=movePolyWord})], RegisterArgument target, false) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICAllocateWordMemory instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeAllocate(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICLambda(lambda as { closure = [], ...}), _, _, destination, tailCode) = (* Empty closure - create a constant closure for any recursive calls. *) let val closure = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, closure) (* Return the closure itself as the value. *) in moveIfNotAllowedRev(destination, tailCode, AddressConstant(closureAsAddress closure)) end | codeToICodeRev(BICLambda(lambda as { closure, ...}), context, isTail, destination, tailCode) = (* Non-empty closure. Ignore stack closure option at the moment. *) let val closureRef = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, closureRef) in if targetArch = ObjectId32Bit then let val target = asTarget destination val memAddr = newPReg() fun loadFields([], n, tlCode) = let val codeReg = newUReg() val closureReg = newPReg() in (* The code address occupies the first native word but we need to extract it at run-time. We don't currently have a way to have 64-bit constants. 
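Instead the constant closure is loaded and its first 64-bit word, the code address, is copied into the first native word of the closure being allocated.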
*) BlockSimple( StoreArgument{ source=RegisterArgument codeReg, offset=0, base=memAddr, index=ObjectIndex, kind=moveNativeWord, isMutable=false}) :: BlockSimple(LoadArgument{ source=MemoryLocation{offset=0, base=closureReg, index=ObjectIndex, cache=NONE}, dest=codeReg, kind=Move64Bit}) :: BlockSimple(LoadArgument{ source=AddressConstant(toMachineWord closureRef), dest=closureReg, kind=movePolyWord}) :: BlockSimple(AllocateMemoryOperation{size=n, flags=F_closure, dest=memAddr, saveRegs=[]}) :: tlCode end | loadFields(f :: rest, n, tlCode) = let (* Defer the evaluation if possible. We may have a constant that we can't move directly but it's better to load it after the allocation otherwise we will have to push the register if we need to GC. *) val (code1, source1, _) = codeToICodeRev(BICExtract f, context, false, Allowed allowDefer, tlCode) val restAndAlloc = loadFields(rest, n+1, code1) val (code2, source, _) = moveIfNotAllowedRev(Allowed allowInMemMove, restAndAlloc, source1) val storeValue = BlockSimple(StoreArgument{ source=source, offset=n*Word.toInt wordSize, base=memAddr, index=ObjectIndex, kind=movePolyWord, isMutable=false}) in storeValue :: code2 end val code = BlockSimple InitialisationComplete :: BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord}) :: loadFields(closure, 2, tailCode) in (code, RegisterArgument target, false) end (* Treat it as a tuple with the code as the first field. *) else codeToICodeRev(BICTuple(BICConstnt(codeAddressFromClosure closureRef, []) :: map BICExtract closure), context, isTail, destination, tailCode) end | codeToICodeRev(BICCond(test, thenPt, elsePt), context, isTail, NoResult, tailCode) = let (* If we don't want the result but are only evaluating for side-effects we may be able to optimise special cases. This was easier in the forward case but for now we don't bother and leave it to the lower levels. *) val startElse = newLabel() and skipElse = newLabel() val codeTest = codeConditionRev(test, context, false, startElse, tailCode) val (codeThen, _, _) = codeToICodeRev(thenPt, context, isTail, NoResult, codeTest) val (codeElse, _, _) = codeToICodeRev(elsePt, context, isTail, NoResult, BlockLabel startElse :: BlockFlow(Unconditional skipElse) :: codeThen) in (BlockLabel skipElse :: codeElse, (* Unit result *) IntegerConstant(tag 0), false) end | codeToICodeRev(BICCond(test, thenPt, elsePt), context, isTail, destination, tailCode) = let (* Because we may push the result onto the stack we have to create a new preg to hold the result and then copy that to the final result. *) (* If this is a tail each arm will exit separately and neither will return a result. *) val target = asTarget destination val condResult = newMergeReg() val thenTarget = if isTail then newPReg() else condResult val startElse = newLabel() val testCode = codeConditionRev(test, context, false, startElse, tailCode) (* Put the result in the target register. *) val (thenCode, _, thenExited) = codeToICodeRev(thenPt, context, isTail, SpecificPReg thenTarget, testCode) (* Add a jump round the else-part except that if this is a tail we return. The then-part could have exited e.g. with a raise or a loop. *) val (exitThen, thenLabel, elseTarget) = if thenExited then (thenCode, [], target (* Can use original target. 
*)) else if isTail then (returnInstruction(context, thenTarget, thenCode), [], newPReg()) else let val skipElse = newLabel() in (BlockFlow(Unconditional skipElse) :: thenCode, [BlockSimple(LoadArgument{source=RegisterArgument condResult, dest=target, kind=movePolyWord}), BlockLabel skipElse], condResult) end val (elseCode, _, elseExited) = codeToICodeRev(elsePt, context, isTail, SpecificPReg elseTarget, BlockLabel startElse :: exitThen) (* Add a return to the else-part if necessary so we will always exit on a tail. *) val exitElse = if isTail andalso not elseExited then returnInstruction(context, elseTarget, elseCode) else elseCode in (thenLabel @ exitElse, RegisterArgument target, isTail orelse thenExited andalso elseExited) end | codeToICodeRev(BICCase { cases, test, default, isExhaustive, firstIndex}, context, isTail, destination, tailCode) = let (* We have to create a new preg for the result in case we need to push it to the stack. *) val targetReg = newMergeReg() local val initialTestReg = newPReg() val (testCode, _, _) = codeToICodeRev(test, context, false, SpecificPReg initialTestReg, tailCode) (* Subtract the minimum value so the value we're testing is always in the range of (tagged) 0 to the maximum. It is possible to adjust the value when computing the index but that can lead to overflows during compilation if the minimum is very large or small. We can ignore overflow and allow values to wrap round. *) in val (testCode, testReg) = if firstIndex = 0w0 then (testCode, initialTestReg) else let val newTestReg = newPReg() val subtract = BlockSimple(ArithmeticFunction{oper=SUB, resultReg=newTestReg, operand1=initialTestReg, operand2=IntegerConstant(semitag(Word.toLargeInt firstIndex)), ccRef=newCCRef(), opSize=polyWordOpSize}) in (subtract :: testCode, newTestReg) end end val workReg = newPReg() (* Unless this is exhaustive we need to add a range check. *) val (rangeCheck, extraDefaults) = if isExhaustive then (testCode, []) else let val defLab1 = newLabel() val tReg1 = newPReg() val ccRef1 = newCCRef() (* Since we've subtracted any minimum we only have to check whether the value is greater (unsigned) than the maximum. *) val numberOfCases = LargeInt.fromInt(List.length cases) val continueLab = newLabel() val testCode2 = BlockLabel continueLab :: BlockFlow(Conditional{ccRef=ccRef1, condition=JNB, trueJump=defLab1, falseJump=continueLab}) :: BlockSimple(WordComparison{arg1=tReg1, arg2=IntegerConstant(tag numberOfCases), ccRef=ccRef1, opSize=polyWordOpSize}) :: BlockSimple(LoadArgument {source=RegisterArgument testReg, dest=tReg1, kind=movePolyWord}) :: testCode in (testCode2, [defLab1]) end (* Make a label for each item in the list. *) val codeLabels = map (fn _ => newLabel()) cases (* Create an exit label in case it's needed. *) val labelForExit = if isTail then ~1 (* Illegal label. *) else newLabel() (* Generate the code for each of the cases and the default. We need to put an unconditional branch after each to skip the other cases. *) fun codeCases (SOME c :: otherCases, startLabel :: otherLabels, tailCode) = let val caseTarget = if isTail then newPReg() else targetReg (* Put in the case with a jump to the end of the sequence. 
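If the case exits by itself no jump is needed, and in a tail position a return is used instead.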
*) val (codeThisCase, _, caseExited) = codeToICodeRev(c, context, isTail, SpecificPReg caseTarget, BlockLabel startLabel :: tailCode) val exitThisCase = if caseExited then codeThisCase else if isTail then returnInstruction(context, caseTarget, codeThisCase) else BlockFlow(Unconditional labelForExit) :: codeThisCase in codeCases(otherCases, otherLabels, exitThisCase) end | codeCases(NONE :: otherCases, _ :: otherLabels, tailCode) = codeCases(otherCases, otherLabels, tailCode) | codeCases ([], [], tailCode) = let (* We need to add labels for all the gaps we filled and also for a "default" label for the indexed-case instruction itself as well as any range checks. *) fun addDefault (startLabel, NONE, l) = BlockLabel startLabel :: l | addDefault (_, SOME _, l) = l fun asForward l = BlockLabel l val dLabs = map asForward extraDefaults @ tailCode val defLabels = ListPair.foldlEq addDefault dLabs (codeLabels, cases) val defaultTarget = if isTail then newPReg() else targetReg val (defaultCode, _, defaultExited) = codeToICodeRev(default, context, isTail, SpecificPReg defaultTarget, defLabels) in (* Put in the default. Because this is the last we don't need to jump round it. However if this is a tail and we haven't exited we put in a return. That way the case will always have exited if this is a tail. *) if isTail andalso not defaultExited then returnInstruction(context, defaultTarget, defaultCode) else defaultCode end | codeCases _ = raise InternalError "codeCases: mismatch" val codedCases = codeCases(cases, codeLabels, BlockFlow(IndexedBr codeLabels) :: BlockSimple(IndexedCaseOperation{testReg=testReg, workReg=workReg}) :: rangeCheck) (* We can now copy to the target. If we need to push the result this load will be converted into a push. *) val target = asTarget destination val copyToTarget = if isTail then codedCases else BlockSimple(LoadArgument{source=RegisterArgument targetReg, dest=target, kind=movePolyWord}) :: BlockLabel labelForExit :: codedCases in (copyToTarget, RegisterArgument target, isTail (* We have always exited on a tail. *)) end | codeToICodeRev(BICBeginLoop {loop, arguments}, context as { stackPtr, currHandler, overflowBlock, ...}, isTail, destination, tailCode) = let val target = asTarget destination fun codeArgs ([], tailCode) = ([], tailCode) | codeArgs (({value, addr}, _) :: rest, tailCode) = let val pr = newPReg() val () = Array.update(locToPregArray, addr, PregLocation pr) val (code, _, _) = codeToICodeRev(value, context, false, SpecificPReg pr, tailCode) val (pregs, othercode) = codeArgs(rest, code) in (pr::pregs, othercode) end val (loopRegs, argCode) = codeArgs(arguments, tailCode) val loopLabel = newLabel() val (loopBody, _, loopExited) = codeToICodeRev(loop, {loopArgs=SOME (loopRegs, loopLabel, stackPtr), stackPtr=stackPtr, currHandler=currHandler, overflowBlock=overflowBlock }, isTail, SpecificPReg target, BlockLabel loopLabel :: BlockSimple BeginLoop :: argCode) in (loopBody, RegisterArgument target, loopExited) end | codeToICodeRev(BICLoop args, context as {loopArgs=SOME (loopRegs, loopLabel, loopSp), stackPtr, currHandler, ...}, _, destination, tailCode) = let val target = asTarget destination (* Registers to receive the evaluated arguments. We can't put the values into the loop variables yet because the values could depend on the current values of the loop variables. 
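Each argument is therefore evaluated into a fresh pseudo-register and the JumpLoop transfers them all to the loop registers together.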
*) val argPRegs = map(fn _ => newPReg()) args val codeArgs = ListPair.foldlEq(fn ((arg, _), pr, l) => #1 (codeToICodeRev(arg, context, false, SpecificPReg pr, l))) tailCode (args, argPRegs) val jumpArgs = ListPair.mapEq(fn (s, l) => (RegisterArgument s, l)) (argPRegs, loopRegs) (* If we've allocated a container in the loop we have to remove it before jumping back. *) val stackReset = if loopSp = stackPtr then codeArgs else BlockSimple(ResetStackPtr{numWords=stackPtr-loopSp, preserveCC=false}) :: codeArgs val jumpLoop = JumpLoop{regArgs=jumpArgs, stackArgs=[], checkInterrupt=SOME[], workReg=NONE} (* "checkInterrupt" could result in an Interrupt exception so we treat this like a function call. *) val code = case currHandler of NONE => BlockFlow(Unconditional loopLabel) :: BlockSimple jumpLoop :: stackReset | SOME h => BlockOptionalHandle{call=jumpLoop, handler=h, label=loopLabel} :: stackReset in (code, RegisterArgument target, true) end | codeToICodeRev(BICLoop _, {loopArgs=NONE, ...}, _, _, _) = raise InternalError "BICLoop without BICBeginLoop" | codeToICodeRev(BICRaise exc, context as { currHandler, ...}, _, destination, tailCode) = let val packetReg = newPReg() val (code, _, _) = codeToICodeRev(exc, context, false, SpecificPReg packetReg, tailCode) val raiseCode = RaiseExceptionPacket{packetReg=packetReg} val block = case currHandler of NONE => BlockExit raiseCode | SOME h => BlockRaiseAndHandle(raiseCode, h) in (block :: code, RegisterArgument(asTarget destination), true (* Always exits *)) end | codeToICodeRev(BICHandle{exp, handler, exPacketAddr}, context as { stackPtr, loopArgs, overflowBlock, ... }, isTail, destination, tailCode) = let (* As with BICCond and BICCase we need to create a new register for the result in case we need to push it to the stack. *) val handleResult = newMergeReg() val handlerLab = newLabel() and startHandling = newLabel() val (bodyTarget, handlerTarget) = if isTail then (newPReg(), newPReg()) else (handleResult, handleResult) (* TODO: Even if we don't actually want a result we force one in here by using "asTarget". *) (* The expression cannot be treated as a tail because the handler has to be removed after. It may "exit" if it has raised an unconditional exception. If it has we mustn't generate a PopExceptionHandler because there won't be any result for resultReg. We need to add two words to the stack to account for the items pushed by PushExceptionHandler. We create an instruction to push the handler followed by a block fork to the start of the code and, potentially the handler, then a label to start the code that the handler is in effect for. *) val initialCode = BlockLabel startHandling :: BlockFlow(SetHandler{handler=handlerLab, continue=startHandling}) :: BlockSimple(PushExceptionHandler{workReg=newPReg()}) :: tailCode val (expCode, _, expExit) = codeToICodeRev(exp, {stackPtr=stackPtr+2, loopArgs=loopArgs, currHandler=SOME handlerLab, overflowBlock=overflowBlock}, false (* Not tail *), SpecificPReg bodyTarget, initialCode) (* If this is the tail we can replace the jump at the end of the handled code with returns. If the handler has exited we don't need a return there. Otherwise we need to add an unconditional jump to skip the handler. *) val (atExpEnd, skipExpLabel) = case (isTail, expExit) of (true, true) => (* Tail and exited. *) (expCode, NONE) | (true, false) => (* Tail and not exited. *) (returnInstruction(context, bodyTarget, BlockSimple(PopExceptionHandler{workReg=newPReg()}) :: expCode), NONE) | (false, true) => (* Not tail but exited.
*) (expCode, NONE) | (false, false) => let val skipHandler = newLabel() in (BlockFlow(Unconditional skipHandler) :: BlockSimple(PopExceptionHandler{workReg=newPReg()}) :: expCode, SOME skipHandler) end (* Make a register to hold the exception packet and put eax into it. *) val packetAddr = newPReg() val () = Array.update(locToPregArray, exPacketAddr, PregLocation packetAddr) val (handleCode, _, handleExit) = codeToICodeRev(handler, context, isTail, SpecificPReg handlerTarget, BlockSimple(BeginHandler{workReg=newPReg(), packetReg=packetAddr}) :: BlockLabel handlerLab :: atExpEnd) val target = asTarget destination val afterHandler = case (isTail, handleExit) of (true, true) => (* Tail and exited. *) handleCode | (true, false) => (* Tail and not exited. *) returnInstruction(context, handlerTarget, handleCode) | (false, _) => (* Not tail. *) handleCode val addLabel = case skipExpLabel of SOME lab => BlockLabel lab:: afterHandler | NONE => afterHandler in (BlockSimple(LoadArgument{source=RegisterArgument handleResult, dest=target, kind=movePolyWord}) :: addLabel, RegisterArgument target, isTail) end | codeToICodeRev(BICTuple fields, context, _, destination, tailCode) = let (* TODO: This is a relic of the old fall-back code-generator. It required the result of a tuple to be at the top of the stack. It should be changed. *) val target = asTarget destination (* Actually we want this. *) val memAddr = newPReg() fun loadFields([], n, tlCode) = BlockSimple(AllocateMemoryOperation{size=n, flags=0w0, dest=memAddr, saveRegs=[]}) :: tlCode | loadFields(f :: rest, n, tlCode) = let (* Defer the evaluation if possible. We may have a constant that we can't move directly but it's better to load it after the allocation otherwise we will have to push the register if we need to GC. *) val (code1, source1, _) = codeToICodeRev(f, context, false, Allowed allowDefer, tlCode) val restAndAlloc = loadFields(rest, n+1, code1) val (code2, source, _) = moveIfNotAllowedRev(Allowed allowInMemMove, restAndAlloc, source1) val storeValue = BlockSimple(StoreArgument{ source=source, offset=n*Word.toInt wordSize, base=memAddr, index=memIndexOrObject, kind=movePolyWord, isMutable=false}) in storeValue :: code2 end val code = BlockSimple InitialisationComplete :: BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord}) :: loadFields(fields, 0, tailCode) in (code, RegisterArgument target, false) end (* Copy the source tuple into the container. There are important special cases for both the source tuple and the container. If the source tuple is a BICTuple we have the fields and can store them without creating a tuple on the heap. If the destination is a local container we can store directly into the stack. 
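For example (illustrative), setting a local container from a BICTuple allocates nothing: each field selected by the filter is evaluated and stored straight into the container's stack slots.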
*) | codeToICodeRev(BICSetContainer{container, tuple, filter}, context as {stackPtr, ...}, _, destination, tailCode) = let local fun createStore containerReg (source, destWord) = StoreArgument{source=source, offset=destWord*Word.toInt nativeWordSize, base=containerReg, index=NoMemIndex, kind=moveNativeWord, isMutable=false} in val findContainer = case container of BICExtract(BICLoadLocal l) => ( case Array.sub(locToPregArray, l) of ContainerLocation{container, stackOffset} => let fun storeToStack(source, destWord) = StoreToStack{source=source, container=container, field=destWord, stackOffset=stackPtr-stackOffset+destWord} in SOME storeToStack end | _ => NONE ) | _ => NONE val (codeContainer, storeInstr) = case findContainer of SOME storeToStack => (tailCode, storeToStack) | NONE => let val containerTarget = newPReg() val (codeContainer, _, _) = codeToICodeRev(container, context, false, SpecificPReg containerTarget, tailCode) in (codeContainer, createStore containerTarget) end end val filterLength = BoolVector.length filter val code = case tuple of BICTuple cl => let (* In theory it's possible that the tuple could contain fields that are not used but nevertheless need to be evaluated for their side-effects. Create all the fields and push to the stack. *) fun codeField(arg, (regs, tailCode)) = let val (c, r, _) = codeToICodeRev(arg, context, false, Allowed allowInMemMove, tailCode) in (r :: regs, c) end val (pregsRev, codeFields) = List.foldl codeField ([], codeContainer) cl val pregs = List.rev pregsRev fun copyField(srcReg, (sourceWord, destWord, tailCode)) = if sourceWord < filterLength andalso BoolVector.sub(filter, sourceWord) then (sourceWord+1, destWord+1, BlockSimple(storeInstr(srcReg, destWord)) :: tailCode) else (sourceWord+1, destWord, tailCode) val (_, _, resultCode) = List.foldl copyField (0, 0, codeFields) pregs in resultCode end | tuple => let (* Copy a heap tuple. It is possible that this is another container in which case we must load the fields directly. We mustn't load its address and then copy because loading the address would be the last reference and might cause the container to be reused prematurely. *) val findContainer = case tuple of BICExtract(BICLoadLocal l) => ( case Array.sub(locToPregArray, l) of ContainerLocation{container, stackOffset} => let fun getAddr sourceWord = StackLocation{wordOffset=stackPtr-stackOffset+sourceWord, container=container, field=sourceWord, cache=NONE} in SOME getAddr end | _ => NONE ) | _ => NONE val (codeTuple, loadField) = case findContainer of SOME getAddr => (codeContainer, getAddr) | NONE => let val tupleTarget = newPReg() val (codeTuple, _, _) = codeToICodeRev(tuple, context, false, SpecificPReg tupleTarget, codeContainer) fun loadField sourceWord = wordOffsetAddress(sourceWord, tupleTarget) in (codeTuple, loadField) end fun copyContainer(sourceWord, destWord, tailCode) = if sourceWord = filterLength then tailCode else if BoolVector.sub(filter, sourceWord) then let val loadReg = newPReg() val code = BlockSimple(storeInstr(RegisterArgument loadReg, destWord)) :: BlockSimple(LoadArgument{source=loadField sourceWord, dest=loadReg, kind=movePolyWord}) :: tailCode in copyContainer(sourceWord+1, destWord+1, code) end else copyContainer(sourceWord+1, destWord, tailCode) in copyContainer(0, 0, codeTuple) end in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeRev(BICTagTest{test, tag=tagValue, ...}, context, _, destination, tailCode) = (* Check the "tag" word of a union (datatype). N.B. 
Not the same as testing the tag bit of a word. *) let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (testCode, tagArg, _) = codeToICodeRev(test, context, false, Allowed memOrReg, tailCode) val target = asTarget destination in (makeBoolResultRev(JE, ccRef, target, (* Use CompareLiteral because the tag must fit in 32-bits. *) BlockSimple(CompareLiteral{arg1=tagArg, arg2=tag(Word.toLargeInt tagValue), opSize=polyWordOpSize, ccRef=ccRef}) :: testCode), RegisterArgument target, false) end | codeToICodeRev(BICLoadOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeLoad(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICStoreOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeStore(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICBlockOperation ({kind=BlockOpEqualByte, sourceLeft, destRight, length}), context, _, destination, tailCode) = let val vec1Reg = newUReg() and vec2Reg = newUReg() val ccRef = newCCRef() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddressRev(sourceLeft, true, context, tailCode) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddressRev(destRight, true, context, leftCode) val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToRegRev(length, false (* unsigned *), context, rightCode) val target = asTarget destination val code = makeBoolResultRev(JE, ccRef, target, BlockSimple(CompareByteVectors{ vec1Addr=vec1Reg, vec2Addr=vec2Reg, length=lengthArg, ccRef=ccRef }) :: lengthUntag @ BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg}) :: rightUntag @ BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg}) :: leftUntag @ lengthCode) in (code, RegisterArgument target, false) end | codeToICodeRev(BICBlockOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeBlock(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end and codeToICodeUnaryRev({oper=BuiltIns.NotBoolean, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val ccRef = newCCRef() val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} val (argCode, testDest, _) = codeToICodeRev(arg1, context, false, allow, tailCode) in (* Test the argument and return a boolean result. If either the argument is a condition or the result is used in a test this will be better than using XOR. *) (makeBoolResultRev(JNE, ccRef, target, BlockSimple(CompareLiteral{arg1=testDest, arg2=tag 1, opSize=polyWordOpSize, ccRef=ccRef}) :: argCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.IsTaggedValue, arg1}, context, _, destination, tailCode) = let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (testCode, testResult, _) = codeToICodeRev(arg1, context, false, Allowed memOrReg, tailCode) (* Test the tag bit. This sets the zero bit if the value is untagged. 
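JNE therefore gives true exactly when the argument is a tagged (short) value rather than a pointer.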
*) val target = asTarget destination in (makeBoolResultRev(JNE, ccRef, target, BlockSimple(TestTagBit{arg=testResult, ccRef=ccRef}) :: testCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.MemoryCellLength, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val argReg1 = newUReg() and argReg2 = newUReg() and argReg3 = newUReg() (* These are untagged until the tag is put in. *) and ccRef1 = newCCRef() and ccRef2 = newCCRef() and ccRef3 = newCCRef() (* Get the length of a memory cell (heap object). We need to mask out the top byte containing the flags and to tag the result. The mask is 56 bits on 64-bit which won't fit in an inline constant. Since we have to shift it anyway we might as well do this by shifts. *) val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=argReg3, operand2=IntegerConstant 1, ccRef=ccRef3, opSize=polyWordOpSize}) :: BlockSimple(ShiftOperation{shift=SHR, resultReg=argReg3, operand=argReg2, shiftAmount=IntegerConstant 7 (* 8-tagshift*), ccRef=ccRef2, opSize=polyWordOpSize }) :: BlockSimple(ShiftOperation{shift=SHL, resultReg=argReg2, operand=argReg1, shiftAmount=IntegerConstant 8, ccRef=ccRef1, opSize=polyWordOpSize }) :: BlockSimple(LoadArgument{source=wordOffsetAddress(~1, addrReg), dest=argReg1, kind=movePolyWord}) :: argCode, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.MemoryCellFlags, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val argReg1 = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(TagValue{ source=argReg1, dest=target, isSigned=false, opSize=OpSize32 }) :: BlockSimple(LoadArgument{source=MemoryLocation{offset= ~1, base=addrReg, index=memIndexOrObject, cache=NONE}, dest=argReg1, kind=MoveByte}) :: argCode, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.ClearMutableFlag, arg1}, context, _, destination, tailCode) = let val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in moveIfNotAllowedRev(destination, BlockSimple(LockMutable{addr=addrReg}) :: argCode, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeUnaryRev({oper=BuiltIns.AtomicReset, arg1}, context, _, destination, tailCode) = let (* This is needed only for the interpreted version where we have a single real mutex to interlock atomic increment and decrement. We have to use the same mutex to interlock clearing a mutex. On the X86 we use hardware locking and the hardware guarantees that an assignment of a word will be atomic. *) val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) (* Store tagged 0 in the mutex. This is the unlocked value. *) val code = BlockSimple(StoreArgument{source=IntegerConstant(tag 0), base=addrReg, index=memIndexOrObject, offset=0, kind=movePolyWord, isMutable=true}) :: argCode in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeUnaryRev({oper=BuiltIns.LongWordToTagged, arg1}, context, _, destination, tailCode) = let (* This is exactly the same as StringLengthWord at the moment. TODO: introduce a new ICode entry so that the next stage can optimise longword operations. 
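At present the code simply loads the single word in the box and tags it.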
*) val target = asTarget destination val argReg1 = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) val code = BlockSimple(TagValue{ source=argReg1, dest=target, isSigned=false, opSize=polyWordOpSize }) :: (* Use movePolyWord even on 32-in-64 since we're producing a 32-bit value anyway. *) BlockSimple(LoadArgument{source=wordAt addrReg, dest=argReg1, kind=movePolyWord}) :: argCode in (code, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.SignedToLongWord, arg1}, context, _, destination, tailCode) = let val addrReg = newPReg() and untagArg = newUReg() val (argCode, argReg1) = codeToPRegRev(arg1, context, tailCode) val (signExtend, sxReg) = case targetArch of ObjectId32Bit => let val sReg = newUReg() in ([BlockSimple(SignExtend32To64{source=RegisterArgument argReg1, dest=sReg})], sReg) end | _ => ([], argReg1) val code = BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untagArg, dest=addrReg, saveRegs=[]}) :: BlockSimple(UntagValue{source=sxReg, dest=untagArg, isSigned=true, cache=NONE, opSize=nativeWordOpSize}) :: signExtend @ argCode in moveIfNotAllowedRev(destination, code, RegisterArgument addrReg) end | codeToICodeUnaryRev({oper=BuiltIns.UnsignedToLongWord, arg1}, context, _, destination, tailCode) = let val addrReg = newPReg() and untagArg = newUReg() val (argCode, argReg1) = codeToPRegRev(arg1, context, tailCode) val code = BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untagArg, dest=addrReg, saveRegs=[]}) :: (* We can just use a polyWord operation to untag the unsigned value. *) BlockSimple(UntagValue{source=argReg1, dest=untagArg, isSigned=false, cache=NONE, opSize=polyWordOpSize}) :: argCode in moveIfNotAllowedRev(destination, code, RegisterArgument addrReg) end | codeToICodeUnaryRev({oper=BuiltIns.RealNeg precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val fpRegSrc = newUReg() and fpRegDest = newUReg() and sse2ConstReg = newUReg() (* The SSE2 code uses an SSE2 logical operation to flip the sign bit. This requires the values to be loaded into registers first because the logical operations require 128-bit operands. *) val (argCode, aReg1) = codeToPReg(arg1, context) (* Double precision values are always boxed, as are single precision values if they won't fit in a word. Otherwise we can use tagging. *) open BuiltIns val load = if precision = PrecDouble then BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveDouble}) else if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpRegSrc, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveFloat}) val code = case fpMode of FPModeX87 => [BlockSimple(X87FPUnaryOps{ fpOp=FCHS, dest=fpRegDest, source=fpRegSrc})] | FPModeSSE2 => let (* In single precision mode the sign bit is in the low 32-bits. There may be a better way to load it.
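XORing with a constant that has only the sign bit set negates the value without touching any other bits, so for example 1.0 becomes ~1.0 and a NaN keeps its payload.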
*) val signBit = if precision = PrecDouble then realSignBit else floatSignBit in [BlockSimple(LoadArgument{source=AddressConstant signBit, dest=sse2ConstReg, kind=MoveDouble}), BlockSimple(SSE2FPBinary{opc=SSE2BXor, resultReg=fpRegDest, arg1=fpRegSrc, arg2=RegisterArgument sse2ConstReg})] end val result = boxOrTagReal(fpRegDest, target, precision) in (revApp(argCode @ load :: code @ result, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealAbs precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val fpRegSrc = newUReg() and fpRegDest = newUReg() and sse2ConstReg = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) open BuiltIns val load = if precision = PrecDouble then BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveDouble}) else if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpRegSrc, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveFloat}) val code = case fpMode of FPModeX87 => [BlockSimple(X87FPUnaryOps{ fpOp=FABS, dest=fpRegDest, source=fpRegSrc})] | FPModeSSE2 => let val mask = if precision = PrecDouble then realAbsMask else floatAbsMask in [BlockSimple(LoadArgument{source=AddressConstant mask, dest=sse2ConstReg, kind=MoveDouble}), BlockSimple(SSE2FPBinary{opc=SSE2BAnd, resultReg=fpRegDest, arg1=fpRegSrc, arg2=RegisterArgument sse2ConstReg})] end val result = boxOrTagReal(fpRegDest, target, precision) in (revApp(argCode @ load :: code @ result, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealFixedInt precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val untagReg = newUReg() and fpReg = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) val floatOp = case fpMode of FPModeX87 => X87Float | FPModeSSE2 => SSE2Float val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double val _ = precision = BuiltIns.PrecDouble orelse raise InternalError "RealFixedInt - single" val code = argCode @ [BlockSimple(UntagValue{source=aReg1, dest=untagReg, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(floatOp{ dest=fpReg, source=RegisterArgument untagReg}), BlockSimple(BoxValue{boxKind=boxFloat, source=fpReg, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.FloatToDouble, arg1}, context, _, destination, tailCode) = let (* Convert a single precision floating point value to double precision. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* MoveFloat always converts from single to double-precision. 
*) val unboxOrUntag = case (fpMode, wordSize) of (FPModeX87, _) => [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg2, kind=MoveFloat})] | (FPModeSSE2, 0w4) => [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveFloat}), BlockSimple(SSE2FPUnary{opc=SSE2UFloatToDouble, resultReg=fpReg2, source=RegisterArgument fpReg})] | (FPModeSSE2, _) => [BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpReg, cache=NONE}), BlockSimple(SSE2FPUnary{opc=SSE2UFloatToDouble, resultReg=fpReg2, source=RegisterArgument fpReg})] val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double val code = argCode @ unboxOrUntag @ [BlockSimple(BoxValue{boxKind=boxFloat, source=fpReg2, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end - | codeToICodeUnaryRev({oper=BuiltIns.DoubleToFloat NONE, arg1}, context, _, destination, tailCode) = + | codeToICodeUnaryRev({oper=BuiltIns.DoubleToFloat, arg1}, context, _, destination, tailCode) = let (* Convert a double precision value to a single precision using the current rounding mode. This is simpler than setting the rounding mode and then restoring it. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* In 32-bit mode we need to box the float. In 64-bit mode we can tag it. *) val boxOrTag = case fpMode of FPModeX87 => [BlockSimple(BoxValue{boxKind=BoxX87Float, source=fpReg, dest=target, saveRegs=[]})] | FPModeSSE2 => BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=fpReg2, source=RegisterArgument fpReg}) :: boxOrTagReal(fpReg2, target, BuiltIns.PrecSingle) val code = argCode @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveDouble})] @ boxOrTag in (revApp(code, tailCode), RegisterArgument target, false) end - | codeToICodeUnaryRev({oper=BuiltIns.DoubleToFloat (SOME rndMode), arg1}, context, _, destination, tailCode) = - let - (* Convert a double precision value to a single precision. The rounding mode - is passed in explicitly. *) - val target = asTarget destination - val fpReg = newUReg() and fpReg2 = newUReg() - val (argCode, aReg1) = codeToPReg(arg1, context) - (* In 32-bit mode we need to box the float. In 64-bit mode we can tag it. *) - (* We need to save the rounding mode before we change it and restore it afterwards. *) - open IEEEReal - fun doConversion() = - case fpMode of - FPModeX87 => (* Convert the value using the appropriate rounding. *) - [BlockSimple(BoxValue{boxKind=BoxX87Float, source=fpReg, dest=target, saveRegs=[]})] - | FPModeSSE2 => - BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=fpReg2, source=RegisterArgument fpReg}) :: - boxOrTagReal(fpReg2, target, BuiltIns.PrecSingle) - val code = argCode @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveDouble})] @ - setAndRestoreRounding(rndMode, doConversion) - in - (revApp(code, tailCode), RegisterArgument target, false) - end - | codeToICodeUnaryRev({oper=BuiltIns.RealToInt(precision, rndMode), arg1}, context, _, destination, tailCode) = let val target = asTarget destination val chkOverflow = newCCRef() val convResult = newUReg() and wrkReg2 = newUReg() (* Convert a floating point value to an integer. We need to raise overflow if the result is out of range. We first convert the value to 32/64 bits then tag it. An overflow can happen either because the real number does not fit in 32/64 bits or if it is not a 31/63 bit value. 
Fortunately, if the first conversion fails the result is a value that causes an overflow when we try to shift it, so the check for overflow only needs to happen there. There is an SSE2 instruction that implements truncation (round to zero) directly but in other cases we need to set the rounding mode. *) val doConvert = case (fpMode, precision) of (FPModeX87, _) => let val fpReg = newUReg() val (argCode, aReg) = codeToPReg(arg1, context) fun doConvert() = [BlockSimple(X87RealToInt{source=fpReg, dest=convResult })] in argCode @ [BlockSimple(LoadArgument{source=wordAt aReg, dest=fpReg, kind=MoveDouble})] @ setAndRestoreRounding(rndMode, doConvert) end | (FPModeSSE2, BuiltIns.PrecDouble) => let val (argCode, argReg) = codeToPReg(arg1, context) fun doConvert() = [BlockSimple( SSE2RealToInt{source=wordAt argReg, dest=convResult, isDouble=true, isTruncate = rndMode = IEEEReal.TO_ZERO }) ] in argCode @ ( case rndMode of IEEEReal.TO_ZERO => doConvert() | _ => setAndRestoreRounding(rndMode, doConvert)) end | (FPModeSSE2, BuiltIns.PrecSingle) => let val (argCode, aReg) = codeToPReg(arg1, context) val fpReg = newUReg() fun doConvert() = [BlockSimple( SSE2RealToInt{source=RegisterArgument fpReg, dest=convResult, isDouble=false, isTruncate = rndMode = IEEEReal.TO_ZERO })] in argCode @ [BlockSimple(UntagFloat{source=RegisterArgument aReg, dest=fpReg, cache=NONE})] @ ( case rndMode of IEEEReal.TO_ZERO => doConvert() | _ => setAndRestoreRounding(rndMode, doConvert) ) end val checkAndTag = BlockSimple(ShiftOperation{ shift=SHL, resultReg=wrkReg2, operand=convResult, shiftAmount=IntegerConstant 1, ccRef=chkOverflow, opSize=polyWordOpSize}) :: checkOverflow context chkOverflow @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=wrkReg2, operand2=IntegerConstant 1, ccRef = newCCRef(), opSize=polyWordOpSize})] in (revApp(doConvert @ checkAndTag, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.TouchAddress, arg1}, context, _, destination, tailCode) = let (* Put the value in a register. This is not entirely necessary but ensures that if the value is a constant the constant will be included in the code. *) val (argCode, aReg) = codeToPRegRev(arg1, context, tailCode) in moveIfNotAllowedRev(destination, BlockSimple(TouchArgument{source=aReg}) :: argCode, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeUnaryRev({oper=BuiltIns.AllocCStack, arg1}, context, _, destination, tailCode) = (* Allocate space on the C stack. Assumes that the argument has already been aligned. 
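The sequence below loads the saved C stack pointer from the memory registers, subtracts the untagged byte count, stores the new pointer back and also returns it boxed as a LargeWord.word, i.e. the address of the newly allocated space.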
*) let val target = asTarget destination val (argCode, untaggedArg) = case arg1 of BICConstnt(value, _) => (tailCode, IntegerConstant(Word.toLargeInt(toShort value)) (* Leave untagged *)) | _ => let val (argCode, aReg) = codeToPRegRev(arg1, context, tailCode) val arg1Untagged = newUReg() in ( BlockSimple(UntagValue{source=aReg, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}) :: argCode, RegisterArgument arg1Untagged ) end val argReg1 = newUReg() and resReg1 = newUReg() val code = BlockSimple(BoxValue{boxKind=BoxLargeWord, source=resReg1, dest=target, saveRegs=[]}) :: BlockSimple(StoreMemReg{offset=memRegCStackPtr, source=resReg1, kind=moveNativeWord}) :: BlockSimple(ArithmeticFunction{oper=SUB, resultReg=resReg1, operand1=argReg1, operand2=untaggedArg, ccRef=newCCRef(), opSize=nativeWordOpSize}) :: BlockSimple(LoadMemReg{offset=memRegCStackPtr, dest=argReg1, kind=moveNativeWord}) :: argCode in (code, RegisterArgument target, false) end and codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test, isSigned}, arg1, arg2=BICConstnt(arg2Value, _)}, context, _, destination, tailCode) = let (* Comparisons. Because this is also used for pointer equality and even for exception matching it is perfectly possible that the argument could be an address. The higher levels used to generate this for pointer equality. *) val ccRef = newCCRef() val comparison = (* If the argument is a tagged value that will fit in 32-bits we can use the literal version. Use toLargeIntX here because the value will be sign-extended even if we're actually doing an unsigned comparison. *) if isShort arg2Value andalso is32bit(tag(Word.toLargeIntX(toShort arg2Value))) then let val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} in (* We're often comparing with a character or a string length field that has to be untagged. In that case we can avoid loading it into a register and untagging it by doing the comparison directly. *) case arg1 of BICLoadOperation{kind=LoadStoreUntaggedUnsigned, address} => let val (codeBaseIndex, codeUntag, memLoc) = codeAddressRev(address, false, context, tailCode) val literal = Word.toLargeIntX(toShort arg2Value) in BlockSimple(CompareLiteral{arg1=MemoryLocation memLoc, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: codeUntag @ codeBaseIndex end | BICLoadOperation{kind=LoadStoreMLByte _, address} => let val (codeBaseIndex, codeUntag, {base, index, offset, ...}) = codeAddressRev(address, true, context, tailCode) val _ = toShort arg2Value >= 0w0 andalso toShort arg2Value < 0w256 orelse raise InternalError "Compare byte not a byte" val literal = Word8.fromLargeWord(Word.toLargeWord(toShort arg2Value)) in BlockSimple(CompareByteMem{arg1={base=base, index=index, offset=offset}, arg2=literal, ccRef=ccRef}) :: codeUntag @ codeBaseIndex end | BICUnary({oper=BuiltIns.MemoryCellFlags, arg1}) => (* This occurs particularly in arbitrary precision comparisons. *) let val (baseCode, baseReg) = codeToPRegRev(arg1, context, tailCode) val _ = toShort arg2Value >= 0w0 andalso toShort arg2Value < 0w256 orelse raise InternalError "Compare memory cell not a byte" val literal = Word8.fromLargeWord(Word.toLargeWord(toShort arg2Value)) in BlockSimple(CompareByteMem{arg1={base=baseReg, index=memIndexOrObject, offset= ~1}, arg2=literal, ccRef=ccRef}) :: baseCode end | _ => let (* TODO: We could include rarer cases of tagging by looking at the code and seeing if it's a TagValue. 
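As a worked example of the general case: comparing against the character constant #"A" compares the register directly with 131, i.e. tag 65 = 2*65+1, so the argument never needs to be untagged.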
*) val (testCode, testDest, _) = codeToICodeRev(arg1, context, false, allow, tailCode) val literal = tag(Word.toLargeIntX(toShort arg2Value)) in BlockSimple(CompareLiteral{arg1=testDest, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: testCode end end else (* Addresses or larger values. We need to use a register comparison. *) let val (testCode, testReg) = codeToPRegRev(arg1, context, tailCode) val arg2Arg = constantAsArgument arg2Value in BlockSimple(WordComparison{arg1=testReg, arg2=arg2Arg, ccRef=ccRef, opSize=polyWordOpSize}) :: testCode end val target = asTarget destination in (makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, comparison), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test, isSigned}, arg1=BICConstnt(arg1Value, _), arg2}, context, _, destination, tailCode) = let (* If we have the constant first we need to reverse the test so the first argument is a register. *) val ccRef = newCCRef() val comparison = if isShort arg1Value andalso is32bit(tag(Word.toLargeIntX(toShort arg1Value))) then let val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} val (testCode, testDest, _) = codeToICodeRev(arg2, context, false, allow, tailCode) val literal = tag(Word.toLargeIntX(toShort arg1Value)) in BlockSimple(CompareLiteral{arg1=testDest, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: testCode end else (* Addresses or larger values. We need to use a register comparison. *) let val (testCode, testReg) = codeToPRegRev(arg2, context, tailCode) val arg1Arg = constantAsArgument arg1Value in BlockSimple(WordComparison{arg1=testReg, arg2=arg1Arg, ccRef=ccRef, opSize=polyWordOpSize}) :: testCode end val target = asTarget destination in (makeBoolResultRev(testAsBranch(leftRightTest test, isSigned, true), ccRef, target, comparison), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordComparison {test, isSigned}, arg1, arg2}, context, _, destination, tailCode) = let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (arg1Code, arg1Result, _) = codeToICodeRev(arg1, context, false, Allowed memOrReg, tailCode) val (arg2Code, arg2Result, _) = codeToICodeRev(arg2, context, false, Allowed memOrReg, arg1Code) val target = asTarget destination val code = case (arg1Result, arg2Result) of (RegisterArgument arg1Reg, arg2Result) => makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg1Reg, arg2=arg2Result, ccRef=ccRef, opSize=polyWordOpSize}) :: arg2Code) | (arg1Result, RegisterArgument arg2Reg) => (* The second argument is in a register - switch the sense of the test. *) makeBoolResultRev(testAsBranch(leftRightTest test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg2Reg, arg2=arg1Result, ccRef=ccRef, opSize=polyWordOpSize}) :: arg2Code) | (arg1Result, arg2Result) => let (* Have to load an argument - pick the first. *) val arg1Reg = newPReg() in makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg1Reg, arg2=arg2Result, ccRef=ccRef, opSize=polyWordOpSize}) :: BlockSimple(LoadArgument{source=arg1Result, dest=arg1Reg, kind=movePolyWord}) :: arg2Code) end in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.PointerEq, arg1, arg2}, context, isTail, destination, tailCode) = (* Equality of general values which can include pointers. 
This can be treated exactly as a word equality. It has to be analysed differently for indexed cases. *) codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test=BuiltIns.TestEqual, isSigned=false}, arg1=arg1, arg2=arg2}, context, isTail, destination, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.FixedPrecisionArith oper, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val code = codeFixedPrecisionArith(oper, arg1, arg2, context, target, checkOverflow context) in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. N.B. it is possible to have type-incorrect values in dead code. i.e. code that will never be executed because of a run-time check. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef = newCCRef(), opSize=polyWordOpSize}) :: arg1Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg2Code, aReg2) = codeToPRegRev(arg2, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg2, operand2=IntegerConstant constVal, ccRef = newCCRef(), opSize=polyWordOpSize}) :: arg2Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) (* Use LEA to do the addition since we're not concerned with overflow. This is shorter than subtracting the tag and adding the values and also moves the result into the appropriate register. *) val code = arg1Code @ arg2Code @ [BlockSimple(LoadEffectiveAddress{base=SOME aReg1, offset= ~1, index=MemIndex1 aReg2, dest=target, opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithSub, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. 
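The arithmetic is (2a+1) - 2b = 2(a-b)+1, so subtracting the semitagged constant from the tagged argument gives a correctly tagged result without any separate retagging step.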
*) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=polyWordOpSize}) :: arg1Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithSub, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val aReg3 = newPReg() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Do the subtraction and add in the tag bit. This could be reordered if we have cascaded operations since we don't need to check for overflow. *) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=RegisterArgument aReg2, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = codeMultiplyConstantWordRev(arg1, context, destination, if isShort value then toShort value else 0w0, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = codeMultiplyConstantWordRev(arg2, context, destination, if isShort value then toShort value else 0w0, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() and resUntagged = newUReg() val code = arg1Code @ arg2Code @ (* Shift one argument and subtract the tag from the other. It's possible this could be reordered if we have a value that is already untagged. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=arg2Untagged, operand1=aReg2, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=arg1Untagged, operand2=RegisterArgument arg2Untagged, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithDiv, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() val code = arg1Code @ arg2Code @ (* Shift both of the arguments to remove the tags. We don't test for zero here - that's done explicitly. 
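Untagging is just a right shift of each operand, the hardware division then gives the true quotient, and TagValue reinstates the tag; the remainder register is simply discarded in this case.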
*) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = false, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=quotient, dest=target, isSigned=false, opSize=polyWordOpSize })] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMod, arg1, arg2}, context, _, destination, tailCode) = let (* Identical to Quot except that the result is the remainder. *) val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() val code = arg1Code @ arg2Code @ (* Shift both of the arguments to remove the tags. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = false, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=remainder, dest=target, isSigned=false, opSize=polyWordOpSize })] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith _, ...}, _, _, _, _) = raise InternalError "codeToICodeNonRev: WordArith - unimplemented operation" | codeToICodeBinaryRev({oper=BuiltIns.WordLogical logOp, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) (* Use a semitagged value for XOR. This preserves the tag bit. Use toLargeIntX here because the operations will sign-extend 32-bit values. *) val constVal = if isShort value then (case logOp of BuiltIns.LogicalXor => semitag | _ => tag) (Word.toLargeIntX(toShort value)) else 0 val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constVal <= 0xffffffff andalso constVal >= 0 then OpSize32 else polyWordOpSize val code = arg1Code @ [BlockSimple(ArithmeticFunction{oper=oper, resultReg=target, operand1=arg1Reg, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=opSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical logOp, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, arg2Reg) = codeToPReg(arg2, context) (* Use a semitagged value for XOR. This preserves the tag bit. *) val constVal = if isShort value then (case logOp of BuiltIns.LogicalXor => semitag | _ => tag) (Word.toLargeIntX(toShort value)) else 0 val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. 
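This is safe because a 32-bit operation zeroes the upper half of the destination register on X64, which is exactly the result of ANDing with a constant whose upper 32 bits are zero.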
*) val opSize = if logOp = BuiltIns.LogicalAnd andalso constVal <= 0xffffffff andalso constVal >= 0 then OpSize32 else polyWordOpSize val code = arg2Code @ [BlockSimple(ArithmeticFunction{oper=oper, resultReg=target, operand1=arg2Reg, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=opSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalOr, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Or-ing preserves the tag bit. *) [BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalAnd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Since they're both tagged the result will be tagged. *) [BlockSimple(ArithmeticFunction{oper=AND, resultReg=target, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalXor, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val aReg3 = newPReg() val code = arg1Code @ arg2Code @ (* We need to restore the tag bit after the operation. *) [BlockSimple(ArithmeticFunction{oper=XOR, resultReg=aReg3, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordShift BuiltIns.ShiftLeft, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = (* Use the general case multiplication code. This will use a shift except for small values. It does detect special cases such as multiplication by 4 and 8 which can be implemented with LEA. *) codeMultiplyConstantWordRev(arg1, context, destination, if isShort value then Word.<<(0w1, toShort value) else 0w1, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordShift shift, arg1, arg2}, context, _, destination, tailCode) = (* N.B. X86 shifts of greater than the word length mask the higher bits. That isn't what ML wants but that is dealt with at a higher level *) let open BuiltIns val target = asTarget destination (* Load the value into an untagged register. If this is a left shift we need to clear the tag bit. We don't need to do that for right shifts. *) val argRegUntagged = newUReg() val arg1Code = case arg1 of BICConstnt(value, _) => let (* Remove the tag bit. This isn't required for right shifts. 
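For a left shift the tag bit would otherwise be shifted up into the value: (2n+1)<<s is wrong, while (2n<<s)+1 = 2(n<<s)+1 is the tagged result, and the final OR of 1 below supplies the bottom bit in all cases.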
*) val cnstntVal = if isShort value then semitag(Word.toLargeInt(toShort value)) else 1 in [BlockSimple(LoadArgument{source=IntegerConstant cnstntVal, dest=argRegUntagged, kind=movePolyWord})] end | _ => let val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val removeTag = case shift of ShiftLeft => ArithmeticFunction{oper=SUB, resultReg=argRegUntagged, operand1=arg1Reg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize} | _ => LoadArgument{source=RegisterArgument arg1Reg, dest=argRegUntagged, kind=movePolyWord} in arg1Code @ [BlockSimple removeTag] end (* The shift amount can usefully be a constant. *) val (arg2Code, untag2Code, arg2Arg) = codeAsUntaggedByte(arg2, false, context) val resRegUntagged = newUReg() val shiftOp = case shift of ShiftLeft => SHL | ShiftRightLogical => SHR | ShiftRightArithmetic => SAR val code = arg1Code @ arg2Code @ untag2Code @ [BlockSimple(ShiftOperation{ shift=shiftOp, resultReg=resRegUntagged, operand=argRegUntagged, shiftAmount=arg2Arg, ccRef=newCCRef(), opSize=polyWordOpSize }), (* Set the tag by ORing it in. This will work whether or not a right shift has shifted a 1 into this position. *) BlockSimple( ArithmeticFunction{oper=OR, resultReg=target, operand1=resRegUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.AllocateByteMemory, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val sizeReg = newPReg() and baseReg = newPReg() val sizeCode = codeToICodeTarget(arg1, context, false, sizeReg) val (flagsCode, flagUntag, flagArg) = codeAsUntaggedByte(arg2, false, context) val code =sizeCode @ flagsCode @ [BlockSimple(AllocateMemoryVariable{size=sizeReg, dest=baseReg, saveRegs=[]})] @ flagUntag @ [BlockSimple(StoreArgument{ source=flagArg, base=baseReg, offset= ~1, index=memIndexOrObject, kind=MoveByte, isMutable=false}), BlockSimple InitialisationComplete, BlockSimple(LoadArgument{ source=RegisterArgument baseReg, dest=target, kind=movePolyWord})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordComparison test, arg1, arg2}, context, _, destination, tailCode) = let val ccRef = newCCRef() val (arg1Code, arg1Reg) = codeToPRegRev(arg1, context, tailCode) (* In X64 we can extract the word from a constant and do the comparison directly. That can't be done in X86/32 because the value isn't tagged and might look like an address. The RTS scans for comparisons with inline constant addresses. *) val (arg2Code, arg2Operand) = if targetArch <> Native32Bit then (* Native 64-bit or 32-in-64. 
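*)
(* Presumably largeWordConstant simply extracts the LargeWord.word value from the boxed constant; the untagged literal is acceptable inline here because only Native32Bit has the RTS scan for constants that could look like addresses. *)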
*) ( case arg2 of BICConstnt(value, _) => (arg1Code, IntegerConstant(largeWordConstant value)) | _ => let val (code, reg) = codeToPRegRev(arg2, context, arg1Code) in (code, wordAt reg) end ) else let val (code, reg) = codeToPRegRev(arg2, context, arg1Code) in (code, wordAt reg) end val argReg = newUReg() val target = asTarget destination val code = makeBoolResultRev(testAsBranch(test, false, true), ccRef, target, BlockSimple(WordComparison{arg1=argReg, arg2=arg2Operand, ccRef=ccRef, opSize=nativeWordOpSize}) :: BlockSimple(LoadArgument{source=wordAt arg1Reg, dest=argReg, kind=moveNativeWord}) :: arg2Code) in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code =arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code = arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg2, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithSub, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code = arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument 
target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithSub, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithMult, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val resValue = newUReg() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val argReg1 = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg1, kind=moveNativeWord}), BlockSimple(Multiplication{resultReg=resValue, operand1=argReg1, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=resValue, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithDiv, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val quotient = newUReg() and remainder = newUReg() val dividendReg = newUReg() and divisorReg = newUReg() val code = arg1Code @ arg2Code @ (* We don't test for zero here - that's done explicitly. *) [BlockSimple(LoadArgument{source=wordAt aReg1, dest=dividendReg, kind=moveNativeWord}), BlockSimple(LoadArgument{source=wordAt aReg2, dest=divisorReg, kind=moveNativeWord}), BlockSimple(Division { isSigned = false, dividend=dividendReg, divisor=RegisterArgument divisorReg, quotient=quotient, remainder=remainder, opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=quotient, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithMod, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val quotient = newUReg() and remainder = newUReg() val dividendReg = newUReg() and divisorReg = newUReg() val code = arg1Code @ arg2Code @ (* We don't test for zero here - that's done explicitly. 
*) [BlockSimple(LoadArgument{source=wordAt aReg1, dest=dividendReg, kind=moveNativeWord}), BlockSimple(LoadArgument{source=wordAt aReg2, dest=divisorReg, kind=moveNativeWord}), BlockSimple(Division { isSigned = false, dividend=dividendReg, divisor=RegisterArgument divisorReg, quotient=quotient, remainder=remainder, opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=remainder, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith _, ...}, _, _, _, _) = raise InternalError "codeToICodeNonRev: LargeWordArith - unimplemented operation" | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constantValue <= 0xffffffff andalso constantValue >= 0 then OpSize32 else nativeWordOpSize val code = arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=opSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. 
*) val opSize = if logOp = BuiltIns.LogicalAnd andalso constantValue <= 0xffffffff andalso constantValue >= 0 then OpSize32 else nativeWordOpSize val code = arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg2, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=opSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordShift shift, arg1, arg2}, context, _, destination, tailCode) = (* The shift is always a Word.word value i.e. tagged. There is a check at the higher level that the shift does not exceed 32/64 bits. *) let open BuiltIns val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, untag2Code, arg2Arg) = codeAsUntaggedByte(arg2, false, context) val aReg3 = newUReg() val shiftOp = case shift of ShiftLeft => SHL | ShiftRightLogical => SHR | ShiftRightArithmetic => SAR val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord})] @ untag2Code @ [BlockSimple(ShiftOperation{ shift=shiftOp, resultReg=aReg3, operand=argReg, shiftAmount=arg2Arg, ccRef=newCCRef(), opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealArith(fpOpPrec as (fpOp, fpPrec)), arg1, arg2}, context, _, destination, tailCode) = let open BuiltIns val commutative = case fpOp of ArithSub => NonCommutative | ArithDiv => NonCommutative | ArithAdd => Commutative | ArithMult => Commutative | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" val (argCodeRev, fpRegSrc, arg2Value) = codeFPBinaryArgsRev(arg1, arg2, fpPrec, commutative, context, []) val argCode = List.rev argCodeRev val target = asTarget destination val fpRegDest = newUReg() val arith = case fpMode of FPModeX87 => let val fpOp = case fpOp of ArithAdd => FADD | ArithSub => FSUB | ArithMult => FMUL | ArithDiv => FDIV | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" val isDouble = case fpPrec of PrecSingle => false | PrecDouble => true in [BlockSimple(X87FPArith{ opc=fpOp, resultReg=fpRegDest, arg1=fpRegSrc, arg2=arg2Value, isDouble=isDouble})] end | FPModeSSE2 => let val fpOp = case fpOpPrec of (ArithAdd, PrecSingle) => SSE2BAddSingle | (ArithSub, PrecSingle) => SSE2BSubSingle | (ArithMult, PrecSingle) => SSE2BMulSingle | (ArithDiv, PrecSingle) => 
SSE2BDivSingle | (ArithAdd, PrecDouble) => SSE2BAddDouble | (ArithSub, PrecDouble) => SSE2BSubDouble | (ArithMult, PrecDouble) => SSE2BMulDouble | (ArithDiv, PrecDouble) => SSE2BDivDouble | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" in [BlockSimple(SSE2FPBinary{ opc=fpOp, resultReg=fpRegDest, arg1=fpRegSrc, arg2=arg2Value})] end (* Box or tag the result. *) val result = boxOrTagReal(fpRegDest, target, fpPrec) in (revApp(argCode @ arith @ result, tailCode), RegisterArgument target, false) end (* Floating point comparison. This is complicated because we have different instruction sequences for SSE2 and X87. We also have to get the handling of unordered (NaN) values right. All the tests are treated as false if either argument is a NaN. To combine that test with the other tests we sometimes have to reverse the comparison. *) | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(BuiltIns.TestEqual, precision), arg1, arg2}, context, _, destination, tailCode) = let (* Get the arguments. It's commutative. *) val (arg2Code, fpReg, arg2Val) = codeFPBinaryArgsRev(arg1, arg2, precision, Commutative, context, tailCode) val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testReg1 = newUReg() and testReg2 = newUReg() and testReg3 = newUReg() (* If this is X87 we get the condition into RAX and test it there. If it is SSE2 we have to treat the unordered result (parity set) specially. *) val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=XOR, resultReg=testReg3, operand1=testReg2, operand2=IntegerConstant 0x4000, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant 0x4400, ccRef=newCCRef(), opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) | FPModeSSE2 => let val noParityLabel = newLabel() val resultLabel = newLabel() val falseLabel = newLabel() val trueLabel = newLabel() val mergeReg = newMergeReg() in BlockSimple(LoadArgument{ source=RegisterArgument mergeReg, dest=target, kind=Move32Bit }) :: BlockLabel resultLabel :: BlockFlow(Unconditional resultLabel) :: (* Result is false if parity is set i.e. unordered or if unequal. *) BlockSimple(LoadArgument{ source=IntegerConstant(tag 0), dest=mergeReg, kind=Move32Bit }) :: BlockLabel falseLabel :: BlockFlow(Unconditional resultLabel) :: (* Result is true if it's ordered and equal. *) BlockSimple(LoadArgument{ source=IntegerConstant(tag 1), dest=mergeReg, kind=Move32Bit }) :: BlockLabel trueLabel :: (* Not unordered - test the equality *) BlockFlow(Conditional{ccRef=ccRef1, condition=JE, trueJump=trueLabel, falseJump=falseLabel}) :: BlockLabel noParityLabel :: (* Go to falseLabel if unordered and therefore not equal. *) BlockFlow(Conditional{ccRef=ccRef1, condition=JP, trueJump=falseLabel, falseJump=noParityLabel}) :: BlockSimple(SSE2Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code end in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(BuiltIns.TestUnordered, precision), arg1, arg2}, context, _, destination, tailCode) = let (* The unordered test is really included because it is easy to implement and is the simplest way of implementing isNan. *) (* Get the arguments. It's commutative. 
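For SSE2 a single test of the parity flag is enough, since the comparison sets parity precisely when the operands are unordered, i.e. when at least one of them is a NaN.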
*) val (arg2Code, fpReg, arg2Val) = codeFPBinaryArgsRev(arg1, arg2, precision, Commutative, context, tailCode) val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testReg1 = newUReg() and testReg2 = newUReg() and testReg3 = newUReg() (* If this is X87 we get the condition into RAX and test it there. If it is SSE2 we have to treat the unordered result (parity set) specially. *) val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => (* And with 0x4500. We have to use XOR rather than CMP to avoid having an untagged constant comparison. *) makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=XOR, resultReg=testReg3, operand1=testReg2, operand2=IntegerConstant 0x4500, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant 0x4500, ccRef=newCCRef(), opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) | FPModeSSE2 => makeBoolResultRev(JP, ccRef1, target, BlockSimple(SSE2Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(comparison, precision), arg1, arg2}, context, _, destination, tailCode) = let (* Ordered comparisons are complicated because they are all defined to be false if either argument is a NaN. We have two different tests for a > b and a >= b and implement a < b and a <= b by changing the order of the arguments. *) val (arg1Code, arg1Value) = codeFPArgument(arg1, precision, context, tailCode) val (arg2Code, arg2Value) = codeFPArgument(arg2, precision, context, arg1Code) val (regArg, opArg, isGeq) = case comparison of BuiltIns.TestGreater => (arg1Value, arg2Value, false) | BuiltIns.TestLess => (arg2Value, arg1Value, false) (* Reversed: a<b is b>a. *) | BuiltIns.TestGreaterEqual => (arg1Value, arg2Value, true) | BuiltIns.TestLessEqual => (arg2Value, arg1Value, true) (* Reversed: a<=b is b>=a. *) | _ => raise InternalError "RealComparison: unimplemented operation" (* Load the first operand into a register. 
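The SSE2 conditions chosen below rely on the flag settings of the compare instruction: a greater result clears both CF and ZF (JA), greater-or-equal clears CF (JNB), and an unordered result sets CF, ZF and PF, so either test is correctly false when a NaN is involved.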
*) val (fpReg, loadCode) = case regArg of RegisterArgument fpReg => (fpReg, arg2Code) | regArg => let val fpReg = newUReg() val moveOp = case precision of BuiltIns.PrecDouble => MoveDouble | BuiltIns.PrecSingle => MoveFloat in (fpReg, BlockSimple(LoadArgument{source=regArg, dest=fpReg, kind=moveOp}) :: arg2Code) end val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => let val testReg1 = newUReg() and testReg2 = newUReg() val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testBits = if isGeq then 0x500 else 0x4500 in makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant testBits, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=opArg, ccRef=ccRef1, isDouble = isDouble}) :: loadCode) end | FPModeSSE2 => let val ccRef1 = newCCRef() val condition = if isGeq then JNB (* >=, <= *) else JA (* >, < *) in makeBoolResultRev(condition, ccRef1, target, BlockSimple(SSE2Compare{arg1=fpReg, arg2=opArg, ccRef=ccRef1, isDouble = isDouble}) :: loadCode) end in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.FreeCStack, arg1, arg2}, context, _, destination, tailCode) = (* Free space on the C stack by storing the address in the argument into the "memory register". This is a binary operation that takes the base address and the size. The base address isn't used in this version. *) let val (arg2Code, untaggedLength) = case arg2 of BICConstnt(value, _) => (tailCode, IntegerConstant(Word.toLargeInt(toShort value)) (* Leave untagged *)) | _ => let val (arg2Code, lengthReg) = codeToPRegRev(arg2, context, tailCode) val lengthUntagged = newUReg() in ( BlockSimple(UntagValue{source=lengthReg, dest=lengthUntagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}) :: arg2Code, RegisterArgument lengthUntagged ) end (* Evaluate the first argument for side-effects but discard it. *) val (arg1Code, _, _) = codeToICodeRev(arg1, context, false, Allowed allowDefer, arg2Code) val addrReg = newUReg() and resAddrReg = newUReg() val code = BlockSimple(StoreMemReg{offset=memRegCStackPtr, source=resAddrReg, kind=moveNativeWord}) :: BlockSimple(ArithmeticFunction{oper=ADD, resultReg=resAddrReg, operand1=addrReg, operand2=untaggedLength, ccRef=newCCRef(), opSize=nativeWordOpSize}) :: BlockSimple(LoadMemReg{offset=memRegCStackPtr, dest=addrReg, kind=moveNativeWord}) :: arg1Code in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeBinaryRev({oper=BuiltIns.AtomicExchangeAdd, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let (* The value to be added is always a constant. *) val target = asTarget destination val incrReg = newUReg() val (arg1Code, addrReg) = codeToPRegRev(arg1, context, tailCode) (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. 
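Adding the semitagged increment 2n to a tagged cell containing 2a+1 leaves the properly tagged 2(a+n)+1. The same idea appears in codeMultiplyConstantWordRev below: c*(2a+1) = 2*c*a + c, so subtracting the tagCorrection of c-1 after the multiply or shift restores the tag.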
*) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val code = BlockSimple(AtomicExchangeAndAdd{ base=addrReg, source=incrReg, resultReg=target }) :: BlockSimple(LoadArgument{source=IntegerConstant constVal, dest=incrReg, kind=movePolyWord}) :: arg1Code in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.AtomicExchangeAdd, arg1, arg2}, context, _, destination, tailCode) = let (* This is only needed if the function is returned as a value. *) val target = asTarget destination val incrReg = newUReg() val (arg1Code, addrReg) = codeToPRegRev(arg1, context, tailCode) val (arg2Code, arg2Reg) = codeToPRegRev(arg2, context, arg1Code) val code = BlockSimple(AtomicExchangeAndAdd{ base=addrReg, source=incrReg, resultReg=target }) :: (* Subtract the tag from the increment before the operation *) BlockSimple(ArithmeticFunction{oper=SUB, resultReg=incrReg, operand1=arg2Reg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}) :: arg2Code in (code, RegisterArgument target, false) end (* Multiply tagged word by a constant. We're not concerned with overflow so it's possible to use various short cuts. *) and codeMultiplyConstantWordRev(arg, context, destination, multiplier, tailCode) = let val target = asTarget destination val (argCode, aReg) = codeToPReg(arg, context) val doMultiply = case multiplier of 0w0 => [BlockSimple(LoadArgument{source=IntegerConstant 1, dest=target, kind=movePolyWord})] | 0w1 => [BlockSimple(LoadArgument{source=RegisterArgument aReg, dest=target, kind=movePolyWord})] | 0w2 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~1, index=MemIndex1 aReg, dest=target, opSize=polyWordOpSize})] | 0w3 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~2, index=MemIndex2 aReg, dest=target, opSize=polyWordOpSize})] | 0w4 => [BlockSimple(LoadEffectiveAddress{base=NONE, offset= ~3, index=MemIndex4 aReg, dest=target, opSize=polyWordOpSize})] | 0w5 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~4, index=MemIndex4 aReg, dest=target, opSize=polyWordOpSize})] | 0w8 => [BlockSimple(LoadEffectiveAddress{base=NONE, offset= ~7, index=MemIndex8 aReg, dest=target, opSize=polyWordOpSize})] | 0w9 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~8, index=MemIndex8 aReg, dest=target, opSize=polyWordOpSize})] | _ => let val tReg = newUReg() val tagCorrection = Word.toLargeInt multiplier - 1 fun getPower2 n = let fun p2 (n, l) = if n = 0w1 then SOME l else if Word.andb(n, 0w1) = 0w1 then NONE else p2(Word.>>(n, 0w1), l+0w1) in if n = 0w0 then NONE else p2(n,0w0) end val multiply = case getPower2 multiplier of SOME power => (* Shift it including the tag. *) BlockSimple(ShiftOperation{ shift=SHL, resultReg=tReg, operand=aReg, shiftAmount=IntegerConstant(Word.toLargeInt power), ccRef=newCCRef(), opSize=polyWordOpSize }) | NONE => (* Multiply including the tag. *) BlockSimple(Multiplication{resultReg=tReg, operand1=aReg, operand2=IntegerConstant(Word.toLargeInt multiplier), ccRef=newCCRef(), opSize=polyWordOpSize}) in [multiply, BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=tReg, operand2=IntegerConstant tagCorrection, ccRef=newCCRef(), opSize=polyWordOpSize})] end in (revApp(argCode @ doMultiply, tailCode), RegisterArgument target, false) end and codeToICodeAllocate({numWords as BICConstnt(length, _), flags as BICConstnt(flagValue, _), initial}, context, _, destination) = (* Constant length and flags is used for ref. We could handle other cases. 
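For this case everything is done inline: AllocateMemoryOperation allocates the single word with the given flag byte, the initial value is stored, and InitialisationComplete marks the cell as fully initialised before it can escape.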
*) if isShort length andalso isShort flagValue andalso toShort length = 0w1 then let val target = asTarget destination (* Force a different register. *) val vecLength = Word.toInt(toShort length) val flagByte = Word8.fromLargeWord(Word.toLargeWord(toShort flagValue)) val memAddr = newPReg() and valueReg = newPReg() fun initialise n = BlockSimple(StoreArgument{ source=RegisterArgument valueReg, offset=n*Word.toInt wordSize, base=memAddr, index=memIndexOrObject, kind=movePolyWord, isMutable=false}) val code = codeToICodeTarget(initial, context, false, valueReg) @ [BlockSimple(AllocateMemoryOperation{size=vecLength, flags=flagByte, dest=memAddr, saveRegs=[]})] @ List.tabulate(vecLength, initialise) @ [BlockSimple InitialisationComplete, BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord})] in (code, RegisterArgument target, false) end else (* If it's longer use the full run-time form. *) allocateMemoryVariable(numWords, flags, initial, context, destination) | codeToICodeAllocate({numWords, flags, initial}, context, _, destination) = allocateMemoryVariable(numWords, flags, initial, context, destination) and codeToICodeLoad({kind=LoadStoreMLWord _, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, false, context) in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument {source=MemoryLocation memLoc, dest=target, kind=movePolyWord})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreMLByte _, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, true, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveByte}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC8, address}, context, _, destination) = let (* Load a byte from C memory. This is almost exactly the same as LoadStoreMLByte except that the base address is a LargeWord.word value. *) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w1, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveByte}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC16, address}, context, _, destination) = let (* Load a 16-bit value from C memory. *) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w2, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move16Bit}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC32, address}, context, _, destination) = let (* Load a 32-bit value from C memory. If this is 64-bit mode we can tag it but if this is 32-bit mode we need to box it. 
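A 32-bit value always fits in a tagged 64-bit word - the code below notes that it becomes at most 33 bits - but it may not fit in the 31 bits available for a tagged value in 32-bit mode, so there it has to be boxed as a LargeWord.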
*) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w4, context) val untaggedResReg = newUReg() val boxTagCode = if targetArch = Native64Bit then BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize64 (* It becomes 33 bits *)}) else BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untaggedResReg, dest=target, saveRegs=[]}) in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move32Bit}), boxTagCode], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC64, address}, context, _, destination) = let (* Load a 64-bit value from C memory. This is only allowed in 64-bit mode. The result is a boxed value. *) val _ = targetArch <> Native32Bit orelse raise InternalError "codeToICodeNonRev: BICLoadOperation LoadStoreC64 in 32-bit" val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w8, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move64Bit}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreCFloat, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w4, context) val untaggedResReg = newUReg() val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double (* We need to convert the float into a double. *) val loadArg = case fpMode of FPModeX87 => BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveFloat}) | FPModeSSE2 => BlockSimple(SSE2FPUnary { source=MemoryLocation memLoc, resultReg=untaggedResReg, opc=SSE2UFloatToDouble}) in (codeBaseIndex @ codeUntag @ [loadArg, BlockSimple(BoxValue{boxKind=boxFloat, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreCDouble, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w8, context) val untaggedResReg = newUReg() val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveDouble}), BlockSimple(BoxValue{boxKind=boxFloat, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreUntaggedUnsigned, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, false, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=movePolyWord}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=polyWordOpSize})], RegisterArgument target, false) end and codeToICodeStore({kind=LoadStoreMLWord _, address, value}, context, _, destination) = let val (sourceCode, source, _) = codeToICode(value, context, false, Allowed allowInMemMove) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, false, context) val code = codeBaseIndex @ sourceCode @ codeUntag @ [BlockSimple(StoreArgument {source=source, base=base, 
offset=offset, index=index, kind=movePolyWord, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreMLByte _, address, value}, context, _, destination) = let val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, true, context) (* We have to untag the value to store. *) val (valueCode, untagValue, valueArg) = codeAsUntaggedByte(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=valueArg, base=base, offset=offset, index=index, kind=MoveByte, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC8, address, value}, context, _, destination) = let (* Store a byte to C memory. Almost exactly the same as LoadStoreMLByte. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w1, context) val (valueCode, untagValue, valueArg) = codeAsUntaggedByte(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=valueArg, base=base, offset=offset, index=index, kind=MoveByte, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC16, address, value}, context, _, destination) = let (* Store a 16-bit value to C memory. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w2, context) (* We don't currently implement 16-bit constant moves so this must always be in a reg. *) val (valueCode, untagValue, valueArg) = codeAsUntaggedToReg(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueArg, base=base, offset=offset, index=index, kind=Move16Bit, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC32, address, value}, context, _, destination) = (* Store a 32-bit value. If this is 64-bit mode we untag it but if this is 32-bit mode we unbox it. *) let val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w4, context) val code = if targetArch = Native64Bit then let (* We don't currently implement 32-bit constant moves so this must always be in a reg. *) val (valueCode, untagValue, valueArg) = codeAsUntaggedToReg(value, false, context) in codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueArg, base=base, offset=offset, index=index, kind=Move32Bit, isMutable=true})] end else let val (valueCode, valueReg) = codeToPReg(value, context) val valueReg1 = newUReg() in codeBaseIndex @ valueCode @ BlockSimple(LoadArgument{source=wordAt valueReg, dest=valueReg1, kind=Move32Bit}) :: codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueReg1, base=base, offset=offset, index=index, kind=Move32Bit, isMutable=true})] end in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC64, address, value}, context, _, destination) = let (* Store a 64-bit value. 
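This case is only reached in 64-bit mode, which the check below enforces; the value to store arrives boxed as a LargeWord.word, i.e. a pointer to an eight-byte cell, so the code first loads the payload with wordAt into an untagged register and then performs the Move64Bit store.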
*) val _ = targetArch <> Native32Bit orelse raise InternalError "codeToICodeNonRev: BICStoreOperation LoadStoreC64 in 32-bit" val (valueCode, valueReg) = codeToPReg(value, context) val valueReg1 = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w8, context) val code = codeBaseIndex @ valueCode @ codeUntag @ [BlockSimple(LoadArgument{source=wordAt valueReg, dest=valueReg1, kind=Move64Bit}), BlockSimple(StoreArgument {source=RegisterArgument valueReg1, base=base, offset=offset, index=index, kind=Move64Bit, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreCFloat, address, value}, context, _, destination) = let val floatReg = newUReg() and float2Reg = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w4, context) val (valueCode, valueReg) = codeToPReg(value, context) (* If we're using an SSE2 reg we have to convert it from double to single precision. *) val (storeReg, cvtCode) = case fpMode of FPModeSSE2 => (float2Reg, [BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=float2Reg, source=RegisterArgument floatReg})]) | FPModeX87 => (floatReg, []) val code = codeBaseIndex @ valueCode @ codeUntag @ BlockSimple(LoadArgument{source=wordAt valueReg, dest=floatReg, kind=MoveDouble}) :: cvtCode @ [BlockSimple(StoreArgument {source=RegisterArgument storeReg, base=base, offset=offset, index=index, kind=MoveFloat, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreCDouble, address, value}, context, _, destination) = let val floatReg = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w8, context) val (valueCode, valueReg) = codeToPReg(value, context) val code = codeBaseIndex @ valueCode @ codeUntag @ [BlockSimple(LoadArgument{source=wordAt valueReg, dest=floatReg, kind=MoveDouble}), BlockSimple(StoreArgument {source=RegisterArgument floatReg, base=base, offset=offset, index=index, kind=MoveDouble, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreUntaggedUnsigned, address, value}, context, _, destination) = let (* We have to untag the value to store. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, false, context) (* See if it's a constant. This is frequently used to set the last word of a string to zero. *) (* We have to be a bit more careful on the X86. We use moves to store constants that can include addresses. To avoid problems we only use a move if the value is zero or odd and so looks like a tagged value. 
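The point is that the garbage collector may scan the cell: an odd immediate looks exactly like a tagged integer and zero is never treated as an address, so both are safe, while an even non-zero immediate could be mistaken for a pointer. Illustrative example: clearing the final word of a string stores the constant 0 directly, whereas storing 6 on a 32-bit target would take the register path below.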
*) val storeAble = case value of BICConstnt(value, _) => if not(isShort value) then NONE else let val ival = Word.toLargeIntX(toShort value) in if targetArch = Native64Bit then if is32bit ival then SOME ival else NONE else if ival = 0 orelse ival mod 2 = 1 then SOME ival else NONE end | _ => NONE val (storeVal, valCode) = case storeAble of SOME value => (IntegerConstant value (* Leave untagged *), []) | NONE => let val valueReg = newPReg() and valueReg1 = newUReg() in (RegisterArgument valueReg1, codeToICodeTarget(value, context, false, valueReg) @ [BlockSimple(UntagValue{dest=valueReg1, source=valueReg, isSigned=false, cache=NONE, opSize=polyWordOpSize})]) end val code = codeBaseIndex @ valCode @ codeUntag @ [BlockSimple(StoreArgument {source=storeVal, base=base, offset=offset, index=index, kind=movePolyWord, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end and codeToICodeBlock({kind=BlockOpCompareByte, sourceLeft, destRight, length}, context, _, destination) = let (* This is effectively a big-endian comparison since we compare the bytes until we find an inequality. *) val target = asTarget destination val mergeResult = newMergeReg() val vec1Reg = newUReg() and vec2Reg = newUReg() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddress(sourceLeft, true, context) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddress(destRight, true, context) val ccRef = newCCRef() val labLess = newLabel() and labGreater = newLabel() and exitLab = newLabel() val labNotLess = newLabel() and labNotGreater = newLabel() val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToReg(length, false (* unsigned *), context) val code = leftCode @ rightCode @ lengthCode @ leftUntag @ [BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg})] @ rightUntag @ [BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg})] @ lengthUntag @ [BlockSimple(CompareByteVectors{ vec1Addr=vec1Reg, vec2Addr=vec2Reg, length=lengthArg, ccRef=ccRef }), (* N.B. These are unsigned comparisons. *) BlockFlow(Conditional{ ccRef=ccRef, condition=JB, trueJump=labLess, falseJump=labNotLess }), BlockLabel labNotLess, BlockFlow(Conditional{ ccRef=ccRef, condition=JA, trueJump=labGreater, falseJump=labNotGreater }), BlockLabel labNotGreater, BlockSimple(LoadArgument{ source=IntegerConstant(tag 0), dest=mergeResult, kind=movePolyWord }), BlockFlow(Unconditional exitLab), BlockLabel labLess, BlockSimple(LoadArgument{ source=IntegerConstant(tag ~1), dest=mergeResult, kind=movePolyWord }), BlockFlow(Unconditional exitLab), BlockLabel labGreater, BlockSimple(LoadArgument{ source=IntegerConstant(tag 1), dest=mergeResult, kind=movePolyWord }), BlockLabel exitLab, BlockSimple(LoadArgument{ source=RegisterArgument mergeResult, dest=target, kind=movePolyWord })] in (code, RegisterArgument target, false) end | codeToICodeBlock({kind=BlockOpMove {isByteMove}, sourceLeft, destRight, length}, context, _, destination) = let (* Moves of 4 or 8 bytes can be done as word moves provided the alignment is correct. Although this will work for strings it is really to handle moves between SysWord and volatileRef in Foreign.Memory. Moves of 1, 2 or 3 bytes or words are converted into a sequence of byte or word moves. 
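Concretely, the test below accepts only a constant length of 4 or the native word size where both the source and destination offsets are multiples of that length. Illustrative examples: a byte move of length 4 with offset 8 on each side becomes a single Move32Bit load/store pair, while a length of 3, or a misaligned offset such as 2, falls through to the general BlockMove.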
*) val isWordMove = case (isByteMove, length) of (true, BICConstnt(l, _)) => if not (isShort l) orelse (toShort l <> 0w4 andalso toShort l <> nativeWordSize) then NONE else let val leng = Word.toInt(toShort l) val moveKind = if toShort l = nativeWordSize then moveNativeWord else Move32Bit val isLeftAligned = case sourceLeft of {index=NONE, offset:int, ...} => offset mod leng = 0 | _ => false val isRightAligned = case destRight of {index=NONE, offset: int, ...} => offset mod leng = 0 | _ => false in if isLeftAligned andalso isRightAligned then SOME moveKind else NONE end | _ => NONE in case isWordMove of SOME moveKind => let val (leftCode, leftUntag, leftMem) = codeAddress(sourceLeft, isByteMove, context) val (rightCode, rightUntag, {base, offset, index, ...}) = codeAddress(destRight, isByteMove, context) val untaggedResReg = newUReg() val code = leftCode @ rightCode @ leftUntag @ rightUntag @ [BlockSimple(LoadArgument { source=MemoryLocation leftMem, dest=untaggedResReg, kind=moveKind}), BlockSimple(StoreArgument {source=RegisterArgument untaggedResReg, base=base, offset=offset, index=index, kind=moveKind, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | _ => let val vec1Reg = newUReg() and vec2Reg = newUReg() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddress(sourceLeft, isByteMove, context) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddress(destRight, isByteMove, context) val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToReg(length, false (* unsigned *), context) val code = leftCode @ rightCode @ lengthCode @ leftUntag @ [BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg})] @ rightUntag @ [BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg})] @ lengthUntag @ [BlockSimple(BlockMove{ srcAddr=vec1Reg, destAddr=vec2Reg, length=lengthArg, isByteMove=isByteMove })] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end end | codeToICodeBlock({kind=BlockOpEqualByte, ...}, _, _, _) = (* TODO: Move the code from codeToICodeRev. However, that is already reversed. *) raise InternalError "codeToICodeBlock - BlockOpEqualByte" (* Already done *) and codeConditionRev(condition, context, jumpOn, jumpLabel, tailCode) = (* General case. Load the value into a register and compare it with 1 (true) *) let val ccRef = newCCRef() val (testCode, testReg) = codeToPRegRev(condition, context, tailCode) val noJumpLabel = newLabel() in BlockLabel noJumpLabel :: BlockFlow(Conditional{ccRef=ccRef, condition=if jumpOn then JE else JNE, trueJump=jumpLabel, falseJump=noJumpLabel}) :: BlockSimple(CompareLiteral{arg1=RegisterArgument testReg, arg2=tag 1, opSize=OpSize32, ccRef=ccRef}) :: testCode end (* The fixed precision functions are also used for arbitrary precision but instead of raising Overflow we need to jump to the code that handles the long format. *) and codeFixedPrecisionArith(BuiltIns.ArithAdd, arg1, BICConstnt(value, _), context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. This should always be a tagged value if the type is correct. However it's possible for it not to be if we have an arbitrary precision value. There will be a run-time check that the value is short and so this code will never be executed. 
It will generally be edited out by the higher level but we can't rely on that. Because it's never executed we can just put in zero. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPReg(arg1, context) in arg1Code @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithAdd, BICConstnt(value, _), arg2, context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg2Code @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg2, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithAdd, arg1, arg2, context, target, onOverflow) = let val aReg3 = newPReg() and ccRef = newCCRef() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg1Code @ arg2Code @ (* Subtract the tag bit from the second argument, do the addition and check for overflow. *) (* TODO: We should really do the detagging in the transform phase. It can make a better choice of the argument if one of the arguments is already untagged or if we have a constant argument. *) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=RegisterArgument aReg2, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end (* Subtraction. We can handle the special case of the second argument being a constant but not the first. *) | codeFixedPrecisionArith(BuiltIns.ArithSub, arg1, BICConstnt(value, _), context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPReg(arg1, context) in arg1Code @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithSub, arg1, arg2, context, target, onOverflow) = let val aReg3 = newPReg() val ccRef = newCCRef() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg1Code @ arg2Code @ (* Do the subtraction, test for overflow and afterwards add in the tag bit. *) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=RegisterArgument aReg2, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, arg1, BICConstnt(value, _), context, target, onOverflow) = let val aReg = newPReg() and argUntagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* Is it better to untag the constant or the register argument?
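In the scheme below the register argument is detagged and the constant is used raw: with t = 2*a+1 the sequence computes (t - 1) * k + 1 = 2*(a*k) + 1, which is tag(a*k). The addition cases above rely on the same identity, e.g. (2*a+1) + semitag(k) = 2*(a+k) + 1. Worked instance (illustrative): a = 3, k = 5 gives t = 7 and (7-1)*5 + 1 = 31 = tag 15.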
*) val constVal = if isShort value then Word.toLargeIntX(toShort value) else 0 in codeToICodeTarget(arg1, context, false, aReg) @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=argUntagged, operand1=aReg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=argUntagged, operand2=IntegerConstant constVal, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, BICConstnt(value, _), arg2, context, target, onOverflow) = let val aReg = newPReg() and argUntagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* Is it better to untag the constant or the register argument? *) val constVal = if isShort value then Word.toLargeIntX(toShort value) else 0 in codeToICodeTarget(arg2, context, false, aReg) @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=argUntagged, operand1=aReg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=argUntagged, operand2=IntegerConstant constVal, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, arg1, arg2, context, target, onOverflow) = let val aReg1 = newPReg() and aReg2 = newPReg() and arg1Untagged = newUReg() and arg2Untagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* This is almost the same as the word operation except we use a signed shift and check for overflow. *) in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift one argument and subtract the tag from the other. It's possible this could be reordered if we have a value that is already untagged. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true (* Signed shift here. *), cache=NONE, opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=arg2Untagged, operand1=aReg2, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=arg1Untagged, operand2=RegisterArgument arg2Untagged, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithQuot, arg1, arg2, context, target, _) = let val aReg1 = newPReg() and aReg2 = newPReg() val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift both of the arguments to remove the tags. We don't test for zero here - that's done explicitly. 
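Both untags use a signed (arithmetic) shift because quot and rem are signed; only the quotient is retagged here. Worked instance (illustrative): for 7 quot 2 the tagged operands 15 and 5 shift to 7 and 2, the division yields quotient 3 and remainder 1, and tagging the quotient gives 7 = tag 3.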
*) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = true, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=quotient, dest=target, isSigned=true, opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithRem, arg1, arg2, context, target, _) = let (* Identical to Quot except that the result is the remainder. *) val aReg1 = newPReg() and aReg2 = newPReg() val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift both of the arguments to remove the tags. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = true, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=remainder, dest=target, isSigned=true, opSize=polyWordOpSize})] end | codeFixedPrecisionArith(_, _, _, _, _, _) = raise InternalError "codeToICode: FixedPrecisionArith - unimplemented operation" (* Generate code for floating point arguments where one of the arguments must be in a register. If the first argument is in a register use that, if the second is in a register and it's commutative use that otherwise load the first argument into a register. *) and codeFPBinaryArgsRev(arg1, arg2, precision, commutative, context, tailCode) = let val (arg1Code, arg1Value) = codeFPArgument(arg1, precision, context, tailCode) val (arg2Code, arg2Value) = codeFPArgument(arg2, precision, context, arg1Code) in case (arg1Value, arg2Value, commutative) of (RegisterArgument fpReg, _, _) => (arg2Code, fpReg, arg2Value) | (_, RegisterArgument fpReg, Commutative) => (arg2Code, fpReg, arg1Value) | (arg1Val, _, _) => let val fpReg = newUReg() val moveOp = case precision of BuiltIns.PrecDouble => MoveDouble | BuiltIns.PrecSingle => MoveFloat in (BlockSimple(LoadArgument{source=arg1Val, dest=fpReg, kind=moveOp}) :: arg2Code, fpReg, arg2Value) end end (* Generate code to evaluate a floating point argument. The aim of this code is to avoid the overhead of untagging a short-precision floating point value in memory. *) and codeFPArgument(BICConstnt(value, _), _, _, tailCode) = let val argVal = (* Single precision constants in 64-bit mode are represented by the value shifted left 32 bits. A word is shifted left one bit so the result is 0w31. *) if isShort value then IntegerConstant(Word.toLargeInt(Word.>>(toShort value, 0w31))) else AddressConstant value in (tailCode, argVal) end | codeFPArgument(arg, precision, context, tailCode) = ( case (precision, wordSize) of (BuiltIns.PrecSingle, 0w8) => (* If this is a single precision value and the word size is 8 the values are tagged. If it is memory we can load the value directly from the high-order word. 
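On a little-endian 64-bit target the float occupies bits 32-63 of the tagged word, so a memory operand can simply be re-addressed at offset+4, as below, and read as a 32-bit float with no untagging. The constant case above shifts right by 0w31 rather than 0w32 because toShort has already removed the one-bit tag shift.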
*) let val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (code, result, _) = codeToICodeRev(arg, context, false, Allowed memOrReg, tailCode) in case result of RegisterArgument argReg => let val fpReg = newUReg() in (BlockSimple(UntagFloat{source=RegisterArgument argReg, dest=fpReg, cache=NONE}) :: code, RegisterArgument fpReg) end | MemoryLocation{offset, base, index, ...} => (code, MemoryLocation{offset=offset+4, base=base, index=index, cache=NONE}) | _ => raise InternalError "codeFPArgument" end | _ => (* Otherwise the value is boxed. *) let val (argCode, argReg) = codeToPRegRev(arg, context, tailCode) in (argCode, wordAt argReg) end ) (* Code an address. The index is optional. *) and codeAddressRev({base, index=SOME index, offset}, true (* byte move *), context, tailCode) = let (* Byte address with index. The index needs to be untagged. *) val indexReg1 = newUReg() val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) val (codeIndex, indexReg) = codeToPRegRev(index, context, codeBase) val untagCode = [BlockSimple(UntagValue{dest=indexReg1, source=indexReg, isSigned=false, cache=NONE, opSize=polyWordOpSize})] val (codeLoadAddr, realBase) = if targetArch = ObjectId32Bit then let val addrReg = newUReg() in ([BlockSimple(LoadEffectiveAddress{ base=SOME baseReg, offset=0, index=ObjectIndex, dest=addrReg, opSize=nativeWordOpSize})], addrReg) end else ([], baseReg) val memResult = {base=realBase, offset=offset, index=MemIndex1 indexReg1, cache=NONE} in (codeLoadAddr @ codeIndex, untagCode, memResult) end | codeAddressRev({base, index=SOME index, offset}, false (* word move *), context, tailCode) = let (* Word address with index. We can avoid untagging the index by adjusting the multiplier and offset. *) val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) val (codeIndex, indexReg) = codeToPRegRev(index, context, codeBase) val (codeLoadAddr, realBase) = if targetArch = ObjectId32Bit then let val addrReg = newUReg() in ([BlockSimple(LoadEffectiveAddress{ base=SOME baseReg, offset=0, index=ObjectIndex, dest=addrReg, opSize=nativeWordOpSize})], addrReg) end else ([], baseReg) val memResult = if wordSize = 0w8 then {base=realBase, offset=offset-4, index=MemIndex4 indexReg, cache=NONE} else {base=realBase, offset=offset-2, index=MemIndex2 indexReg, cache=NONE} in (codeLoadAddr @ codeIndex, [], memResult) end | codeAddressRev({base, index=NONE, offset}, _, context, tailCode) = let val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) val memResult = {offset=offset, base=baseReg, index=memIndexOrObject, cache=NONE} in (codeBase, [], memResult) end and codeAddress(addr, isByte, context) = let val (code, untag, res) = codeAddressRev(addr, isByte, context, []) in (List.rev code, untag, res) end (* C-memory operations are slightly different. The base address is a LargeWord.word value. The index is a byte index so may have to be untagged. *) and codeCAddress({base, index=SOME index, offset}, 0w1, context) = let (* Byte address with index. The index needs to be untagged. *) val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) and (codeIndex, indexReg) = codeToPReg(index, context) (* The index needs to be untagged and, if necessary, sign-extended to the native word size.
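On native targets a signed untag, an arithmetic shift right by one, is enough, since negative byte indexes are legal for C accesses; on 32-in-64 the tagged index is only 32 bits wide so it must be sign-extended to 64 bits first. Illustrative arithmetic: an index of ~3 is tagged as 2 * ~3 + 1 = ~5, and an arithmetic shift right by one recovers ~3.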
*) val (untagCode, sxReg) = if targetArch = ObjectId32Bit then let val sReg1 = newUReg() and sReg2 = newUReg() in ([BlockSimple(SignExtend32To64{dest=sReg1, source=RegisterArgument indexReg}), BlockSimple(UntagValue{dest=sReg2, source=sReg1, isSigned=true, cache=NONE, opSize=nativeWordOpSize})], sReg2) end else let val sReg = newUReg() in ([BlockSimple(UntagValue{dest=sReg, source=indexReg, isSigned=true, cache=NONE, opSize=nativeWordOpSize})], sReg) end val loadCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] val memResult = {base=untaggedBaseReg, offset=offset, index=MemIndex1 sxReg, cache=NONE} in (codeBase @ codeIndex, loadCode @ untagCode, memResult) end | codeCAddress({base, index=SOME index, offset}, size, context) = let (* Non-byte address with index. By using an appropriate multiplier we can avoid having to untag the index. *) val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) and (codeIndex, indexReg) = codeToPReg(index, context) (* The index is signed i.e. negative index values are legal. We don't have to do anything special on the native code versions but on 32-in-64 we need to sign extend. *) val (untagCode, sxReg) = if targetArch = ObjectId32Bit then let val sReg = newUReg() in ([BlockSimple(SignExtend32To64{source=RegisterArgument indexReg, dest=sReg})], sReg) end else ([], indexReg) val loadCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] val memResult = case size of 0w2 => {base=untaggedBaseReg, offset=offset-1, index=MemIndex1 sxReg, cache=NONE} | 0w4 => {base=untaggedBaseReg, offset=offset-2, index=MemIndex2 sxReg, cache=NONE} | 0w8 => {base=untaggedBaseReg, offset=offset-4, index=MemIndex4 sxReg, cache=NONE} | _ => raise InternalError "codeCAddress: unknown size" in (codeBase @ codeIndex, loadCode @ untagCode, memResult) end | codeCAddress({base, index=NONE, offset}, _, context) = let val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) val untagCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] val memResult = {offset=offset, base=untaggedBaseReg, index=NoMemIndex, cache=NONE} in (codeBase, untagCode, memResult) end (* Return an untagged value. If we have a constant just return it. Otherwise return the code to evaluate the argument, the code to untag it and the reference to the untagged register. *) and codeAsUntaggedToRegRev(BICConstnt(value, _), isSigned, _, tailCode) = let (* Should always be short except for unreachable code. *) val untagReg = newUReg() val cval = if isShort value then toShort value else 0w0 val cArg = IntegerConstant(if isSigned then Word.toLargeIntX cval else Word.toLargeInt cval) (* Don't tag *) val untag = [BlockSimple(LoadArgument{source=cArg, dest=untagReg, kind=movePolyWord})] in (tailCode, untag, untagReg) (* Don't tag. *) end | codeAsUntaggedToRegRev(arg, isSigned, context, tailCode) = let val untagReg = newUReg() val (code, srcReg) = codeToPRegRev(arg, context, tailCode) val untag = [BlockSimple(UntagValue{source=srcReg, dest=untagReg, isSigned=isSigned, cache=NONE, opSize=polyWordOpSize})] in (code, untag, untagReg) end and codeAsUntaggedToReg(arg, isSigned, context) = let val (code, untag, untagReg) = codeAsUntaggedToRegRev(arg, isSigned, context, []) in (List.rev code, untag, untagReg) end (* Return the argument as an untagged value. 
We separate evaluating the argument from untagging because we may have to evaluate other arguments and that could involve a function call and we can't save the value to the stack after we've untagged it. Currently this is only used for byte values but we may have to be careful if we use it for word values on the X86. Moving an untagged value into a register might look like loading a constant address. *) and codeAsUntaggedByte(BICConstnt(value, _), isSigned, _) = let val cval = if isShort value then toShort value else 0w0 val cArg = IntegerConstant(if isSigned then Word.toLargeIntX cval else Word.toLargeInt cval) (* Don't tag *) in ([], [], cArg) end | codeAsUntaggedByte(arg, isSigned, context) = let val untagReg = newUReg() val (code, argReg) = codeToPReg(arg, context) val untag = [BlockSimple(UntagValue{source=argReg, dest=untagReg, isSigned=isSigned, cache=NONE, opSize=OpSize32})] in (code, untag, RegisterArgument untagReg) end (* Allocate memory. This is used both for true variable length cells and also for longer constant length cells. *) and allocateMemoryVariable(numWords, flags, initial, context, destination) = let val target = asTarget destination (* With the exception of flagReg all these registers are modified by the code. So, we have to copy the size value into a new register. *) val sizeReg = newPReg() and initReg = newPReg() val sizeReg2 = newPReg() val untagSizeReg = newUReg() and initAddrReg = newPReg() and allocReg = newPReg() val sizeCode = codeToICodeTarget(numWords, context, false, sizeReg) and (flagsCode, flagUntag, flagArg) = codeAsUntaggedByte(flags, false, context) (* We're better off deferring the initialiser if possible. If the value is a constant we don't have to save it. *) val (initCode, initResult, _) = codeToICode(initial, context, false, Allowed allowDefer) in (sizeCode @ flagsCode @ initCode @ [(* We need to copy the size here because AllocateMemoryVariable modifies the size in order to store the length word. This is unfortunate especially as we're going to untag it anyway. *) BlockSimple(LoadArgument{source=RegisterArgument sizeReg, dest=sizeReg2, kind=movePolyWord}), BlockSimple(AllocateMemoryVariable{size=sizeReg, dest=allocReg, saveRegs=[]})] @ flagUntag @ [BlockSimple(StoreArgument{ source=flagArg, base=allocReg, offset= ~1, index=memIndexOrObject, kind=MoveByte, isMutable=false}), (* We need to copy the address here because InitialiseMem modifies all its arguments. *) BlockSimple( if targetArch = ObjectId32Bit then LoadEffectiveAddress{ base=SOME allocReg, offset=0, index=ObjectIndex, dest=initAddrReg, opSize=nativeWordOpSize} else LoadArgument{source=RegisterArgument allocReg, dest=initAddrReg, kind=movePolyWord}), BlockSimple(UntagValue{source=sizeReg2, dest=untagSizeReg, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(LoadArgument{source=initResult, dest=initReg, kind=movePolyWord}), BlockSimple(InitialiseMem{size=untagSizeReg, init=initReg, addr=initAddrReg}), BlockSimple InitialisationComplete, BlockSimple(LoadArgument{source=RegisterArgument allocReg, dest=target, kind=movePolyWord})], RegisterArgument target, false) end (*Turn the codetree structure into icode. *) val bodyContext = {loopArgs=NONE, stackPtr=0, currHandler=NONE, overflowBlock=ref NONE} val (bodyCode, _, bodyExited) = codeToICodeRev(body, bodyContext, true, SpecificPReg resultTarget, beginInstructions) val icode = if bodyExited then bodyCode else returnInstruction(bodyContext, resultTarget, bodyCode) (* Turn the icode list into basic blocks. 
The input list is in reverse so as part of this we reverse the list. *) local val resArray = Array.array(!labelCounter, BasicBlock{ block=[], flow=ExitCode }) fun createEntry (blockNo, block, flow) = Array.update(resArray, blockNo, BasicBlock{ block=block, flow=flow}) fun splitCode([], _, _) = (* End of code. We should have had a BeginFunction. *) raise InternalError "splitCode - no begin" | splitCode(BlockBegin args :: _, sinceLabel, flow) = (* Final instruction. Create the initial block and exit. *) createEntry(0, BeginFunction args ::sinceLabel, flow) | splitCode(BlockSimple instr :: rest, sinceLabel, flow) = splitCode(rest, instr :: sinceLabel, flow) | splitCode(BlockLabel label :: rest, sinceLabel, flow) = (* Label - finish this block and start another. *) ( createEntry(label, sinceLabel, flow); (* Default to a jump to this label. That is used if we have assumed a drop-through. *) splitCode(rest, [], Unconditional label) ) | splitCode(BlockExit instr :: rest, _, _) = splitCode(rest, [instr], ExitCode) | splitCode(BlockFlow flow :: rest, _, _) = splitCode(rest, [], flow) | splitCode(BlockRaiseAndHandle(instr, handler) :: rest, _, _) = splitCode(rest, [instr], UnconditionalHandle handler) | splitCode(BlockOptionalHandle{call, handler, label} :: rest, sinceLabel, flow) = let (* A function call within a handler. This could go to the handler but if there is no exception will go to the next instruction. Also includes JumpLoop since the stack check could result in an Interrupt exception. *) in createEntry(label, sinceLabel, flow); splitCode(rest, [call], ConditionalHandle{handler=handler, continue=label}) end in val () = splitCode(icode, [], ExitCode) val resultVector = Array.vector resArray end open ICODETRANSFORM val pregProperties = Vector.fromList(List.rev(! pregPropList)) in codeICodeFunctionToX86{blocks = resultVector, functionName = name, pregProps = pregProperties, ccCount= ! ccRefCounter, debugSwitches = debugSwitches, resultClosure = resultClosure} end fun gencodeLambda(lambda, debugSwitches, closure) = let open DEBUG Universal (*val debugSwitches = [tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), tagInject assemblyCodeTag true] @ debugSwitches*) in codeFunctionToX86(lambda, debugSwitches, closure) end structure Foreign = X86FOREIGN structure Sharing = struct type backendIC = backendIC and bicLoadForm = bicLoadForm and argumentType = argumentType and closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/INITIALISE_.ML b/mlsource/MLCompiler/INITIALISE_.ML index 73870562..97b4030e 100644 --- a/mlsource/MLCompiler/INITIALISE_.ML +++ b/mlsource/MLCompiler/INITIALISE_.ML @@ -1,2075 +1,2070 @@ (* Copyright (c) 2000 Cambridge University Technical Services Limited Updated David C.J. Matthews 2008-9, 2012, 2013, 2015-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Title: Initialise ML Global Declarations. 
Author: Dave Matthews,Cambridge University Computer Laboratory Copyright Cambridge University 1985 *) functor INITIALISE_ ( structure LEX: LEXSIG structure TYPETREE : TYPETREESIG structure STRUCTVALS : STRUCTVALSIG structure VALUEOPS : VALUEOPSSIG structure CODETREE : CODETREESIG structure EXPORTTREE: EXPORTTREESIG structure DATATYPEREP: DATATYPEREPSIG structure TYPEIDCODE: TYPEIDCODESIG structure MAKE: MAKESIG structure ADDRESS : AddressSig structure DEBUG: DEBUG structure DEBUGGER : DEBUGGER structure PRETTY : PRETTYSIG structure PRINTTABLE: PRINTTABLESIG structure MISC : sig val unescapeString : string -> string exception Conversion of string; (* string to int conversion failure *) end structure VERSION: sig val compilerVersion: string val versionNumber: int end structure UNIVERSALTABLE: sig type universal = Universal.universal type univTable type 'a tag = 'a Universal.tag val univLookup: univTable * 'a tag * string -> 'a option val fold: (string * universal * 'a -> 'a) -> 'a -> univTable -> 'a end sharing STRUCTVALS.Sharing = VALUEOPS.Sharing = TYPETREE.Sharing = EXPORTTREE.Sharing = PRETTY.Sharing = CODETREE.Sharing = MAKE.Sharing = ADDRESS = DATATYPEREP.Sharing = TYPEIDCODE.Sharing = DEBUGGER.Sharing = LEX.Sharing = PRINTTABLE.Sharing sharing STRUCTVALS.Sharing = VALUEOPS.Sharing = TYPETREE.Sharing = EXPORTTREE.Sharing = PRETTY.Sharing = CODETREE.Sharing = MAKE.Sharing = ADDRESS = DATATYPEREP.Sharing = TYPEIDCODE.Sharing = DEBUGGER.Sharing = LEX.Sharing = PRINTTABLE.Sharing = UNIVERSALTABLE ) : sig type gEnv val initGlobalEnv : {globalTable : gEnv, intIsArbitraryPrecision: bool } -> unit end = struct open STRUCTVALS; open TYPETREE open VALUEOPS; open CODETREE; open ADDRESS; open MAKE; open MISC; open EXPORTTREE open DATATYPEREP val intInfType = mkTypeConstruction ("int", intInfConstr, [], []) and realType = mkTypeConstruction ("real", realConstr, [], []) and charType = mkTypeConstruction ("char", charConstr, [], []) and wordType = mkTypeConstruction ("word", wordConstr, [], []) val declInBasis = [DeclaredAt inBasis] fun applyList _ [] = () | applyList f (h :: t) = (f h : unit; applyList f t); fun initGlobalEnv{globalTable : gEnv, intIsArbitraryPrecision: bool } = let val Env globalEnv = MAKE.gEnvAsEnv globalTable val enterGlobalValue = #enterVal globalEnv; val enterGlobalType = #enterType globalEnv; (* Some routines to help make the types. *) local (* careful - STRUCTVALS.intType differs from TYPETREE.intType *) open TYPETREE; in (* Make some type variables *) fun makeEqTV () = mkTypeVar (generalisable, true, false, false) fun makeTV () = mkTypeVar (generalisable, false, false, false) fun makePrintTV() = mkTypeVar (generalisable, false, false, true) fun makeTypeVariable() = makeTv {value=emptyType, level=generalisable, equality=false, nonunifiable=false, printable=false} (* Make some functions *) infixr 5 ->> fun a ->> b = mkFunctionType (a, b); infix 7 **; fun a ** b = mkProductType [a, b]; (* Type identifiers for the types of the declarations. 
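The binding of Int below is what makes the default int switchable: when intIsArbitraryPrecision is set, top-level int is IntInf and, illustratively, 1073741823 + 1 on a 32-bit build evaluates to 1073741824 rather than raising Overflow as it would with the fixed-precision binding.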
*) val Int = if intIsArbitraryPrecision then intInfType else fixedIntType val String = stringType; val Bool = boolType; val Unit = unitType; val Char = charType; val Word = wordType; val Real = realType val Exn = exnType val mkTypeConstruction = mkTypeConstruction; val () = setPreferredInt(if intIsArbitraryPrecision then intInfConstr else fixedIntConstr) end; fun makePolymorphic(tvs, c) = let open TYPEIDCODE val tvs = List.filter(fn TypeVar tv => not justForEqualityTypes orelse tvEquality tv | _ => false) tvs in if null tvs then c else mkInlproc(c, List.length tvs, "", [], 0) end (* Function to make a type identifier with a pretty printer that just prints "?". None of the types are equality types so the equality function is empty. *) local fun monotypePrinter _ = PRETTY.PrettyString "?" in fun defaultEqAndPrintCode () = let open TypeValue val code = createTypeValue{ eqCode = CodeZero, printCode = mkConst (toMachineWord (ref monotypePrinter)), boxedCode = boxedNever, sizeCode = singleWord } in Global (genCode(code, [], 0) ()) end end fun makeTypeAbbreviation(name, fullName, typeVars, typeResult, locations) = makeTypeConstructor( name, typeVars, makeTypeFunction(basisDescription fullName, (typeVars, typeResult)), locations) (* Make an opaque type and add it to an environment. *) fun makeAndDeclareOpaqueType(typeName, fullName, env) = let val typeconstr = makeTypeConstructor(typeName, [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription fullName), declInBasis); in #enterType env (typeName, TypeConstrSet(typeconstr, [])); mkTypeConstruction (typeName, typeconstr, [], declInBasis) end; (* List of something *) fun List (base : types) : types = mkTypeConstruction ("list", tsConstr listConstr, [base], declInBasis); (* ref something *) fun Ref (base : types) : types = mkTypeConstruction ("ref", refConstr, [base], declInBasis); fun Option (base : types) : types = mkTypeConstruction ("option", tsConstr optionConstr, [base], declInBasis); (* Type-dependent functions. *) fun mkSpecialFun (name:string, typeof:types, opn: typeDependent) : values = makeOverloaded (name, typeof, opn); (* Overloaded functions. *) fun mkOverloaded (name:string) (typeof: types) : values = mkSpecialFun(name, typeof, TypeDep) (* Make a structure. Returns the table as an environment so that entries can be added to the structure. 
*) fun makeStructure(parentEnv, name) = let val str as Struct{signat=Signatures{tab, ...}, ...} = makeEmptyGlobal name val () = #enterStruct parentEnv (name, str) val Env env = makeEnv tab in env end val () = enterGlobalType ("unit", TypeConstrSet(unitConstr, [])); local val falseCons = mkGconstr ("false", Bool, createNullaryConstructor(EnumForm{tag=0w0, maxTag=0w1}, [], "false"), true, 2, declInBasis) val trueCons = mkGconstr ("true", Bool, createNullaryConstructor(EnumForm{tag=0w1, maxTag=0w1}, [], "true"), true, 2, declInBasis) val boolEnv = makeStructure(globalEnv, "Bool") (* Bool structure *) val notFn = mkGvar("not", Bool ->> Bool, mkUnaryFn BuiltIns.NotBoolean, declInBasis) in val () = #enterType boolEnv ("bool", TypeConstrSet(boolConstr, [trueCons, falseCons])) val () = #enterVal boolEnv ("true", trueCons) val () = #enterVal boolEnv ("false", falseCons) val () = #enterVal boolEnv ("not", notFn) end; val () = enterGlobalType ("int", TypeConstrSet(if intIsArbitraryPrecision then intInfConstr else fixedIntConstr, [])) val () = enterGlobalType ("char", TypeConstrSet(charConstr, [])) val () = enterGlobalType ("string", TypeConstrSet(stringConstr, [])) val () = enterGlobalType ("real", TypeConstrSet(realConstr, [])) val () = (* Enter :: and nil. *) List.app(fn(tv as Value{name, ...}) => enterGlobalValue(name, tv)) (tsConstructors listConstr) val () = enterGlobalType ("list", listConstr); val () = (* Enter NONE and SOME. *) List.app(fn(tv as Value{name, ...}) => enterGlobalValue(name, tv)) (tsConstructors optionConstr) val () = enterGlobalType ("option", optionConstr); local val refCons = let val a = mkTypeVar(generalisable, false, false, false) in mkGconstr ("ref", a ->> Ref a, createUnaryConstructor(RefForm, [a], "ref"), false, 1, declInBasis) end in val () = enterGlobalType ("ref", TypeConstrSet(refConstr, [refCons])); val () = enterGlobalValue ("ref", refCons); end local open BuiltIns fun monoTypePrinter _ = PRETTY.PrettyString "?" val idCode = let open TypeValue val equalLongWordFn = mkInlproc( mkBinary(LargeWordComparison TestEqual, mkLoadArgument 0, mkLoadArgument 1), 2, "EqualLargeWord()", [], 0) val code = createTypeValue{ eqCode=equalLongWordFn, printCode=mkConst (toMachineWord (ref monoTypePrinter)), boxedCode = boxedNever, sizeCode = singleWord } in Global (genCode(code, [], 0) ()) end in val largeWordType = makeTypeConstructor("word", [], makeFreeId(0, idCode, true, basisDescription "LargeWord.word"), declInBasis) val LargeWord = mkTypeConstruction ("LargeWord.word", largeWordType, [], declInBasis) end val () = enterGlobalType ("exn", TypeConstrSet(exnConstr, [])); val () = enterGlobalType ("word", TypeConstrSet(wordConstr, [])); val runCallEnv = makeStructure(globalEnv, "RunCall") fun enterRunCall (name : string, entry : codetree, typ : types) : unit = let val value = mkGvar (name, typ, entry, declInBasis); in #enterVal runCallEnv (name, value) end local (* unsafeCast. Can be used to convert any value to any type. *) val a = makeTV () val b = makeTV () val unsafeCastEntry = mkInlproc (mkLoadArgument 0 (* just the parameter *), 1, "unsafeCast(1)", [], 0) in val () = enterRunCall ("unsafeCast", makePolymorphic([a, b], unsafeCastEntry), a ->> b) end local val a = makeTV() and b = makeTV() open BuiltIns in (* isShort - test if a value is tagged rather than being an address. 
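A hedged usage sketch, reflecting the usual behaviour of the resulting RunCall.isShort rather than anything specified in this file:
    RunCall.isShort 42       returns true, a tagged fixed-precision int
    RunCall.isShort "hello"  returns false, a string is a heap address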
*) val () = enterRunCall ("isShort", makePolymorphic([a], mkUnaryFn IsTaggedValue), a ->> Bool) (* pointer equality *) val () = enterRunCall ("pointerEq", makePolymorphic([a], mkBinaryFn PointerEq), a ** a ->> Bool) (* load a word. The index is in words and is always zero or positive. *) val () = enterRunCall ("loadWord", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLWord{isImmutable=false})), a ** Word ->> b) (* Load a word from an immutable. *) val () = enterRunCall ("loadWordFromImmutable", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLWord{isImmutable=true})), a ** Word ->> b) (* load a byte. The index is in bytes and is always zero or positive. Probably the result should be a Word8.word value or a char. *) val () = enterRunCall ("loadByte", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLByte{isImmutable=false})), a ** Word ->> b) (* Load a byte from an immutable. *) val () = enterRunCall ("loadByteFromImmutable", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLByte{isImmutable=true})), a ** Word ->> b) (* Get the length of a heap cell. *) val () = enterRunCall ("memoryCellLength", makePolymorphic([a], mkUnaryFn MemoryCellLength), a ->> Word) (* Return the flags. Perhaps this could return a Word8.word value instead of a word. *) val () = enterRunCall ("memoryCellFlags", makePolymorphic([a], mkUnaryFn MemoryCellFlags), a ->> Word) (* Return the number of bytes per word. This is a constant since we have separate pre-built compilers for 32-bit and 64-bit. N.B. The byte order is not a constant since we only have a single pre-built compiler for little-endian and big-endian interpreted code. *) val () = enterRunCall ("bytesPerWord", mkConst(toMachineWord wordSize), Word) (* Store a word *) val () = enterRunCall ("storeWord", makePolymorphic([a, b], mkStoreOperationFn(LoadStoreMLWord{isImmutable=false})), mkProductType[a, Word, b] ->> Unit) (* Store a byte *) val () = enterRunCall ("storeByte", makePolymorphic([a, b], mkStoreOperationFn(LoadStoreMLByte{isImmutable=false})), mkProductType[a, Word, b] ->> Unit) (* Lock a mutable cell. *) val () = enterRunCall ("clearMutableBit", makePolymorphic([a], mkUnaryFn ClearMutableFlag), a ->> Unit) (* Allocate a byte cell. The second argument is the flags byte. It might be better if this were a Word8.word value. *) val () = enterRunCall ("allocateByteMemory", makePolymorphic([a], mkBinaryFn AllocateByteMemory), Word ** Word ->> a) (* Allocate a word cell. *) val () = enterRunCall ("allocateWordMemory", makePolymorphic([a, b], mkAllocateWordMemoryFn), mkProductType[Word, Word, a] ->> b) (* Byte vector operations. *) val () = enterRunCall ("byteVectorEqual", makePolymorphic([a], mkBlockOperationFn BlockOpEqualByte), mkProductType[a, a, Word, Word, Word] ->> Bool) val () = enterRunCall ("byteVectorCompare", makePolymorphic([a], mkBlockOperationFn BlockOpCompareByte), mkProductType[a, a, Word, Word, Word] ->> Int) (* Block moves. *) val () = enterRunCall ("moveBytes", makePolymorphic([a], mkBlockOperationFn (BlockOpMove{isByteMove=true})), mkProductType[a, a, Word, Word, Word] ->> Unit) val () = enterRunCall ("moveWords", makePolymorphic([a], mkBlockOperationFn (BlockOpMove{isByteMove=false})), mkProductType[a, a, Word, Word, Word] ->> Unit) (* Untagged loads and stores. 
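These transfer whole words of raw, untagged data; the String in the types below reflects the main client, since a string's length word is held untagged in its first word. As an illustrative assumption about the basis code, String.size could be written roughly as
    fun size s = Word.toInt(RunCall.loadUntagged(s, 0w0))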
*) val () = enterRunCall ("loadUntagged", mkLoadOperationFn LoadStoreUntaggedUnsigned, String ** Word ->> Word) val () = enterRunCall ("storeUntagged", mkStoreOperationFn LoadStoreUntaggedUnsigned, mkProductType[String, Word, Word] ->> Unit) val () = enterRunCall ("touch", makePolymorphic([a], mkUnaryFn TouchAddress), a ->> Unit) end local val debugOpts = [] (* Place to add debugging if necessary. *) (* [tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), tagInject assemblyCodeTag true] *) fun makeCall rtsCall n entryName = rtsCall (entryName, n, debugOpts) val makeFastCall = makeCall CODETREE.Foreign.rtsCallFast (* We need to wrap this so that the arguments are passed in registers. *) fun makeRunCallTupled (argTypes, resultType, callN) : codetree = let val width = List.length argTypes val name = "rtsCall" ^ Int.toString width; local val f = mkLoadClosure 0 (* first item from enclosing scope *) val tuple = mkLoadArgument 0 (* the inner parameter *) val args = case argTypes of [singleType] => [(tuple, singleType)] | argTypes => let val argVals = List.tabulate(width, fn n => mkInd (n, tuple)) in ListPair.zipEq(argVals, argTypes) end in val innerBody = mkCall (f, args, resultType) end local (* The closure contains the address of the RTS call. *) val f = mkEval(mkConst callN, [mkLoadArgument 0]) val innerLambda = mkInlproc (innerBody, 1, name ^ "(1)", [mkLoadLocal 0], 0) in val outerBody = mkEnv([mkDec (0, f)], innerLambda) end val outerLambda = mkInlproc (outerBody, 1, name, [], 1) in outerLambda end (* "Full" calls pass the thread Id as the first parameter. *) fun makeRunCallTupledFull (argTypes, resultType) = let val width = List.length argTypes val callN = toMachineWord(makeFastCall(width + 1)) val name = "rtsCall" ^ Int.toString width; local val f = mkLoadClosure 0 (* first item from enclosing scope *) val tuple = mkLoadArgument 0 (* the inner parameter *) val args = case argTypes of [singleType] => [(tuple, singleType)] | argTypes => let val argVals = List.tabulate(width, fn n => mkInd (n, tuple)) in ListPair.zipEq(argVals, argTypes) end in val innerBody = mkEnv( [ mkDec(0, mkCall (f, (getCurrentThreadId, GeneralType) :: args, resultType)), mkNullDec checkRTSException ], mkLoadLocal 0) end local (* The closure contains the address of the RTS call. *) val f = mkEval(mkConst callN, [mkLoadArgument 0]) (* This creates the actual call. 
*) val innerLambda = mkInlproc (innerBody, 1, name ^ "(1)", [mkLoadLocal 0], 1) in val outerBody = mkEnv([mkDec (0, f)], innerLambda) end val outerLambda = mkInlproc (outerBody, 1, name, [], 1) in outerLambda end local val a = makeTV () and b = makeTV () fun makeInlCode(makeCall, name) = let val call1 = toMachineWord(makeCall 1) val body = mkEval(mkConst call1, [mkLoadArgument 0]) val proc = mkInlproc (body, 1, name, [], 0) in makePolymorphic([a, b], proc) end in val rtsCallFast1Entry = makeInlCode(makeFastCall, "rtsCallFast1") end local val a = makeTV () and b = makeTV () and c = makeTV () and d = makeTV () and e = makeTV () and f = makeTV () fun makeRtsCall(n, makeCall) = makeRunCallTupled(List.tabulate(n, fn _ => GeneralType), GeneralType, toMachineWord(makeCall n)) fun makeFullRtsCall n = makeRunCallTupledFull(List.tabulate(n, fn _ => GeneralType), GeneralType) in val rtsCallFull0Entry = makePolymorphic([a], makeFullRtsCall 0) and rtsCallFast0Entry = makePolymorphic([a], makeRtsCall(0, makeFastCall)) val rtsCall0Type = String ->> Unit ->> a val rtsCall1Type = String ->> a ->> b val rtsCallFull1Entry = makePolymorphic([a, b], makeFullRtsCall 1) val rtsCallFull2Entry = makePolymorphic([a, b, c], makeFullRtsCall 2) and rtsCallFast2Entry = makePolymorphic([a, b, c], makeRtsCall(2, makeFastCall)) val rtsCall2Type = String ->> TYPETREE.mkProductType [a,b] ->> c val rtsCallFull3Entry = makePolymorphic([a, b, c, d], makeFullRtsCall 3) val rtsCallFast3Entry = makePolymorphic([a, b, c, d], makeRtsCall(3, makeFastCall)) val rtsCall3Type = String ->> TYPETREE.mkProductType [a,b,c] ->> d val rtsCallFull4Entry = makePolymorphic([a, b, c, d, e], makeFullRtsCall 4) val rtsCallFast4Entry = makePolymorphic([a, b, c, d, e], makeRtsCall(4, makeFastCall)) val rtsCall4Type = String ->> TYPETREE.mkProductType [a,b,c,d] ->> e val rtsCallFull5Entry = makePolymorphic([a, b, c, d, e, f], makeFullRtsCall 5) val rtsCall5Type = String ->> TYPETREE.mkProductType [a,b,c,d,e] ->> f end in val () = enterRunCall ("rtsCallFull0", rtsCallFull0Entry, rtsCall0Type) val () = enterRunCall ("rtsCallFast0", rtsCallFast0Entry, rtsCall0Type) val () = enterRunCall ("rtsCallFull1", rtsCallFull1Entry, rtsCall1Type) val () = enterRunCall ("rtsCallFast1", rtsCallFast1Entry, rtsCall1Type) val () = enterRunCall ("rtsCallFull2", rtsCallFull2Entry, rtsCall2Type) val () = enterRunCall ("rtsCallFast2", rtsCallFast2Entry, rtsCall2Type) val () = enterRunCall ("rtsCallFull3", rtsCallFull3Entry, rtsCall3Type) val () = enterRunCall ("rtsCallFast3", rtsCallFast3Entry, rtsCall3Type) val () = enterRunCall ("rtsCallFast4", rtsCallFast4Entry, rtsCall4Type) val () = enterRunCall ("rtsCallFull4", rtsCallFull4Entry, rtsCall4Type) val () = enterRunCall ("rtsCallFull5", rtsCallFull5Entry, rtsCall5Type) val makeRunCallTupled = makeRunCallTupled (* Needed for reals. *) end local (* Create nullary exception. *) fun makeException0(name, id) = let val exc = Value{ name = name, typeOf = TYPETREE.exnType, access = Global(mkConst(toMachineWord id)), class = Exception, locations = declInBasis, references = NONE, instanceTypes=NONE } in #enterVal runCallEnv (name, exc) end (* Create exception with parameter. *) and makeException1(name, id, exType) = let val exc = Value{ name = name, typeOf = exType ->> TYPETREE.exnType, access = Global(mkConst(toMachineWord id)), class = Exception, locations = declInBasis, references = NONE, instanceTypes=NONE } in #enterVal runCallEnv (name, exc) end (* Exception numbers. Most of these are hard-coded in the RTS. 
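The numbers here must therefore stay in sync with the RTS sources; for instance the RTS signals integer overflow with exception number 5, which matches EXC_overflow below (a consistency observation, not verified here).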
*) val EXC_interrupt = 1 val EXC_syserr = 2 val EXC_size = 4 val EXC_overflow = 5 val EXC_divide = 7 val EXC_conversion = 8 val EXC_XWindows = 10 val EXC_subscript = 11 val EXC_thread = 12 val EXC_Bind = 100 (* In Match compiler. *) val EXC_Match = 101 val EXC_Fail = 103 in val () = List.app makeException0 [ ("Interrupt", EXC_interrupt), ("Size", EXC_size), ("Bind", EXC_Bind), ("Div", EXC_divide), ("Match", EXC_Match), ("Overflow", EXC_overflow), ("Subscript", EXC_subscript) ] val () = List.app makeException1 [ ("Fail", EXC_Fail, String), ("Conversion", EXC_conversion, String), ("XWindows", EXC_XWindows, String), ("Thread", EXC_thread, String), ("SysErr", EXC_syserr, String ** Option LargeWord) ] end (* Standard Basis structures for basic types. These contain the definitions of the basic types and operations on them. The structures are extended in the basis library and overloaded functions are extracted from them. *) local val largeIntEnv = makeStructure(globalEnv, "LargeInt") (* The comparison operations take two arbitrary precision ints and a general "compare" function that returns a fixed precision int. *) val compareType = mkProductType[intInfType, intInfType, intInfType ** intInfType ->> fixedIntType] ->> Bool val arithType = mkProductType[intInfType, intInfType, intInfType ** intInfType ->> intInfType] ->> intInfType fun enterArbitrary(name, oper, typ) = let val value = mkGvar (name, typ, mkArbitraryFn oper, declInBasis) in #enterVal largeIntEnv (name, value) end in val () = #enterType largeIntEnv ("int", TypeConstrSet(intInfConstr, [])) (* These functions are used internally. *) val () = enterArbitrary("less", ArbCompare BuiltIns.TestLess, compareType) val () = enterArbitrary("greater", ArbCompare BuiltIns.TestGreater, compareType) val () = enterArbitrary("lessEq", ArbCompare BuiltIns.TestLessEqual, compareType) val () = enterArbitrary("greaterEq", ArbCompare BuiltIns.TestGreaterEqual, compareType) val () = enterArbitrary("add", ArbArith BuiltIns.ArithAdd, arithType) val () = enterArbitrary("subtract", ArbArith BuiltIns.ArithSub, arithType) val () = enterArbitrary("multiply", ArbArith BuiltIns.ArithMult, arithType) end local val fixedIntEnv = makeStructure(globalEnv, "FixedInt") open BuiltIns fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal fixedIntEnv (name, value) end val compareType = fixedIntType ** fixedIntType ->> Bool and binaryType = fixedIntType ** fixedIntType ->> fixedIntType fun enterComparison(name, test) = enterBinary(name, WordComparison{test=test, isSigned=true}, compareType) and enterBinaryOp(name, oper) = enterBinary(name, FixedPrecisionArith oper, binaryType) in val () = #enterType fixedIntEnv ("int", TypeConstrSet(fixedIntConstr, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("quot", ArithQuot) val () = enterBinaryOp("rem", ArithRem) end local open BuiltIns val largeWordEnv = makeStructure(globalEnv, "LargeWord") fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal largeWordEnv (name, value) end val compareType = LargeWord ** LargeWord ->> Bool and binaryType = LargeWord ** LargeWord ->> LargeWord and shiftType = LargeWord ** Word ->> LargeWord (* The shift amount is a 
Word. *) fun enterComparison(name, test) = enterBinary(name, LargeWordComparison test, compareType) and enterBinaryOp(name, oper) = enterBinary(name, LargeWordArith oper, binaryType) and enterBinaryLogical(name, oper) = enterBinary(name, LargeWordLogical oper, binaryType) and enterBinaryShift(name, oper) = enterBinary(name, LargeWordShift oper, shiftType) in val () = #enterType largeWordEnv ("word", TypeConstrSet(largeWordType, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("div", ArithDiv) val () = enterBinaryOp("mod", ArithMod) val () = enterBinaryLogical("orb", LogicalOr) val () = enterBinaryLogical("andb", LogicalAnd) val () = enterBinaryLogical("xorb", LogicalXor) val () = enterBinaryShift("<<", ShiftLeft) val () = enterBinaryShift(">>", ShiftRightLogical) val () = enterBinaryShift("~>>", ShiftRightArithmetic) val LargeWord = LargeWord end local val wordStructEnv = makeStructure(globalEnv, "Word") open BuiltIns fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal wordStructEnv (name, value) end val compareType = Word ** Word ->> Bool and binaryType = Word ** Word ->> Word fun enterComparison(name, test) = enterBinary(name, WordComparison{test=test, isSigned=false}, compareType) and enterBinaryOp(name, oper) = enterBinary(name, WordArith oper, binaryType) and enterBinaryLogical(name, oper) = enterBinary(name, WordLogical oper, binaryType) and enterBinaryShift(name, oper) = enterBinary(name, WordShift oper, binaryType) val toLargeWordFn = mkGvar ("toLargeWord", Word ->> LargeWord, mkUnaryFn UnsignedToLongWord, declInBasis) and toLargeWordXFn = mkGvar ("toLargeWordX", Word ->> LargeWord, mkUnaryFn SignedToLongWord, declInBasis) and fromLargeWordFn = mkGvar ("fromLargeWord", LargeWord ->> Word, mkUnaryFn LongWordToTagged, declInBasis) in val () = #enterType wordStructEnv ("word", TypeConstrSet(wordConstr, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("div", ArithDiv) val () = enterBinaryOp("mod", ArithMod) val () = enterBinaryLogical("orb", LogicalOr) val () = enterBinaryLogical("andb", LogicalAnd) val () = enterBinaryLogical("xorb", LogicalXor) val () = enterBinaryShift("<<", ShiftLeft) val () = enterBinaryShift(">>", ShiftRightLogical) val () = enterBinaryShift("~>>", ShiftRightArithmetic) val () = #enterVal wordStructEnv ("toLargeWord", toLargeWordFn) val () = #enterVal wordStructEnv ("toLargeWordX", toLargeWordXFn) val () = #enterVal wordStructEnv ("fromLargeWord", fromLargeWordFn) end local val charEnv = makeStructure(globalEnv, "Char") open BuiltIns (* Comparison functions are the same as Word. 
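       Illustrative aside (editorial, not part of the original source): a character
       is represented as a tagged word, so a comparison such as

           #"a" < #"b"

       compiles to the same unsigned word comparison entered for Word above.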
*) fun enterComparison(name, test) = let val typ = Char ** Char ->> Bool val entry = mkBinaryFn(WordComparison{test=test, isSigned=false}) val value = mkGvar (name, typ, entry, declInBasis) in #enterVal charEnv (name, value) end in val () = #enterType charEnv ("char", TypeConstrSet(charConstr, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) end local val stringEnv = makeStructure(globalEnv, "String") in val () = #enterType stringEnv ("string", TypeConstrSet(stringConstr, [])) end local val realEnv = makeStructure(globalEnv, "Real") (* These are only used in Real so are included here rather than in RunCall. rtsCallFastRealtoReal is used for functions such as sqrt. rtsCallFastGeneraltoReal is used for Real.fromLargeInt. *) val debugOpts = [] (* Place to add debugging if necessary. *) fun makeFastRealRealCall entryName = CODETREE.Foreign.rtsCallFastRealtoReal (entryName, debugOpts) and makeFastRealRealRealCall entryName = CODETREE.Foreign.rtsCallFastRealRealtoReal (entryName, debugOpts) and makeFastIntInfRealCall entryName = CODETREE.Foreign.rtsCallFastGeneraltoReal (entryName, debugOpts) and makeFastRealGeneralRealCall entryName = CODETREE.Foreign.rtsCallFastRealGeneraltoReal (entryName, debugOpts) val rtsCallFastR_REntry = makeRunCallTupled([DoubleFloatType], DoubleFloatType, toMachineWord makeFastRealRealCall) (* This needs to be tupled. *) val rtsCallFastRR_REntry = makeRunCallTupled([DoubleFloatType, DoubleFloatType], DoubleFloatType, toMachineWord makeFastRealRealRealCall) and rtsCallFastRI_REntry = makeRunCallTupled([DoubleFloatType, GeneralType], DoubleFloatType, toMachineWord makeFastRealGeneralRealCall) val rtsCallFastI_REntry = makeRunCallTupled([GeneralType], DoubleFloatType, toMachineWord makeFastIntInfRealCall) val rtsCallFastF_F = mkGvar ("rtsCallFastR_R", String ->> Real ->> Real, rtsCallFastR_REntry, declInBasis) val rtsCallFastFF_F = mkGvar ("rtsCallFastRR_R", String ->> Real ** Real ->> Real, rtsCallFastRR_REntry, declInBasis) val rtsCallFastFG_F = mkGvar ("rtsCallFastRI_R", String ->> Real ** Int ->> Real, rtsCallFastRI_REntry, declInBasis) val rtsCallFastG_F = mkGvar ("rtsCallFastI_R", String ->> intInfType ->> Real, rtsCallFastI_REntry, declInBasis) fun enterUnary(name, oper, typ) = let val value = mkGvar (name, typ, mkUnaryFn oper, declInBasis) in #enterVal realEnv (name, value) end fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal realEnv (name, value) end val compareType = Real ** Real ->> Bool and binaryType = Real ** Real ->> Real and unaryType = Real ->> Real and realToFixType = Real ->> fixedIntType open BuiltIns IEEEReal fun enterComparison(name, test) = enterBinary(name, RealComparison(test, PrecDouble), compareType) and enterBinaryOp(name, oper) = enterBinary(name, RealArith(oper, PrecDouble), binaryType) in val () = #enterType realEnv ("real", TypeConstrSet(realConstr, [])) val () = #enterVal realEnv ("rtsCallFastR_R", rtsCallFastF_F) val () = #enterVal realEnv ("rtsCallFastRR_R", rtsCallFastFF_F) val () = #enterVal realEnv ("rtsCallFastRI_R", rtsCallFastFG_F) val () = #enterVal realEnv ("rtsCallFastI_R", rtsCallFastG_F) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterComparison("==", TestEqual) (* real is not an eqtype. 
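       Illustrative aside (editorial, not part of the original source): since = is
       unavailable at type real, the basis library exposes this comparison as
       Real.== with IEEE semantics, for example

           Real.== (0.0, ~0.0)    evaluates to true
           Real.== (x, x)         evaluates to false when x is a NaN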
*)
        (* Included unordered mainly because it's easy to implement isNan. *)
        val () = enterComparison("unordered", TestUnordered)
        val () = enterBinaryOp("+", ArithAdd)
        val () = enterBinaryOp("-", ArithSub)
        val () = enterBinaryOp("*", ArithMult)
        val () = enterBinaryOp("/", ArithDiv)
        val () = enterUnary("~", RealNeg PrecDouble, unaryType)
        val () = enterUnary("abs", RealAbs PrecDouble, unaryType)
        val () = enterUnary("fromFixedInt", RealFixedInt PrecDouble, fixedIntType ->> Real)
        val () = enterUnary("truncFix", RealToInt(PrecDouble, TO_ZERO), realToFixType)
        val () = enterUnary("roundFix", RealToInt(PrecDouble, TO_NEAREST), realToFixType)
        val () = enterUnary("ceilFix", RealToInt(PrecDouble, TO_POSINF), realToFixType)
        val () = enterUnary("floorFix", RealToInt(PrecDouble, TO_NEGINF), realToFixType)
    end

    local
        val real32Env = makeStructure(globalEnv, "Real32")
        val floatType = mkTypeConstruction ("real", floatConstr, [], [])
        val Float = floatType

        val debugOpts = [] (* Place to add debugging if necessary. *)
        fun makeFastFloatFloatCall entryName =
            CODETREE.Foreign.rtsCallFastFloattoFloat (entryName, debugOpts)
        and makeFastFloatFloatFloatCall entryName =
            CODETREE.Foreign.rtsCallFastFloatFloattoFloat (entryName, debugOpts)
        and makeFastIntInfFloatCall entryName =
            CODETREE.Foreign.rtsCallFastGeneraltoFloat (entryName, debugOpts)
        and makeFastFloatGeneralFloatCall entryName =
            CODETREE.Foreign.rtsCallFastFloatGeneraltoFloat (entryName, debugOpts)

        val rtsCallFastR_REntry =
            makeRunCallTupled([SingleFloatType], SingleFloatType, toMachineWord makeFastFloatFloatCall)
        (* This needs to be tupled. *)
        val rtsCallFastRR_REntry =
            makeRunCallTupled([SingleFloatType, SingleFloatType], SingleFloatType, toMachineWord makeFastFloatFloatFloatCall)
        and rtsCallFastRI_REntry =
            makeRunCallTupled([SingleFloatType, GeneralType], SingleFloatType, toMachineWord makeFastFloatGeneralFloatCall)
        val rtsCallFastI_REntry =
            makeRunCallTupled([GeneralType], SingleFloatType, toMachineWord makeFastIntInfFloatCall)

        val rtsCallFastF_F = mkGvar ("rtsCallFastF_F", String ->> Float ->> Float, rtsCallFastR_REntry, declInBasis)
        val rtsCallFastFF_F = mkGvar ("rtsCallFastFF_F", String ->> Float ** Float ->> Float, rtsCallFastRR_REntry, declInBasis)
        val rtsCallFastFG_F = mkGvar ("rtsCallFastFI_F", String ->> Float ** Int ->> Float, rtsCallFastRI_REntry, declInBasis)
        val rtsCallFastG_F = mkGvar ("rtsCallFastI_F", String ->> intInfType ->> Float, rtsCallFastI_REntry, declInBasis)

        fun enterUnary(name, oper, typ) =
        let
            val value = mkGvar (name, typ, mkUnaryFn oper, declInBasis)
        in
            #enterVal real32Env (name, value)
        end

        fun enterBinary(name, oper, typ) =
        let
            val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis)
        in
            #enterVal real32Env (name, value)
        end

        val compareType = Float ** Float ->> Bool
        and binaryType = Float ** Float ->> Float
        and unaryType = Float ->> Float
        and floatToFixType = Float ->> fixedIntType

        open BuiltIns IEEEReal

        fun enterComparison(name, test) =
            enterBinary(name, RealComparison(test, PrecSingle), compareType)
        and enterBinaryOp(name, oper) =
            enterBinary(name, RealArith(oper, PrecSingle), binaryType)
    in
        val () = #enterType real32Env ("real", TypeConstrSet(floatConstr, []))
        val () = enterUnary("toLarge", BuiltIns.FloatToDouble, floatType ->> Real)
        (* Conversion with the current rounding mode. *)
-       and () = enterUnary("fromReal", BuiltIns.DoubleToFloat NONE, Real ->> floatType)
-       (* There are various versions of this function for each of the rounding modes. *)
-       and () = enterUnary("fromRealRound", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_NEAREST), Real ->> floatType)
-       and () = enterUnary("fromRealTrunc", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_ZERO), Real ->> floatType)
-       and () = enterUnary("fromRealCeil", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_POSINF), Real ->> floatType)
-       and () = enterUnary("fromRealFloor", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_NEGINF), Real ->> floatType)
+       and () = enterUnary("fromReal", BuiltIns.DoubleToFloat, Real ->> floatType)
        val () = #enterVal real32Env ("rtsCallFastR_R", rtsCallFastF_F)
        val () = #enterVal real32Env ("rtsCallFastRR_R", rtsCallFastFF_F)
        val () = #enterVal real32Env ("rtsCallFastRI_R", rtsCallFastFG_F)
        val () = #enterVal real32Env ("rtsCallFastI_R", rtsCallFastG_F)
        val () = enterComparison("<", TestLess)
        val () = enterComparison("<=", TestLessEqual)
        val () = enterComparison(">", TestGreater)
        val () = enterComparison(">=", TestGreaterEqual)
        val () = enterComparison("==", TestEqual) (* Real32.real is not an eqtype. *)
        val () = enterComparison("unordered", TestUnordered)
        val () = enterBinaryOp("+", ArithAdd)
        val () = enterBinaryOp("-", ArithSub)
        val () = enterBinaryOp("*", ArithMult)
        val () = enterBinaryOp("/", ArithDiv)
        val () = enterUnary("~", RealNeg PrecSingle, unaryType)
        val () = enterUnary("abs", RealAbs PrecSingle, unaryType)
        val () = enterUnary("truncFix", RealToInt(PrecSingle, TO_ZERO), floatToFixType)
        val () = enterUnary("roundFix", RealToInt(PrecSingle, TO_NEAREST), floatToFixType)
        val () = enterUnary("ceilFix", RealToInt(PrecSingle, TO_POSINF), floatToFixType)
        val () = enterUnary("floorFix", RealToInt(PrecSingle, TO_NEGINF), floatToFixType)
    end

    val bootstrapEnv = makeStructure(globalEnv, "Bootstrap")

    fun enterBootstrap (name : string, entry : codetree, typ : types) : unit =
    let
        val value = mkGvar (name, typ, entry, declInBasis)
    in
        #enterVal bootstrapEnv (name, value)
    end

    local
        val threadEnv = makeStructure(globalEnv, "Thread")
        open TypeValue
        fun monoTypePrinter _ = PRETTY.PrettyString "?"
        val code =
            createTypeValue{
                eqCode=equalPointerOrWordFn,
                printCode=mkConst (toMachineWord (ref monoTypePrinter)),
                boxedCode=boxedAlways, sizeCode=singleWord }
        (* Thread.thread type.  This is an equality type with pointer equality. *)
        val threadConstr =
            makeTypeConstructor
                ("thread", [],
                 makeFreeId(0, Global (genCode(code, [], 0) ()), true, basisDescription "thread"),
                 [DeclaredAt inBasis])
        val threadType = mkTypeConstruction ("thread", threadConstr, [], []);
        val selfFunction = mkGvar ("self", Unit ->> threadType, getCurrentThreadIdFn, declInBasis)
        val atExAddFunction =
            mkGvar("atomicExchAdd", Ref Word ** Word ->> Word, mkBinaryFn BuiltIns.AtomicExchangeAdd, declInBasis)
        val atResetFunction =
            mkGvar("atomicReset", Ref Word ->> Unit, mkUnaryFn BuiltIns.AtomicReset, declInBasis)
        val cpuPauseFunction = mkGvar("cpuPause", Unit ->> Unit, cpuPauseFn, declInBasis)
    in
        val () = #enterType threadEnv ("thread", TypeConstrSet(threadConstr, []))
        val () = #enterVal threadEnv ("self", selfFunction)
        val () = #enterVal threadEnv ("atomicExchAdd", atExAddFunction)
        val () = #enterVal threadEnv ("atomicReset", atResetFunction)
        val () = #enterVal threadEnv ("cpuPause", cpuPauseFunction)
    end

    local
        val fmemEnv = makeStructure(globalEnv, "ForeignMemory")
        val a = makeTV()
        (* We don't have Word8.word or Word32.word at this point so the easiest
           way to deal with this is to make them polymorphic.
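           Illustrative aside (editorial, not part of the original source): once the
           proper types exist, the basis library can pin each entry down to its
           intended monomorphic type, along the lines of

               val get8: LargeWord.word * Word.word -> Word8.word = ForeignMemory.get8

           so the polymorphism is only a bootstrapping loophole.  The signature shown
           is a sketch, not the exact basis-library declaration.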
*)
        val get8Function = mkGvar("get8", LargeWord ** Word ->> a,
                                makePolymorphic([a], mkLoadOperationFn LoadStoreC8), declInBasis)
        val get16Function = mkGvar("get16", LargeWord ** Word ->> Word, mkLoadOperationFn LoadStoreC16, declInBasis)
        val get32Function = mkGvar("get32", LargeWord ** Word ->> a,
                                makePolymorphic([a], mkLoadOperationFn LoadStoreC32), declInBasis)
        val get64Function = mkGvar("get64", LargeWord ** Word ->> LargeWord, mkLoadOperationFn LoadStoreC64, declInBasis)
        val getFloatFunction = mkGvar("getFloat", LargeWord ** Word ->> Real, mkLoadOperationFn LoadStoreCFloat, declInBasis)
        val getDoubleFunction = mkGvar("getDouble", LargeWord ** Word ->> Real, mkLoadOperationFn LoadStoreCDouble, declInBasis)
        val set8Function = mkGvar("set8", mkProductType[LargeWord, Word, a] ->> Unit,
                                makePolymorphic([a], mkStoreOperationFn LoadStoreC8), declInBasis)
        val set16Function = mkGvar("set16", mkProductType[LargeWord, Word, Word] ->> Unit, mkStoreOperationFn LoadStoreC16, declInBasis)
        val set32Function = mkGvar("set32", mkProductType[LargeWord, Word, a] ->> Unit,
                                makePolymorphic([a], mkStoreOperationFn LoadStoreC32), declInBasis)
        val set64Function = mkGvar("set64", mkProductType[LargeWord, Word, LargeWord] ->> Unit, mkStoreOperationFn LoadStoreC64, declInBasis)
        val setFloatFunction = mkGvar("setFloat", mkProductType[LargeWord, Word, Real] ->> Unit, mkStoreOperationFn LoadStoreCFloat, declInBasis)
        val setDoubleFunction = mkGvar("setDouble", mkProductType[LargeWord, Word, Real] ->> Unit, mkStoreOperationFn LoadStoreCDouble, declInBasis)
        val allocCStackFn = mkGvar("allocCStack", Word ->> LargeWord, mkUnaryFn BuiltIns.AllocCStack, declInBasis)
        val freeCStackFn = mkGvar("freeCStack", LargeWord ** Word ->> Unit, mkBinaryFn BuiltIns.FreeCStack, declInBasis)
    in
        val () = #enterVal fmemEnv ("get8", get8Function)
        val () = #enterVal fmemEnv ("get16", get16Function)
        val () = #enterVal fmemEnv ("get32", get32Function)
        val () = #enterVal fmemEnv ("get64", get64Function)
        val () = #enterVal fmemEnv ("getFloat", getFloatFunction)
        val () = #enterVal fmemEnv ("getDouble", getDoubleFunction)
        val () = #enterVal fmemEnv ("set8", set8Function)
        val () = #enterVal fmemEnv ("set16", set16Function)
        val () = #enterVal fmemEnv ("set32", set32Function)
        val () = #enterVal fmemEnv ("set64", set64Function)
        val () = #enterVal fmemEnv ("setFloat", setFloatFunction)
        val () = #enterVal fmemEnv ("setDouble", setDoubleFunction)
        val () = #enterVal fmemEnv ("allocCStack", allocCStackFn)
        (* Free is a binary operation that takes both the allocated address and
           the size.  The size is used by the compiled code, where this is
           implemented using the C-stack.  The allocated address is intended for
           possible use by the interpreter, where it could be implemented as
           malloc/free.
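           Illustrative aside (editorial, not part of the original source): a typical
           use pairs the two operations,

               val mem = allocCStack 0w16
               ... use the 16 bytes at mem ...
               val () = freeCStack (mem, 0w16)

           passing the size to freeCStack for the reason given above.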
*) val () = #enterVal fmemEnv ("freeCStack", freeCStackFn) end local val foreignEnv = makeStructure(globalEnv, "Foreign") local val EXC_foreign = 23 val foreignException = Value{ name = "Foreign", typeOf = String ->> TYPETREE.exnType, access = Global(mkConst(toMachineWord EXC_foreign)), class = Exception, locations = declInBasis, references = NONE, instanceTypes=NONE } in val () = #enterVal foreignEnv ("Foreign", foreignException) end val arg0 = mkLoadArgument 0 val arg1 = mkLoadArgument 1 local val callForeignCall = mkEval(mkConst (toMachineWord CODETREE.Foreign.foreignCall), [arg0]) val innerBody = mkEval(mkLoadClosure 0, [mkInd(0, arg0), mkInd(1, arg0), mkInd(2, arg0)]) val outerBody = mkEnv([mkDec(0, callForeignCall)], mkInlproc(innerBody, 1, "foreignCall(1)(1)", [mkLoadLocal 0], 0)) in val foreignCallEntry = mkInlproc(outerBody, 1, "foreignCall(1)", [], 1) end local (* Build a callback. First apply the compiler to the abi/argtype/restype values. Then apply the result to a function to generate the final C callback code. The C callback code calls the function with two arguments. Here we have to pass it a function that expects a tuple and unwrap it. *) val innerMost = mkInlproc(mkEval(mkLoadClosure 0, [mkTuple[arg0, arg1]]), 2, "buildCallBack(1)(1)2", [mkLoadArgument 0], 0) val resultFn = mkInlproc(mkEval(mkLoadClosure 0, [innerMost]), 1, "buildCallBack(1)(1)", [mkLoadLocal 0], 0) val firstBuild = mkEval(mkConst (toMachineWord CODETREE.Foreign.buildCallBack), [arg0]) val outerBody = mkEnv([mkDec(0, firstBuild)], resultFn) in val buildCallBackEntry = mkInlproc(outerBody, 1, "buildCallBack(1)", [], 1) end (* Abi - an eqtype. An enumerated type or short int. *) local open TypeValue fun monotypePrinter _ = PRETTY.PrettyString "?" val code = createTypeValue{ eqCode = equalTaggedWordFn, printCode = mkConst (toMachineWord (ref monotypePrinter)), boxedCode = boxedNever, sizeCode = singleWord } val abiEqAndPrint = Global (genCode(code, [], 0) ()) in val abiConstr = makeTypeConstructor("abi", [], makeFreeId(0, abiEqAndPrint, true, basisDescription "Foreign.LowLevel.abi"), declInBasis) end val () = #enterType foreignEnv ("abi", TypeConstrSet(abiConstr, [])) val abiType = mkTypeConstruction ("abi", abiConstr, [], declInBasis) (* It would be possible to put the definition of cType in here but it's complicated. It's easier to use an opaque type and put in a cast later. *) val ctypeConstr = makeTypeConstructor("ctype", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "Foreign.LowLevel.ctype"), declInBasis) val () = #enterType foreignEnv ("ctype", TypeConstrSet(ctypeConstr, [])) val ffiType = mkTypeConstruction ("ctype", ctypeConstr, [], declInBasis) val foreignCallType = mkProductType[abiType, List ffiType, ffiType] ->> mkProductType[LargeWord, LargeWord, LargeWord] ->> Unit val buildCallBackType = mkProductType[abiType, List ffiType, ffiType] ->> (mkProductType[LargeWord, LargeWord] ->> Unit) ->> LargeWord fun enterForeign (name, entry, typ) = #enterVal foreignEnv (name, mkGvar (name, typ, entry, declInBasis)) in val () = enterForeign("foreignCall", foreignCallEntry, foreignCallType) val () = enterForeign("buildCallBack", buildCallBackEntry, buildCallBackType) (* Apply the abiList function here. The ABIs depend on the platform in the interpreted version. 
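       Illustrative aside (editorial, not part of the original source): because
       abiList is applied here, at bootstrap time, the resulting (name, abi) pairs
       are frozen into the compiled code as a constant by mkConst below rather than
       being recomputed on each use.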
*) val () = enterForeign("abiList", mkConst(toMachineWord(CODETREE.Foreign.abiList())), List (String ** abiType)) end local fun addVal (name : string, value : 'a, typ : types) : unit = enterBootstrap (name, mkConst (toMachineWord value), typ) (* These are only used during the bootstrap phase. Replacements are installed once the appropriate modules of the basis library are compiled. *) fun intOfString s = let val radix = if String.size s >= 3 andalso String.substring(s, 0, 2) = "0x" orelse String.size s >= 4 andalso String.substring(s, 0, 3) = "~0x" then StringCvt.HEX else StringCvt.DEC in case StringCvt.scanString (Int.scan radix) s of NONE => raise Conversion "Invalid integer constant" | SOME res => res end fun wordOfString s = let val radix = if String.size s > 2 andalso String.sub(s, 2) = #"x" then StringCvt.HEX else StringCvt.DEC in case StringCvt.scanString (Word.scan radix) s of NONE => raise Conversion "Invalid word constant" | SOME res => res end fun unescapeChar (s: string) : char = let fun rdr i = if i = size s then NONE else SOME(String.sub(s, i), i+1) in case Char.scan rdr 0 of NONE => (* Bad conversion *) raise Conversion "Invalid string constant" | SOME(res, _) => res end open PRINTTABLE val convstringCode = genCode(mkConst(toMachineWord unescapeString), [], 0) () val convintCode = genCode(mkConst(toMachineWord intOfString), [], 0) () val convwordCode = genCode(mkConst(toMachineWord wordOfString), [], 0) () val convcharCode = genCode(mkConst(toMachineWord unescapeChar), [], 0) () in (* We need this for compatibility with the 5.8.2 bootstrap. *) val () = addVal ("convString", unescapeString: string -> string, String ->> String) (* Flag to indicate which version of Int to compile *) val () = addVal ("intIsArbitraryPrecision", intIsArbitraryPrecision, Bool) (* Install the overloads now. *) val () = addOverload("convString", stringConstr, convstringCode) val () = addOverload("convInt", fixedIntConstr, convintCode) val () = addOverload("convInt", intInfConstr, convintCode) val () = addOverload("convWord", wordConstr, convwordCode) val () = addOverload("convChar", charConstr, convcharCode) end (* The only reason we have vector here is to get equality right. We need vector to be an equality type and to have a specific equality function. *) local fun polyTypePrinter _ _ = PRETTY.PrettyString "?" (* The equality function takes the base equality type as an argument. The inner function takes two arguments which are the two vectors to compare, checks the lengths and if they're equal applies the base equality to each field. *) val eqCode = mkInlproc( mkProc( mkEnv([ (* Length of the items. *) mkDec(0, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 0)), mkDec(1, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 1)), mkMutualDecs[(2, (* Loop function. *) mkProc( mkIf( (* Finished? *) mkEqualTaggedWord(mkLoadClosure 0, mkLoadArgument 0), CodeTrue, (* Yes, all equal. *) mkIf( mkEval( TypeValue.extractEquality(mkLoadClosure 2), (* Base equality fn *) [ mkLoadOperation(LoadStoreMLWord{isImmutable=true}, mkLoadClosure 3, mkLoadArgument 0), mkLoadOperation(LoadStoreMLWord{isImmutable=true}, mkLoadClosure 4, mkLoadArgument 0) ]), mkEval(mkLoadClosure 1, (* Recursive call with index+1. 
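       Illustrative aside (editorial, not part of the original source): the loop
       being assembled corresponds to the ML function

           fun loop i =
               i = length orelse
                   (baseEq (sub (v1, i), sub (v2, i)) andalso loop (i + 1))

       where length, baseEq, v1 and v2 name the closure slots loaded above; sub
       stands for the immutable load operation.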
*)
                                [
                                    mkBinary(BuiltIns.WordArith BuiltIns.ArithAdd,
                                        mkLoadArgument 0, mkConst(toMachineWord 1))
                                ]),
                            CodeFalse (* Not equal elements - result false *)
                        )
                    ),
                    1, "vector-loop",
                    [mkLoadLocal 0 (* Length *), mkLoadLocal 2 (* Loop function *),
                     mkLoadClosure 0 (* Base equality function *),
                     mkLoadArgument 0 (* Vector 0 *), mkLoadArgument 1 (* Vector 1 *)], 0))]
                ],
                mkIf
                ( (* Test the lengths. *)
                    mkEqualTaggedWord(mkLoadLocal 0, mkLoadLocal 1),
                    (* Equal - test the contents. *)
                    mkEval(mkLoadLocal 2, [CodeZero]),
                    CodeFalse (* Not same length - result false *)
                )
            ),
            2, "vector-eq", [mkLoadArgument 0], 3),
        1, "vector-eq()", [], 0)

        val idCode = (* Polytype *)
        let
            open TypeValue
            val code =
                createTypeValue{
                    eqCode=eqCode,
                    printCode=mkConst (toMachineWord (ref polyTypePrinter)),
                    boxedCode=mkInlproc(boxedAlways, 1, "boxed-vector", [], 0),
                    sizeCode=mkInlproc(singleWord, 1, "size-vector", [], 0)}
        in
            Global (genCode(code, [], 0) ())
        end
    in
        val vectorType =
            makeTypeConstructor("vector", [makeTypeVariable()],
                makeFreeId(1, idCode, true, basisDescription "vector"), declInBasis)
        val () = enterGlobalType ("vector", TypeConstrSet(vectorType, []))
    end

    (* We also need a type with byte-wise equality. *)
    local
        fun monoTypePrinter _ = PRETTY.PrettyString "?"
        (* This is a monotype equality function that takes two byte vectors and
           compares them byte-by-byte for equality.  Because they are vectors of
           bytes it's unsafe to load whole words, which could look like addresses
           if the bottom bit happens to be zero. *)
        val eqCode =
            mkProc(
                mkEnv([
                    (* Length of the items. *)
                    mkDec(0, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 0)),
                    mkDec(1, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 1))
                    ],
                    mkIf
                    ( (* Test the lengths. *)
                        mkEqualTaggedWord(mkLoadLocal 0, mkLoadLocal 1),
                        (* Equal - test the contents. *)
                        mkEnv([
                            (* ByteVecEqual takes a byte length so we have to multiply
                               by the number of bytes per word. *)
                            mkDec(2,
                                mkBinary(BuiltIns.WordArith BuiltIns.ArithMult,
                                    mkConst(toMachineWord RunCall.bytesPerWord), mkLoadLocal 0))
                            ],
                            mkBlockOperation{kind=BlockOpEqualByte,
                                leftBase=mkLoadArgument 0, rightBase=mkLoadArgument 1,
                                leftIndex=CodeZero, rightIndex=CodeZero, length=mkLoadLocal 2}),
                        CodeFalse (* Not same length - result false *)
                    )
                ),
                2, "byteVector-eq", [], 3)
        val idCode = (* Monotype *)
        let
            open TypeValue
            val code =
                createTypeValue{
                    eqCode=eqCode,
                    printCode=mkConst (toMachineWord (ref monoTypePrinter)),
                    boxedCode=boxedAlways, sizeCode=singleWord}
        in
            Global (genCode(code, [], 0) ())
        end
    in
        val byteVectorType =
            makeTypeConstructor("byteVector", [],
                makeFreeId(0, idCode, true, basisDescription "byteVector"), declInBasis)
        val () = #enterType bootstrapEnv ("byteVector", TypeConstrSet(byteVectorType, []))
    end

    (* We also need array and Array2.array to be passed through here so that
       they have the special property of being eqtypes even if their argument
       is not.  "array" is defined to be in the global environment. *)
    val () = enterGlobalType ("array", TypeConstrSet(arrayConstr, []))
    val () = #enterType bootstrapEnv ("array", TypeConstrSet(array2Constr, []))
    val () = #enterType bootstrapEnv ("byteArray", TypeConstrSet(byteArrayConstr, []))

    (* '=', '<>', PolyML.print etc. are type-specific functions which appear to
       be polymorphic.  The compiler recognises these and treats them specially.
       For (in)equality that means generating type-specific versions of the
       equality operations; for print etc. that means printing in a type-specific
       way.  They can become true polymorphic functions and lose their
       type-specificity.
For (in)equality that means defaulting to structure equality which is normal and expected behaviour. For print etc that means losing the ability to print and just printing "?" so it's important to avoid that happening. "open" treats type-specific functions specially and retains the type-specificity. That's important to allow the prelude code to expand the PolyML structure. *) local val eqType = let val a = makeEqTV () in a ** a ->> Bool end val eqVal = mkSpecialFun("=", eqType, Equal) in val () = enterGlobalValue ("=", eqVal) end local val neqType = let val a = makeEqTV () in a ** a ->> Bool end val neqVal = mkSpecialFun("<>", neqType, NotEqual) in val () = enterGlobalValue ("<>", neqVal) end val polyMLEnv = makeStructure(globalEnv, "PolyML") val enterPolyMLVal = #enterVal polyMLEnv local (* This version of the environment must match that used in the NameSpace structure. *) open TYPETREE (* Create a new structure for them. *) val nameSpaceEnv = makeStructure(polyMLEnv, "NameSpace") (* Substructures. *) val valuesEnv = makeStructure(nameSpaceEnv, "Values") and typesEnv = makeStructure(nameSpaceEnv, "TypeConstrs") and fixesEnv = makeStructure(nameSpaceEnv, "Infixes") and structsEnv = makeStructure(nameSpaceEnv, "Structures") and sigsEnv = makeStructure(nameSpaceEnv, "Signatures") and functsEnv = makeStructure(nameSpaceEnv, "Functors") (* Types for the basic values. These are opaque. *) val valueVal = makeAndDeclareOpaqueType("value", "PolyML.NameSpace.Values.value", valuesEnv) (* Representation of the type of a value. *) val Types = makeAndDeclareOpaqueType("typeExpression", "PolyML.NameSpace.Values.typeExpression", valuesEnv) val typeVal = makeAndDeclareOpaqueType("typeConstr", "PolyML.NameSpace.TypeConstrs.typeConstr", typesEnv) val fixityVal = makeAndDeclareOpaqueType("fixity", "PolyML.NameSpace.Infixes.fixity", fixesEnv) val signatureVal = makeAndDeclareOpaqueType("signatureVal", "PolyML.NameSpace.Signatures.signatureVal", sigsEnv) val structureVal = makeAndDeclareOpaqueType("structureVal", "PolyML.NameSpace.Structures.structureVal", structsEnv) val functorVal = makeAndDeclareOpaqueType("functorVal", "PolyML.NameSpace.Functors.functorVal", functsEnv) (* nameSpace type. Labelled record. *) fun createFields(name, vType): { name: string, typeof: types} list = let val enterFun = String ** vType ->> Unit val lookupFun = String ->> Option vType val allFun = Unit ->> List (String ** vType) in [mkLabelEntry("enter" ^ name, enterFun), mkLabelEntry("lookup" ^ name, lookupFun), mkLabelEntry("all" ^ name, allFun)] end (* We have to use the same names as we use in the env type because we're passing "env" values through the bootstrap. *) val valTypes = [("Val", valueVal), ("Type", typeVal), ("Fix", fixityVal), ("Struct", structureVal), ("Sig", signatureVal), ("Funct", functorVal)] val fields = List.foldl (fn (p,l) => createFields p @ l) [] valTypes val recordType = makeTypeAbbreviation("nameSpace", "PolyML.NameSpace.nameSpace", [], mkLabelled(sortLabels fields, true), declInBasis); val () = #enterType nameSpaceEnv ("nameSpace", TypeConstrSet(recordType, [])); (* The result type of the compiler includes valueVal etc. 
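       Illustrative aside (editorial, not part of the original source): each
       successful compilation returns the newly declared items in these lists, so
       compiling a declaration such as

           val x = 1

       produces a result whose values field contains a pair of the name "x" and
       its value entry.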
*) val resultFields = List.map TYPETREE.mkLabelEntry [("values", List(String ** valueVal)), ("fixes", List(String ** fixityVal)), ("types", List(String ** typeVal)), ("structures", List(String ** structureVal)), ("signatures", List(String ** signatureVal)), ("functors", List(String ** functorVal))] in val nameSpaceType = mkTypeConstruction ("nameSpace", recordType, [], declInBasis) val execResult = mkLabelled(sortLabels resultFields, true) type execResult = { fixes: (string * fixStatus) list, values: (string * values) list, structures: (string * structVals) list, signatures: (string * signatures) list, functors: (string * functors) list, types: (string * typeConstrSet) list } val valueVal = valueVal val typeVal = typeVal val fixityVal = fixityVal val signatureVal = signatureVal val structureVal = structureVal val functorVal = functorVal val Types = Types val valuesEnv = valuesEnv and typesEnv = typesEnv and fixesEnv = fixesEnv and structsEnv = structsEnv and sigsEnv = sigsEnv and functsEnv = functsEnv end local val typeconstr = locationConstr val () = #enterType polyMLEnv ("location", typeconstr); in val Location = mkTypeConstruction ("location", tsConstr typeconstr, [], declInBasis) end (* Interface to the debugger. *) local open TYPETREE val debuggerEnv = makeStructure(polyMLEnv, "DebuggerInterface") (* Make these opaque at this level. *) val locationPropList = makeAndDeclareOpaqueType("locationPropList", "PolyML.DebuggerInterface.locationPropList", debuggerEnv) val typeId = makeAndDeclareOpaqueType("typeId", "PolyML.DebuggerInterface.typeId", debuggerEnv) val machineWordType = makeAndDeclareOpaqueType("machineWord", "PolyML.DebuggerInterface.machineWord", debuggerEnv) (* For long term security keep these as different from global types and sigs. Values in the static environment need to be copied before they are global. *) val localType = makeAndDeclareOpaqueType("localType", "PolyML.DebuggerInterface.localType", debuggerEnv) val localTypeConstr = makeAndDeclareOpaqueType("localTypeConstr", "PolyML.DebuggerInterface.localTypeConstr", debuggerEnv) val localSig = makeAndDeclareOpaqueType("localSig", "PolyML.DebuggerInterface.localSig", debuggerEnv) open DEBUGGER (* Entries in the static list. This type is only used within the implementation of DebuggerInterface in the basis library and does not appear in the final signature. *) val environEntryConstr = makeTypeConstructor("environEntry", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "PolyML.DebuggerInterface.environEntry"), declInBasis) val environEntryType = mkTypeConstruction ("environEntry", environEntryConstr, [], declInBasis) val constrs = (* Order is significant. *) [ ("EnvEndFunction", mkProductType[String, Location, localType]), ("EnvException", mkProductType[String, localType, locationPropList]), ("EnvStartFunction", mkProductType[String, Location, localType]), ("EnvStructure", mkProductType[String, localSig, locationPropList]), ("EnvTConstr", String ** localTypeConstr), ("EnvTypeid", typeId ** typeId), ("EnvVConstr", mkProductType[String, localType, Bool, Int, locationPropList]), ("EnvValue", mkProductType[String, localType, locationPropList]) ] (* This representation must match the representation defined in DEBUGGER_.sml. 
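       Illustrative aside (editorial, not part of the original source): the
       constructors are listed alphabetically and chooseConstrRepr assigns each
       one its representation by position in the list, so this enumeration and
       the datatype in DEBUGGER_.sml must stay in the same order to agree on the
       encoding.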
*) val numConstrs = List.length constrs val {constrs=constrReps, ...} = chooseConstrRepr(constrs, []) val constructors = ListPair.map (fn ((s,t), code) => mkGconstr(s, t ->> environEntryType, code, false, numConstrs, declInBasis)) (constrs, constrReps) val () = List.app (fn c => #enterVal debuggerEnv(valName c, c)) constructors (* Put these constructors onto the type. *) val () = #enterType debuggerEnv ("environEntry", TypeConstrSet(environEntryConstr, constructors)) (* Debug state type. *) val debugStateConstr = makeTypeAbbreviation("debugState", "PolyML.DebuggerInterface.debugState", [], mkProductType[List environEntryType, List machineWordType, Location], declInBasis) val () = #enterType debuggerEnv ("debugState", TypeConstrSet(debugStateConstr, [])) val debugStateType = mkTypeConstruction ("debugState", debugStateConstr, [], declInBasis) in val () = applyList (fn (name, v, t) => #enterVal debuggerEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("makeValue", toMachineWord(makeValue: debugState -> string * types * locationProp list * machineWord -> values), debugStateType ->> mkProductType[String, localType, locationPropList, machineWordType] ->> valueVal), ("makeException", toMachineWord(makeException: debugState -> string * types * locationProp list * machineWord -> values), debugStateType ->> mkProductType[String, localType, locationPropList, machineWordType] ->> valueVal), ("makeConstructor", toMachineWord(makeConstructor: debugState -> string * types * bool * int * locationProp list * machineWord -> values), debugStateType ->> mkProductType[String, localType, Bool, Int, locationPropList, machineWordType] ->> valueVal), ("makeAnonymousValue", toMachineWord(makeAnonymousValue: debugState -> types * machineWord -> values), debugStateType ->> mkProductType[localType, machineWordType] ->> valueVal), ("makeStructure", toMachineWord(makeStructure: debugState -> string * signatures * locationProp list * machineWord -> structVals), debugStateType ->> mkProductType[String, localSig, locationPropList, machineWordType] ->> structureVal), ("makeTypeConstr", toMachineWord(makeTypeConstr: debugState -> typeConstrSet -> typeConstrSet), debugStateType ->> localTypeConstr ->> typeVal), ("unitValue", toMachineWord(mkGvar("", unitType, CodeZero, []): values), valueVal), (* Used as a default *) ("setOnEntry", toMachineWord(setOnEntry: (string * PolyML.location -> unit) option -> unit), Option (String ** Location ->> Unit) ->> Unit), ("setOnExit", toMachineWord(setOnExit: (string * PolyML.location -> unit) option -> unit), Option (String ** Location ->> Unit) ->> Unit), ("setOnExitException", toMachineWord(setOnExitException: (string * PolyML.location -> exn -> unit) option -> unit), Option (String ** Location ->> Exn ->> Unit) ->> Unit), ("setOnBreakPoint", toMachineWord(setOnBreakPoint: (PolyML.location * bool ref -> unit) option -> unit), Option (Location ** Ref Bool ->> Unit) ->> Unit) ] end local val typeconstr = contextConstr in val () = #enterType polyMLEnv ("context", typeconstr); val () = List.app(fn(tv as Value{name, ...}) => #enterVal polyMLEnv(name, tv)) (tsConstructors typeconstr) end local val typeconstr = prettyConstr in val () = #enterType polyMLEnv ("pretty", typeconstr); val () = List.app(fn(tv as Value{name, ...}) => #enterVal polyMLEnv(name, tv)) (tsConstructors typeconstr) val PrettyType = mkTypeConstruction ("pretty", tsConstr typeconstr, [], declInBasis) end local val printType = let val a = makePrintTV () in a ->> a end; val printVal = mkSpecialFun("print", printType, 
Print); in val () = enterPolyMLVal ("print", printVal); end; local val makeStringType = let val a = makePrintTV () in a ->> String end; val makeStringVal = mkSpecialFun("makestring", makeStringType, MakeString); in val () = enterPolyMLVal ("makestring", makeStringVal); end; local val prettyType = let val a = makePrintTV () in a ** fixedIntType ->> PrettyType end; val prettyVal = mkSpecialFun("prettyRepresentation", prettyType, GetPretty); in val () = enterPolyMLVal ("prettyRepresentation", prettyVal); end; local (* addPrettyPrinter is the new function to install a pretty printer. *) val a = makeTV () val b = makeTV () val addPrettyType = (TYPETREE.fixedIntType ->> b ->> a ->> PrettyType) ->> Unit; val addPrettyVal = mkSpecialFun("addPrettyPrinter", addPrettyType, AddPretty); in val () = enterPolyMLVal ("addPrettyPrinter", addPrettyVal); end; (* This goes in RunCall since it's only for the basis library. *) local val addOverloadType = let val a = makeTV () and b = makeTV () in (a ->> b) ->> String ->> Unit end; val addOverloadVal = mkSpecialFun("addOverload", addOverloadType, AddOverload); in val () = #enterVal runCallEnv ("addOverload", addOverloadVal); end local (* Add a function to switch the default integer type. *) fun setType isArbitrary = setPreferredInt(if isArbitrary then intInfConstr else fixedIntConstr) in val () = #enterVal runCallEnv ("setDefaultIntTypeArbitrary", mkGvar ("setDefaultIntTypeArbitrary", Bool ->> Unit, mkConst (toMachineWord setType), declInBasis)) end local val sourceLocVal = mkSpecialFun("sourceLocation", Unit ->> Location, GetLocation); in val () = enterPolyMLVal ("sourceLocation", sourceLocVal); end; local (* This is used as one of the arguments to the compiler function. *) open TYPETREE val uniStructEnv = makeStructure(bootstrapEnv, "Universal") fun enterUniversal (name : string, entry : codetree, typ : types) : unit = let val value = mkGvar (name, typ, entry, declInBasis); in #enterVal uniStructEnv (name, value) end; local fun polyTypePrinter _ _ = PRETTY.PrettyString "?" open TypeValue val idCode = let val code = createTypeValue{ eqCode=CodeZero, (* Not an equality type *) printCode=mkConst (toMachineWord (ref polyTypePrinter)), boxedCode=mkInlproc(boxedEither(* Assume worst case *), 1, "boxed-tag", [], 0), sizeCode=mkInlproc(singleWord, 1, "size-tag", [], 0)} in Global (genCode(code, [], 0) ()) end in (* type 'a tag *) val tagConstr = makeTypeConstructor("tag", [makeTypeVariable()], makeFreeId(1, idCode, false, basisDescription "tag"), declInBasis); val () = #enterType uniStructEnv ("tag", TypeConstrSet(tagConstr, [])) end (* type universal *) val univConstr = makeTypeConstructor("universal", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "universal"), declInBasis); val () = #enterType uniStructEnv ("universal", TypeConstrSet(univConstr, [])); fun Tag base = mkTypeConstruction ("tag", tagConstr, [base], declInBasis) val Universal = mkTypeConstruction ("universal", univConstr, [], declInBasis) val a = makeTV() (* val tagInject : 'a tag -> 'a -> universal *) val injectType = Tag a ->> a ->> Universal val () = enterUniversal ("tagInject", makePolymorphic([a], mkConst (toMachineWord (Universal.tagInject: 'a Universal.tag -> 'a -> Universal.universal))), injectType) (* We don't actually need tagIs and tagProject since this is only used for the compiler. Universal is redefined in the basis library. 
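       Illustrative aside (editorial, not part of the original source): the three
       operations follow the usual Universal protocol,

           val t = Universal.tag () : int Universal.tag
           val u = Universal.tagInject t 42
           val i = if Universal.tagIs t u then Universal.tagProject t u else 0

       with Universal.tag itself coming from the Poly/ML Universal structure.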
*) val projectType = Tag a ->> Universal ->> a val () = enterUniversal ("tagProject", makePolymorphic([a], mkConst (toMachineWord(Universal.tagProject: 'a Universal.tag -> Universal.universal -> 'a))), projectType) val testType = Tag a ->> Universal ->> Bool val () = enterUniversal ("tagIs", makePolymorphic([a], mkConst (toMachineWord(Universal.tagIs: 'a Universal.tag -> Universal.universal -> bool))), testType) in val Tag = Tag and Universal = Universal end local open TYPETREE (* Parsetree properties datatype. *) val propConstr = makeTypeConstructor("ptProperties", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "PolyML.ptProperties"), declInBasis); val PtProperties = mkTypeConstruction ("ptProperties", propConstr, [], declInBasis) (* Parsetree type. *) val parseTreeConstr = makeTypeAbbreviation("parseTree", "PolyML.parseTree", [], Location ** List PtProperties, declInBasis); val ParseTree = mkTypeConstruction ("parseTree", parseTreeConstr, [], declInBasis) val () = #enterType polyMLEnv ("parseTree", TypeConstrSet(parseTreeConstr, [])); val constrs = (* Order is significant. *) [ ("PTbreakPoint", Ref Bool), ("PTcompletions", List String), ("PTdeclaredAt", Location), ("PTdefId", fixedIntType), ("PTfirstChild", Unit ->> ParseTree), ("PTnextSibling", Unit ->> ParseTree), ("PTopenedAt", Location), ("PTparent", Unit ->> ParseTree), ("PTpreviousSibling", Unit ->> ParseTree), ("PTprint", fixedIntType ->> PrettyType), ("PTreferences", Bool ** List Location), ("PTrefId", fixedIntType), ("PTstructureAt", Location), ("PTtype", Types) ]; (* This representation must match the representation defined in ExportTree.sml. *) val numConstrs = List.length constrs val {constrs=constrReps, ...} = chooseConstrRepr(constrs, []) val constructors = ListPair.map (fn ((s,t), code) => mkGconstr(s, t ->> PtProperties, code, false, numConstrs, declInBasis)) (constrs, constrReps) val () = List.app (fn c => #enterVal polyMLEnv(valName c, c)) constructors (* Put these constructors onto the type. *) val () = #enterType polyMLEnv ("ptProperties", TypeConstrSet(propConstr, constructors)); in val ParseTree = ParseTree and PtProperties = PtProperties end local open TYPETREE val compilerType : types = mkProductType[nameSpaceType, Unit ->> Option Char, List Universal] ->> mkProductType[Option ParseTree, Option (Unit ->> execResult)] type compilerType = nameSpace * (unit -> char option) * Universal.universal list -> exportTree option * (unit->execResult) option in val () = enterBootstrap ("use", mkConst (toMachineWord ((useIntoEnv globalTable []): string -> unit)), String ->> Unit) val () = enterBootstrap ("useWithParms", mkConst (toMachineWord ((useIntoEnv globalTable): Universal.universal list -> string -> unit)), List Universal ->> String ->> Unit) val () = enterPolyMLVal("compiler", mkGvar ("compiler", compilerType, mkConst (toMachineWord (compiler: compilerType)), declInBasis)); val () = enterBootstrap("globalSpace", mkConst (toMachineWord(gEnvAsNameSpace globalTable: nameSpace)), nameSpaceType) (* Add a print function so we can print a message at the start of a bootstrap phase. 
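       Illustrative aside (editorial, not part of the original source): a bootstrap
       phase can then announce itself with something like

           Bootstrap.print "Compiling the basis library\n"

       even before the new basis library's TextIO has been compiled; the message
       text is hypothetical.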
*) val () = enterBootstrap("print", mkConst (toMachineWord TextIO.print), String ->> Unit) end; local val ty = TYPETREE.mkOverloadSet[] val addType = ty ** ty ->> ty; val negType = ty ->> ty; val cmpType = ty ** ty ->> Bool; in val () = enterGlobalValue ("+", mkOverloaded "+" addType); val () = enterGlobalValue ("-", mkOverloaded "-" addType); val () = enterGlobalValue ("*", mkOverloaded "*" addType); val () = enterGlobalValue ("~", mkOverloaded "~" negType); val () = enterGlobalValue ("abs", mkOverloaded "abs" negType); val () = enterGlobalValue (">=", mkOverloaded ">=" cmpType); val () = enterGlobalValue ("<=", mkOverloaded "<=" cmpType); val () = enterGlobalValue (">", mkOverloaded ">" cmpType); val () = enterGlobalValue ("<", mkOverloaded "<" cmpType); (* The following overloads are added in ML97 *) val () = enterGlobalValue ("div", mkOverloaded "div" addType); val () = enterGlobalValue ("mod", mkOverloaded "mod" addType); val () = enterGlobalValue ("/", mkOverloaded "/" addType); end; local open DEBUG; local open TYPETREE val fields = [ mkLabelEntry("location", Location), mkLabelEntry("hard", Bool), mkLabelEntry("message", PrettyType), mkLabelEntry("context", Option PrettyType) ] in val errorMessageProcType = mkLabelled(sortLabels fields, true) ->> Unit type errorMessageProcType = { location: location, hard: bool, message: pretty, context: pretty option } -> unit end local open TYPETREE val optNav = Option(Unit->>ParseTree) val fields = [ mkLabelEntry("parent", optNav), mkLabelEntry("next", optNav), mkLabelEntry("previous", optNav) ] in val navigationType = mkLabelled(sortLabels fields, true) type navigationType = { parent: (unit->exportTree) option, next: (unit->exportTree) option, previous: (unit->exportTree) option } end type 'a tag = 'a Universal.tag in val () = applyList (fn (name, v, t) => enterBootstrap(name, mkConst v, t)) [ ("compilerVersion", toMachineWord (VERSION.compilerVersion: string), String), ("compilerVersionNumber", toMachineWord (VERSION.versionNumber: int), Int), ("lineNumberTag", toMachineWord (lineNumberTag : (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("offsetTag", toMachineWord (offsetTag: (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("fileNameTag", toMachineWord (fileNameTag: string tag), Tag String), ("bindingCounterTag", toMachineWord (bindingCounterTag: (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("maxInlineSizeTag", toMachineWord (maxInlineSizeTag: FixedInt.int tag), Tag fixedIntType), ("assemblyCodeTag", toMachineWord (assemblyCodeTag: bool tag), Tag Bool), ("parsetreeTag", toMachineWord (parsetreeTag: bool tag), Tag Bool), ("codetreeTag", toMachineWord (codetreeTag: bool tag), Tag Bool), ("icodeTag", toMachineWord (icodeTag: bool tag), Tag Bool), ("lowlevelOptimiseTag", toMachineWord (lowlevelOptimiseTag: bool tag), Tag Bool), ("codetreeAfterOptTag", toMachineWord (codetreeAfterOptTag: bool tag), Tag Bool), ("inlineFunctorsTag", toMachineWord (inlineFunctorsTag: bool tag), Tag Bool), ("compilerDebugTag", toMachineWord (compilerDebugTag: int tag), Tag Int), ("debugTag", toMachineWord (debugTag: bool tag), Tag Bool), ("printDepthFunTag", toMachineWord (DEBUG.printDepthFunTag: (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("errorDepthTag", toMachineWord (DEBUG.errorDepthTag: FixedInt.int tag), Tag fixedIntType), ("lineLengthTag", toMachineWord (DEBUG.lineLengthTag: FixedInt.int tag), Tag fixedIntType), ("profileAllocationTag", toMachineWord (DEBUG.profileAllocationTag: FixedInt.int tag), Tag fixedIntType), 
("printOutputTag", toMachineWord (PRETTY.printOutputTag: (pretty->unit) tag), Tag (PrettyType->>Unit)) , ("compilerOutputTag", toMachineWord (PRETTY.compilerOutputTag: (pretty->unit) tag), Tag (PrettyType->>Unit)), ("errorMessageProcTag", toMachineWord (LEX.errorMessageProcTag: errorMessageProcType tag), Tag errorMessageProcType), ("rootTreeTag", toMachineWord (EXPORTTREE.rootTreeTag: navigation tag), Tag navigationType), ("reportUnreferencedIdsTag", toMachineWord (reportUnreferencedIdsTag: bool tag), Tag Bool), ("reportExhaustiveHandlersTag", toMachineWord (reportExhaustiveHandlersTag: bool tag), Tag Bool), ("narrowOverloadFlexRecordTag", toMachineWord (narrowOverloadFlexRecordTag: bool tag), Tag Bool), ("createPrintFunctionsTag", toMachineWord (createPrintFunctionsTag: bool tag), Tag Bool), ("reportDiscardedValuesTag", toMachineWord (reportDiscardedValuesTag: FixedInt.int tag), Tag fixedIntType) ] end; (* PolyML.CodeTree structure. This exports the CodeTree structure into the ML space. *) local open CODETREE val codetreeEnv = makeStructure(polyMLEnv, "CodeTree") fun createType typeName = makeAndDeclareOpaqueType(typeName, "PolyML.CodeTree." ^ typeName, codetreeEnv) val CodeTree = createType "codetree" and MachineWord = createType "machineWord" and CodeBinding = createType "codeBinding" (* For the moment export these only for the general argument and result types. *) fun simpleFn (code, nArgs, name, closure, nLocals) = mkFunction{body=code, argTypes=List.tabulate(nArgs, fn _ => GeneralType), resultType=GeneralType, name=name, closure=closure, numLocals=nLocals} and simpleInlineFn (code, nArgs, name, closure, nLocals) = mkInlineFunction{body=code, argTypes=List.tabulate(nArgs, fn _ => GeneralType), resultType=GeneralType, name=name, closure=closure, numLocals=nLocals} and simpleCall(func, args) = mkCall(func, List.map (fn c => (c, GeneralType)) args, GeneralType) in val CodeTree = CodeTree val () = applyList (fn (name, v, t) => #enterVal codetreeEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("pretty", toMachineWord (CODETREE.pretty: codetree -> pretty), CodeTree ->> PrettyType), ("mkConstant", toMachineWord(mkConst: machineWord -> codetree), MachineWord ->> CodeTree), ("genCode", toMachineWord (genCode: codetree * Universal.universal list * int -> (unit->codetree)), mkProductType[CodeTree, List Universal, Int] ->> (Unit ->> CodeTree)), ("evalue", toMachineWord (evalue: codetree -> machineWord option), CodeTree ->> Option MachineWord), ("mkFunction", toMachineWord (simpleFn: codetree * int * string * codetree list * int -> codetree), mkProductType[CodeTree, Int, String, List CodeTree, Int] ->> CodeTree), ("mkInlineFunction", toMachineWord (simpleInlineFn: codetree * int * string * codetree list * int -> codetree), mkProductType[CodeTree, Int, String, List CodeTree, Int] ->> CodeTree), ("mkCall", toMachineWord (simpleCall: codetree * codetree list -> codetree), CodeTree ** List CodeTree ->> CodeTree), ("mkLoadLocal", toMachineWord (mkLoadLocal: int -> codetree), Int ->> CodeTree), ("mkLoadArgument", toMachineWord (mkLoadArgument: int -> codetree), Int ->> CodeTree), ("mkLoadClosure", toMachineWord (mkLoadClosure: int -> codetree), Int ->> CodeTree), ("mkDec", toMachineWord (mkDec: int * codetree -> codeBinding), Int ** CodeTree ->> CodeBinding), ("mkInd", toMachineWord (mkInd: int * codetree -> codetree), Int ** CodeTree ->> CodeTree), ("mkIf", toMachineWord (mkIf: codetree * codetree * codetree -> codetree), mkProductType[CodeTree, CodeTree, CodeTree] ->> CodeTree), ("mkWhile", 
toMachineWord (mkWhile: codetree * codetree -> codetree), CodeTree ** CodeTree ->> CodeTree), ("mkLoop", toMachineWord (mkLoop: codetree list -> codetree), List CodeTree ->> CodeTree), ("mkBeginLoop", toMachineWord (mkBeginLoop: codetree * (int * codetree) list -> codetree), CodeTree ** List(Int ** CodeTree) ->> CodeTree), ("mkEnv", toMachineWord (mkEnv: codeBinding list * codetree -> codetree), List CodeBinding ** CodeTree ->> CodeTree), ("mkMutualDecs", toMachineWord (mkMutualDecs: (int * codetree) list -> codeBinding), List(Int ** CodeTree) ->> CodeBinding), ("mkTuple", toMachineWord (mkTuple: codetree list -> codetree), List CodeTree ->> CodeTree), ("mkRaise", toMachineWord (mkRaise: codetree -> codetree), CodeTree ->> CodeTree), ("mkHandle", toMachineWord (mkHandle: codetree * codetree * int -> codetree), mkProductType[CodeTree, CodeTree, Int] ->> CodeTree), ("mkNullDec", toMachineWord (mkNullDec: codetree -> codeBinding), CodeTree ->> CodeBinding) ] end local (* Finish off the NameSpace structure now we have types such as pretty. *) open TYPETREE (* The exported versions expect full name spaces as arguments. Because we convert the exported versions to machineWord and give them types as data structures the compiler can't actually check that the type we give matched the internal type. *) fun makeTypeEnv NONE = { lookupType = fn _ => NONE, lookupStruct = fn _ => NONE } | makeTypeEnv(SOME(nameSpace: nameSpace)): printTypeEnv = { lookupType = fn s => case #lookupType nameSpace s of NONE => NONE | SOME t => SOME(t, NONE), lookupStruct = fn s => case #lookupStruct nameSpace s of NONE => NONE | SOME t => SOME(t, NONE) } local (* Values substructure. This also has operations related to type expressions. *) fun codeForValue (Value{access = Global code, class = ValBound, ...}) = code | codeForValue _ = raise Fail "Not a global value" and exportedDisplayTypeExp(ty, depth, nameSpace: nameSpace option) = TYPETREE.display(ty, depth, makeTypeEnv nameSpace) and exportedDisplayValues(valu, depth, nameSpace: nameSpace option) = displayValues(valu, depth, makeTypeEnv nameSpace) and propsForValue (Value {locations, typeOf, ...}) = PTtype typeOf :: mapLocationProps locations fun isConstructor (Value{class = Exception, ...}) = true | isConstructor (Value{class = Constructor _, ...}) = true | isConstructor _ = false fun isException (Value{class = Exception, ...}) = true | isException _ = false in val () = applyList (fn (name, v, t) => #enterVal valuesEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord (valName: values -> string), valueVal ->> String), ("print", toMachineWord (printValues: values * FixedInt.int -> pretty), mkProductType[valueVal, fixedIntType] ->> PrettyType), ("printWithType", toMachineWord (exportedDisplayValues: values * FixedInt.int * nameSpace option -> pretty), mkProductType[valueVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("printType", toMachineWord(exportedDisplayTypeExp: types * FixedInt.int * nameSpace option -> pretty), mkProductType[Types, fixedIntType, Option nameSpaceType] ->> PrettyType), ("typeof", toMachineWord (valTypeOf: values -> types), valueVal ->> Types), ("code", toMachineWord (codeForValue: values -> codetree), valueVal ->> CodeTree), ("properties", toMachineWord (propsForValue: values ->ptProperties list), valueVal ->> List PtProperties), ("isConstructor", toMachineWord(isConstructor: values -> bool), valueVal ->> Bool), ("isException", toMachineWord(isException: values -> bool), valueVal ->> Bool) ] end local (* TypeConstrs 
        local
            (* TypeConstrs substructure. *)
            fun exportedDisplayTypeConstr(tyCons, depth, nameSpace: nameSpace option) =
                    TYPETREE.displayTypeConstrs(tyCons, depth, makeTypeEnv nameSpace)
            and propsForTypeConstr (TypeConstrSet(TypeConstrs {locations, ...}, _)) = mapLocationProps locations
            and nameForType (TypeConstrSet(TypeConstrs{name, ...}, _)) = name
        in
            val () = applyList (fn (name, v, t) => #enterVal typesEnv (name, mkGvar (name, t, mkConst v, declInBasis)))
                [
                    ("name", toMachineWord(nameForType: typeConstrSet -> string), typeVal ->> String),
                    ("print",
                        toMachineWord (exportedDisplayTypeConstr: typeConstrSet * FixedInt.int * nameSpace option -> pretty),
                        mkProductType[typeVal, fixedIntType, Option nameSpaceType] ->> PrettyType),
                    ("properties", toMachineWord (propsForTypeConstr: typeConstrSet -> ptProperties list), typeVal ->> List PtProperties)
                ]
        end
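        (* Similarly for type constructors, under the same assumption that
           typesEnv becomes PolyML.NameSpace.TypeConstrs; "optTy" is an
           illustrative name:

               val SOME optTy = #lookupType PolyML.globalNameSpace "option";
               PolyML.NameSpace.TypeConstrs.name optTy;  (* yields "option" *)
        *)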
        local
            (* Structures substructure. *)
            fun exportedDisplayStructs(str, depth, nameSpace: nameSpace option) =
                    displayStructures(str, depth, makeTypeEnv nameSpace)
            and codeForStruct (Struct{access = Global code, ...}) = code
            |   codeForStruct _ = raise Fail "Not a global structure"
            and propsForStruct (Struct {locations, ...}) = mapLocationProps locations
            and nameForStruct (Struct{name, ...}) = name

            fun nameSpaceForStruct(baseStruct as Struct{signat=Signatures { tab, ...}, ...}): nameSpace =
            let
                open UNIVERSALTABLE
                fun lookupVal s =
                    case univLookup (tab, valueVar, s) of
                        NONE => NONE
                    |   SOME v => SOME(makeSelectedValue(v, baseStruct))
                and lookupType s =
                    case univLookup (tab, typeConstrVar, s) of
                        NONE => NONE
                    |   SOME t => SOME(makeSelectedType(t, baseStruct))
                and lookupStruct s =
                    case univLookup (tab, structVar, s) of
                        NONE => NONE
                    |   SOME s => SOME(makeSelectedStructure(s, baseStruct))

                local
                    fun extractItems t tab =
                        UNIVERSALTABLE.fold
                            (fn (s, u, l) =>
                                if Universal.tagIs t u
                                then (s, Universal.tagProject t u) :: l
                                else l
                            ) [] tab
                in
                    fun allValues() =
                            map(fn (s, v) => (s, makeSelectedValue(v, baseStruct))) (extractItems valueVar tab)
                    and allTypes() =
                            map(fn (s, t) => (s, makeSelectedType(t, baseStruct))) (extractItems typeConstrVar tab)
                    and allStructs() =
                            map(fn (s, v) => (s, makeSelectedStructure(v, baseStruct))) (extractItems structVar tab)
                end

                (* Raise an exception for any attempt to enter a new value.
                   Return empty for the classes that can't exist in a structure. *)
                fun enterFunction _ = raise Fail "updating a structure is not possible."
            in
                {
                    lookupVal = lookupVal, lookupType = lookupType, lookupStruct = lookupStruct,
                    lookupFix = fn _ => NONE, lookupSig = fn _ => NONE, lookupFunct = fn _ => NONE,
                    enterVal = enterFunction, enterType = enterFunction, enterFix = enterFunction,
                    enterStruct = enterFunction, enterSig = enterFunction, enterFunct = enterFunction,
                    allVal = allValues, allType = allTypes, allStruct = allStructs,
                    allFix = fn () => [], allSig = fn () => [], allFunct = fn () => []
                }
            end
        in
            val () = applyList (fn (name, v, t) => #enterVal structsEnv (name, mkGvar (name, t, mkConst v, declInBasis)))
                [
                    ("name", toMachineWord(nameForStruct: structVals -> string), structureVal ->> String),
                    ("print",
                        toMachineWord (exportedDisplayStructs: structVals * FixedInt.int * nameSpace option -> pretty),
                        mkProductType[structureVal, fixedIntType, Option nameSpaceType] ->> PrettyType),
                    ("code", toMachineWord (codeForStruct: structVals -> codetree), structureVal ->> CodeTree),
                    ("properties", toMachineWord (propsForStruct: structVals -> ptProperties list), structureVal ->> List PtProperties),
                    ("contents", toMachineWord(nameSpaceForStruct: structVals -> nameSpace), structureVal ->> nameSpaceType)
                ]
        end

        local
            (* Signatures substructure. *)
            fun exportedDisplaySigs(sign, depth, nameSpace: nameSpace option) =
                    displaySignatures(sign, depth, makeTypeEnv nameSpace)
            and propsForSig (Signatures {locations, ...}) = mapLocationProps locations
            and nameForSig (Signatures{name, ...}) = name
        in
            val () = applyList (fn (name, v, t) => #enterVal sigsEnv (name, mkGvar (name, t, mkConst v, declInBasis)))
                [
                    ("name", toMachineWord(nameForSig: signatures -> string), signatureVal ->> String),
                    ("print",
                        toMachineWord (exportedDisplaySigs: signatures * FixedInt.int * nameSpace option -> pretty),
                        mkProductType[signatureVal, fixedIntType, Option nameSpaceType] ->> PrettyType),
                    ("properties", toMachineWord (propsForSig: signatures -> ptProperties list), signatureVal ->> List PtProperties)
                ]
        end

        local
            (* Functors substructure. *)
            fun exportedDisplayFunctors(funct, depth, nameSpace: nameSpace option) =
                    displayFunctors(funct, depth, makeTypeEnv nameSpace)
            and codeForFunct (Functor{access = Global code, ...}) = code
            |   codeForFunct _ = raise Fail "Not a global functor"
            and propsForFunctor (Functor {locations, ...}) = mapLocationProps locations
            and nameForFunctor (Functor{name, ...}) = name
        in
            val () = applyList (fn (name, v, t) => #enterVal functsEnv (name, mkGvar (name, t, mkConst v, declInBasis)))
                [
                    ("name", toMachineWord(nameForFunctor: functors -> string), functorVal ->> String),
                    ("print",
                        toMachineWord (exportedDisplayFunctors: functors * FixedInt.int * nameSpace option -> pretty),
                        mkProductType[functorVal, fixedIntType, Option nameSpaceType] ->> PrettyType),
                    ("code", toMachineWord (codeForFunct: functors -> codetree), functorVal ->> CodeTree),
                    ("properties", toMachineWord (propsForFunctor: functors -> ptProperties list), functorVal ->> List PtProperties)
                ]
        end

        local
            (* Infixes substructure. *)
            fun nameForFix(FixStatus(s, _)) = s
        in
            val () = applyList (fn (name, v, t) => #enterVal fixesEnv (name, mkGvar (name, t, mkConst v, declInBasis)))
                [
                    ("name", toMachineWord(nameForFix: fixStatus -> string), fixityVal ->> String),
                    ("print", toMachineWord (displayFixStatus: fixStatus -> pretty), fixityVal ->> PrettyType)
                ]
        end
    in
    end

in
    ()
end (* initGlobalEnv *);

end;
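(* A sketch of browsing a structure through the "contents" entry installed
   above, assuming structsEnv is exported as PolyML.NameSpace.Structures as in
   released Poly/ML; "lp" and "ns" are illustrative names, not part of this file:

       val SOME lp = #lookupStruct PolyML.globalNameSpace "ListPair";
       val ns = PolyML.NameSpace.Structures.contents lp;
       map #1 (#allVal ns ());  (* names of the values declared in ListPair *)

   Since nameSpaceForStruct builds the record with enterFunction for all the
   enter* fields, any attempt to add a binding through "ns" raises Fail. *)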