diff --git a/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64PREASSEMBLY.sig b/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64PREASSEMBLY.sig index ef9218e8..5992e264 100644 --- a/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64PREASSEMBLY.sig +++ b/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64PREASSEMBLY.sig @@ -1,258 +1,260 @@ (* Copyright (c) 2021-2 David C. J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public Licence version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public Licence for more details. You should have received a copy of the GNU Lesser General Public Licence along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* The pre-assembly layer goes below the icode and allows peep-hole optimisation. *) signature ARM64PREASSEMBLY = sig type closureRef type machineWord = Address.machineWord (* XZero and XSP are both encoded as 31 but the interpretation depends on the instruction The datatype definition is included here to allow for pattern matching on XSP and XZero. *) datatype xReg = XReg of Word8.word | XZero | XSP and vReg = VReg of Word8.word val X0: xReg and X1: xReg and X2: xReg and X3: xReg and X4: xReg and X5: xReg and X6: xReg and X7: xReg and X8: xReg and X9: xReg and X10: xReg and X11: xReg and X12: xReg and X13: xReg and X14: xReg and X15: xReg and X16: xReg and X17: xReg and X18: xReg and X19: xReg and X20: xReg and X21: xReg and X22: xReg and X23: xReg and X24: xReg and X25: xReg and X26: xReg and X27: xReg and X28: xReg and X29: xReg and X30: xReg val X_MLHeapLimit: xReg (* ML Heap limit pointer *) and X_MLAssemblyInt: xReg (* ML assembly interface pointer. *) and X_MLHeapAllocPtr: xReg (* ML Heap allocation pointer. *) and X_MLStackPtr: xReg (* ML Stack pointer. *) and X_LinkReg: xReg (* Link reg - return address *) and X_Base32in64: xReg (* X24 is used for the heap base in 32-in-64. *) val V0: vReg and V1: vReg and V2: vReg and V3: vReg and V4: vReg and V5: vReg and V6: vReg and V7: vReg (* Condition for conditional branches etc. *) datatype condition = CondEqual (* Z=1 *) | CondNotEqual (* Z=0 *) | CondCarrySet (* C=1 *) | CondCarryClear (* C=0 *) | CondNegative (* N=1 *) | CondPositive (* N=0 imcludes zero *) | CondOverflow (* V=1 *) | CondNoOverflow (* V=0 *) | CondUnsignedHigher (* C=1 && Z=0 *) | CondUnsignedLowOrEq (* ! (C=1 && Z=0) *) | CondSignedGreaterEq (* N=V *) | CondSignedLess (* N<>V *) | CondSignedGreater (* Z==0 && N=V *) | CondSignedLessEq (* !(Z==0 && N=V) *) val invertTest: condition -> condition (* i.e. jump when the condition is not true. *) val condToString: condition -> string datatype shiftType = ShiftLSL of Word8.word | ShiftLSR of Word8.word | ShiftASR of Word8.word | ShiftNone datatype wordSize = WordSize32 | WordSize64 datatype 'a extend = ExtUXTB of 'a (* Unsigned extend byte *) | ExtUXTH of 'a (* Unsigned extend byte *) | ExtUXTW of 'a (* Unsigned extend byte *) | ExtUXTX of 'a (* Left shift *) | ExtSXTB of 'a (* Sign extend byte *) | ExtSXTH of 'a (* Sign extend halfword *) | ExtSXTW of 'a (* Sign extend word *) | ExtSXTX of 'a (* Left shift *) (* Load/store instructions have only a single bit for the shift. 
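(* Illustration only: a plausible definition of invertTest from the signature above,
   pairing each condition with its logical negation as defined by the ARM condition
   codes.  The real definition is in the assembler implementation, not in this diff. *)
fun invertTest CondEqual           = CondNotEqual
  | invertTest CondNotEqual        = CondEqual
  | invertTest CondCarrySet        = CondCarryClear
  | invertTest CondCarryClear      = CondCarrySet
  | invertTest CondNegative        = CondPositive
  | invertTest CondPositive        = CondNegative
  | invertTest CondOverflow        = CondNoOverflow
  | invertTest CondNoOverflow      = CondOverflow
  | invertTest CondUnsignedHigher  = CondUnsignedLowOrEq
  | invertTest CondUnsignedLowOrEq = CondUnsignedHigher
  | invertTest CondSignedGreaterEq = CondSignedLess
  | invertTest CondSignedLess      = CondSignedGreaterEq
  | invertTest CondSignedGreater   = CondSignedLessEq
  | invertTest CondSignedLessEq    = CondSignedGreater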
For byte operations this is one bit shift; for others it scales by the size of the operand if set. *) datatype scale = ScaleOrShift | NoScale datatype loadType = Load64 | Load32 | Load16 | Load8 and opSize = OpSize32 | OpSize64 and logicalOp = LogAnd | LogOr | LogXor and floatSize = Float32 | Double64 and shiftDirection = ShiftLeft | ShiftRightLogical | ShiftRightArithmetic and multKind = MultAdd32 | MultSub32 | MultAdd64 | MultSub64 | SignedMultAddLong (* 32bit*32bit + 64bit => 64Bit *) | SignedMultHigh (* High order part of 64bit*64Bit *) and fpUnary = NegFloat | NegDouble | AbsFloat | AbsDouble | ConvFloatToDble | ConvDbleToFloat and fpBinary = MultiplyFP | DivideFP | AddFP | SubtractFP and unscaledType = NoUpdate | PreIndex | PostIndex and condSet = CondSet | CondSetIncr | CondSetInvert | CondSetNegate and bitfieldKind = BFUnsigned | BFSigned | BFInsert and brRegType = BRRBranch | BRRAndLink | BRRReturn - type precodeLabel - val createLabel: unit -> precodeLabel + type label and labelMaker + val createLabelMaker: unit -> labelMaker + and createLabel: labelMaker -> label datatype precode = (* Basic instructions *) AddImmediate of {regN: xReg, regD: xReg, immed: word, shifted: bool, opSize: opSize, setFlags: bool} | SubImmediate of {regN: xReg, regD: xReg, immed: word, shifted: bool, opSize: opSize, setFlags: bool} | AddShiftedReg of {regM: xReg, regN: xReg, regD: xReg, shift: shiftType, opSize: opSize, setFlags: bool} | SubShiftedReg of {regM: xReg, regN: xReg, regD: xReg, shift: shiftType, opSize: opSize, setFlags: bool} | AddExtendedReg of {regM: xReg, regN: xReg, regD: xReg, extend: Word8.word extend, opSize: opSize, setFlags: bool} | SubExtendedReg of {regM: xReg, regN: xReg, regD: xReg, extend: Word8.word extend, opSize: opSize, setFlags: bool} | MultiplyAndAddSub of {regM: xReg, regN: xReg, regA: xReg, regD: xReg, multKind: multKind} | DivideRegs of {regM: xReg, regN: xReg, regD: xReg, isSigned: bool, opSize: opSize} | LogicalShiftedReg of {regM: xReg, regN: xReg, regD: xReg, shift: shiftType, logOp: logicalOp, opSize: opSize, setFlags: bool} | LoadRegScaled of {regT: xReg, regN: xReg, unitOffset: int, loadType: loadType} | LoadFPRegScaled of {regT: vReg, regN: xReg, unitOffset: int, floatSize: floatSize} | StoreRegScaled of {regT: xReg, regN: xReg, unitOffset: int, loadType: loadType} | StoreFPRegScaled of {regT: vReg, regN: xReg, unitOffset: int, floatSize: floatSize} | LoadRegUnscaled of {regT: xReg, regN: xReg, byteOffset: int, loadType: loadType, unscaledType: unscaledType} | StoreRegUnscaled of {regT: xReg, regN: xReg, byteOffset: int, loadType: loadType, unscaledType: unscaledType} | LoadFPRegUnscaled of {regT: vReg, regN: xReg, byteOffset: int, floatSize: floatSize, unscaledType: unscaledType} | StoreFPRegUnscaled of {regT: vReg, regN: xReg, byteOffset: int, floatSize: floatSize, unscaledType: unscaledType} | LoadRegIndexed of {regT: xReg, regN: xReg, regM: xReg, loadType: loadType, option: scale extend} | StoreRegIndexed of {regT: xReg, regN: xReg, regM: xReg, loadType: loadType, option: scale extend} | LoadFPRegIndexed of {regT: vReg, regN: xReg, regM: xReg, floatSize: floatSize, option: scale extend} | StoreFPRegIndexed of {regT: vReg, regN: xReg, regM: xReg, floatSize: floatSize, option: scale extend} (* LoadAcquire and StoreRelease are used for mutables. 
*) | LoadAcquireReg of {regN: xReg, regT: xReg, loadType: loadType} | StoreReleaseReg of {regN: xReg, regT: xReg, loadType: loadType} (* LoadAcquireExclusiveRegister and StoreReleaseExclusiveRegister are used for mutexes. *) | LoadAcquireExclusiveRegister of {regN: xReg, regT: xReg} | StoreReleaseExclusiveRegister of {regS: xReg, regT: xReg, regN: xReg} | MemBarrier | LoadRegPair of { regT1: xReg, regT2: xReg, regN: xReg, unitOffset: int, loadType: loadType, unscaledType: unscaledType} | StoreRegPair of { regT1: xReg, regT2: xReg, regN: xReg, unitOffset: int, loadType: loadType, unscaledType: unscaledType} | LoadFPRegPair of { regT1: vReg, regT2: vReg, regN: xReg, unitOffset: int, floatSize: floatSize, unscaledType: unscaledType} | StoreFPRegPair of { regT1: vReg, regT2: vReg, regN: xReg, unitOffset: int, floatSize: floatSize, unscaledType: unscaledType} | ConditionalSet of {regD: xReg, regTrue: xReg, regFalse: xReg, cond: condition, condSet: condSet, opSize: opSize} | BitField of {immr: word, imms: word, regN: xReg, regD: xReg, opSize: opSize, bitfieldKind: bitfieldKind} | ShiftRegisterVariable of {regM: xReg, regN: xReg, regD: xReg, opSize: opSize, shiftDirection: shiftDirection} | BitwiseLogical of { bits: Word64.word, regN: xReg, regD: xReg, opSize: opSize, setFlags: bool, logOp: logicalOp} (* Floating point *) | MoveGeneralToFP of { regN: xReg, regD: vReg, floatSize: floatSize} | MoveFPToGeneral of {regN: vReg, regD: xReg, floatSize: floatSize} | CvtIntToFP of { regN: xReg, regD: vReg, floatSize: floatSize, opSize: opSize} | CvtFloatToInt of { round: IEEEReal.rounding_mode, regN: vReg, regD: xReg, floatSize: floatSize, opSize: opSize} | FPBinaryOp of { regM: vReg, regN: vReg, regD: vReg, floatSize: floatSize, fpOp: fpBinary} | FPComparison of { regM: vReg, regN: vReg, floatSize: floatSize} | FPUnaryOp of {regN: vReg, regD: vReg, fpOp: fpUnary} (* Branches and Labels. *) - | SetLabel of precodeLabel - | ConditionalBranch of condition * precodeLabel - | UnconditionalBranch of precodeLabel - | BranchAndLink of precodeLabel + | SetLabel of label + | ConditionalBranch of condition * label + | UnconditionalBranch of label + | BranchAndLink of label | BranchReg of {regD: xReg, brRegType: brRegType } - | LoadLabelAddress of xReg * precodeLabel - | TestBitBranch of { test: xReg, bit: Word8.word, label: precodeLabel, onZero: bool } - | CompareBranch of { test: xReg, label: precodeLabel, onZero: bool, opSize: opSize } + | LoadLabelAddress of xReg * label + | TestBitBranch of { test: xReg, bit: Word8.word, label: label, onZero: bool } + | CompareBranch of { test: xReg, label: label, onZero: bool, opSize: opSize } (* Composite instructions *) | MoveXRegToXReg of {sReg: xReg, dReg: xReg} | LoadNonAddr of xReg * Word64.word | LoadAddr of xReg * machineWord | RTSTrap of { rtsEntry: int, work: xReg, save: xReg list } (* Allocate memory - bytes includes the length word and rounding. *) | AllocateMemoryFixedSize of { bytes: word, dest: xReg, save: xReg list, work: xReg } (* Allocate memory - sizeReg is number of ML words needed for cell. *) | AllocateMemoryVariableSize of { sizeReg: xReg, dest: xReg, save: xReg list, work: xReg } (* Branch table for indexed case. startLabel is the address of the first label in the list. The branch table is a sequence of unconditional branches. 
*) - | BranchTable of { startLabel: precodeLabel, brTable: precodeLabel list } + | BranchTable of { startLabel: label, brTable: label list } | LoadGlobalHeapBaseInCallback of xReg (* Wrapper for BitField *) val shiftConstant: { direction: shiftDirection, regD: xReg, regN: xReg, shift: word, opSize: opSize } -> precode (* Convenient sequences. N.B. These are in reverse order. *) val boxDouble: {source: vReg, destination: xReg, workReg: xReg, saveRegs: xReg list} * precode list -> precode list and boxFloat: {source: vReg, destination: xReg, workReg: xReg, saveRegs: xReg list} * precode list -> precode list and boxSysWord: {source: xReg, destination: xReg, workReg: xReg, saveRegs: xReg list} * precode list -> precode list (* Create the vector of code from the list of instructions and update the closure reference to point to it. *) val generateFinalCode: {instrs: precode list, name: string, parameters: Universal.universal list, resultClosure: closureRef, - profileObject: machineWord, labelCount: int} -> unit + profileObject: machineWord, labelMaker: labelMaker} -> unit (* Offsets in the assembly code interface pointed at by X26 These are in units of 64-bits NOT bytes. *) val heapOverflowCallOffset: int and stackOverflowCallOffset: int and stackOverflowXCallOffset: int and exceptionHandlerOffset: int and stackLimitOffset: int and threadIdOffset: int and heapLimitPtrOffset: int and heapAllocPtrOffset: int and mlStackPtrOffset: int and exceptionPacketOffset: int val is32in64: bool and isBigEndian: bool val isEncodableBitPattern: Word64.word * wordSize -> bool structure Sharing: sig type closureRef = closureRef type loadType = loadType type opSize = opSize type logicalOp = logicalOp type floatSize = floatSize type shiftDirection = shiftDirection type multKind = multKind type fpUnary = fpUnary type fpBinary = fpBinary type unscaledType = unscaledType type condSet = condSet type bitfieldKind = bitfieldKind type brRegType = brRegType type precode = precode type xReg = xReg type vReg = vReg - type precodeLabel = precodeLabel + type label = label + type labelMaker = labelMaker type condition = condition type shiftType = shiftType type wordSize = wordSize type 'a extend = 'a extend type scale = scale end end; diff --git a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64ForeignCall.sml b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64ForeignCall.sml index 0b791ac9..5a8b1319 100644 --- a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64ForeignCall.sml +++ b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64ForeignCall.sml @@ -1,854 +1,861 @@ (* Copyright (c) 2021-2 David C. J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public Licence version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public Licence for more details. 
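(* A minimal sketch of how the revised label interface fits together, mirroring the
   call sites changed in this patch: one labelMaker per code unit, labels allocated
   from it, and the maker passed to generateFinalCode in place of the old labelCount.
   "skip", "exampleCode" and the instruction list are placeholders; makeConstantClosure
   and createProfileObject are taken from the surrounding code below. *)
local
    val labelMaker = createLabelMaker()
    val skip = createLabel labelMaker
    val exampleCode =
        [
            (* ... instructions that set the flags would go here ... *)
            ConditionalBranch(CondEqual, skip),
            (* ... instructions executed only when the branch is not taken ... *)
            SetLabel skip
        ]
    val closure = makeConstantClosure()
in
    val () =
        generateFinalCode{instrs=exampleCode, name="example", parameters=[],
            resultClosure=closure, profileObject=createProfileObject(),
            labelMaker=labelMaker}
end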
You should have received a copy of the GNU Lesser General Public Licence along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor Arm64ForeignCall( structure CodeArray: CODEARRAY and Arm64PreAssembly: ARM64PREASSEMBLY and Debug: DEBUG sharing CodeArray.Sharing = Arm64PreAssembly.Sharing ): FOREIGNCALL = struct open CodeArray Arm64PreAssembly exception InternalError = Misc.InternalError and Foreign = Foreign.Foreign datatype fastArgs = FastArgFixed | FastArgDouble | FastArgFloat val makeEntryPoint: string -> machineWord = RunCall.rtsCallFull1 "PolyCreateEntryPointObject" (* Turn an index into an absolute address. *) fun indexToAbsoluteAddress(iReg, absReg) = if is32in64 then [AddShiftedReg{regM=iReg, regN=X_Base32in64, regD=absReg, shift=ShiftLSL 0w2, opSize=OpSize64, setFlags=false}] else if iReg = absReg then [] else [MoveXRegToXReg{sReg=iReg, dReg=absReg}] fun unboxDouble(addrReg, workReg, valReg) = if is32in64 then indexToAbsoluteAddress(addrReg, workReg) @ [LoadFPRegScaled{regT=valReg, regN=workReg, unitOffset=0, floatSize=Double64}] else [LoadFPRegScaled{regT=valReg, regN=addrReg, unitOffset=0, floatSize=Double64}] fun unboxOrUntagSingle(addrReg, workReg, valReg) = if is32in64 then [LoadFPRegIndexed{regN=X_Base32in64, regM=addrReg, regT=valReg, option=ExtUXTX ScaleOrShift, floatSize=Float32}] else [ shiftConstant{direction=ShiftRightLogical, shift=0w32, regN=addrReg, regD=workReg, opSize=OpSize64}, MoveGeneralToFP{regN=workReg, regD=valReg, floatSize=Float32} ] fun boxOrTagFloat{floatReg, fixedReg, workReg, saveRegs} = if is32in64 then List.rev(boxFloat({source=floatReg, destination=fixedReg, workReg=workReg, saveRegs=saveRegs}, [])) else [ MoveFPToGeneral{regN=floatReg, regD=fixedReg, floatSize=Float32}, shiftConstant{direction=ShiftLeft, shift=0w32, regN=fixedReg, regD=fixedReg, opSize=OpSize64}, BitwiseLogical{regN=fixedReg, regD=fixedReg, bits=0w1, logOp=LogOr, setFlags=false, opSize=OpSize64} ] (* Call the RTS. Previously this did not check for exceptions raised in the RTS and instead there was code added after each call. Doing it after the call doesn't affect the time taken but makes the code larger especially as this is needed in every arbitrary precision operation. Currently we clear the RTS exception packet field before the call. The field is cleared in "full" calls that may raise an exception but not in fast calls. They may not raise an exception but the packet may not have been cleared from a previous call. *) fun rtsCallFastGeneral (functionName, argFormats, resultFormat, debugSwitches) = let val entryPointAddr = makeEntryPoint functionName (* The maximum we currently have is five so we don't need to worry about stack args. *) fun loadArgs([], _, _, _) = [] | loadArgs(FastArgFixed :: argTypes, srcReg :: srcRegs, fixed :: fixedRegs, fpRegs) = if srcReg = fixed then loadArgs(argTypes, srcRegs, fixedRegs, fpRegs) (* Already in the right reg *) else MoveXRegToXReg{sReg=srcReg, dReg=fixed} :: loadArgs(argTypes, srcRegs, fixedRegs, fpRegs) | loadArgs(FastArgDouble :: argTypes, srcReg :: srcRegs, fixedRegs, fp :: fpRegs) = (* Unbox the value into a fp reg. 
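(* Word-level picture of the native (not 32-in-64) Real32.real representation handled
   by unboxOrUntagSingle and boxOrTagFloat above: the 32 float bits live in the top
   half of the word with the tag bit set at the bottom.  Illustration only; the real
   code does this with a shift and a logical OR on registers. *)
fun tagSingle (bits32: Word64.word): Word64.word =
    Word64.orb(Word64.<<(bits32, 0w32), 0w1)
and untagSingle (tagged: Word64.word): Word64.word =
    Word64.>>(tagged, 0w32)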
*) unboxDouble(srcReg, srcReg, fp) @ loadArgs(argTypes, srcRegs, fixedRegs, fpRegs) | loadArgs(FastArgFloat :: argTypes, srcReg :: srcRegs, fixedRegs, fp :: fpRegs) = (* Untag and move into the fp reg *) unboxOrUntagSingle(srcReg, srcReg, fp) @ loadArgs(argTypes, srcRegs, fixedRegs, fpRegs) | loadArgs _ = raise InternalError "rtsCall: Too many arguments" - val noRTSException = createLabel() + val labelMaker = createLabelMaker() + + val noRTSException = createLabel labelMaker val instructions = loadArgs(argFormats, (* ML Arguments *) [X0, X1, X2, X3, X4, X5, X6, X7], (* C fixed pt args *) [X0, X1, X2, X3, X4, X5, X6, X7], (* C floating pt args *) [V0, V1, V2, V3, V4, V5, V6, V7]) @ (* Clear the RTS exception state. *) LoadNonAddr(X16, 0w1) :: [ StoreRegScaled{regT=X16, regN=X_MLAssemblyInt, unitOffset=exceptionPacketOffset, loadType=Load64}, (* Move X30 to X23, a callee-save register. *) (* Note: maybe we should push X24 just in case this is the only reachable reference to the code. *) LogicalShiftedReg{regN=XZero, regM=X_LinkReg, regD=X23, shift=ShiftNone, logOp=LogOr, opSize=OpSize64, setFlags=false}, LoadAddr(X16, entryPointAddr) (* Load entry point *) ] @ indexToAbsoluteAddress(X16, X16) @ [ LoadRegScaled{regT=X16, regN=X16, unitOffset=0, loadType=Load64}, (* Load the actual address. *) (* Store the current heap allocation pointer. *) StoreRegScaled{regT=X_MLHeapAllocPtr, regN=X_MLAssemblyInt, unitOffset=heapAllocPtrOffset, loadType=Load64}, (* For the moment save and restore the ML stack pointer. No RTS call should change it and it's callee-save but just in case... *) StoreRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=mlStackPtrOffset, loadType=Load64}, BranchReg{regD=X16, brRegType=BRRAndLink}, (* Call the function. *) (* Restore the ML stack pointer. *) LoadRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=mlStackPtrOffset, loadType=Load64}, (* Load the heap allocation ptr and limit. We could have GCed in the RTS call. *) LoadRegScaled{regT=X_MLHeapAllocPtr, regN=X_MLAssemblyInt, unitOffset=heapAllocPtrOffset, loadType=Load64}, LoadRegScaled{regT=X_MLHeapLimit, regN=X_MLAssemblyInt, unitOffset=heapLimitPtrOffset, loadType=Load64}, (* Check the RTS exception. *) LoadRegScaled{regT=X16, regN=X_MLAssemblyInt, unitOffset=exceptionPacketOffset, loadType=Load64}, SubImmediate{regN=X16, regD=XZero, immed=0w1, shifted=false, setFlags=true, opSize=OpSize64}, ConditionalBranch(CondEqual, noRTSException), (* If it isn't then raise the exception. *) MoveXRegToXReg{sReg=X16, dReg=X0}, LoadRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset, loadType=Load64}, LoadRegScaled{regT=X16, regN=X_MLStackPtr, unitOffset=0, loadType=Load64}, BranchReg{regD=X16, brRegType=BRRBranch}, SetLabel noRTSException ] @ ( case resultFormat of FastArgFixed => [] | FastArgDouble => (* This must be boxed. 
*) List.rev(boxDouble({source=V0, destination=X0, workReg=X1, saveRegs=[]}, [])) | FastArgFloat => (* This must be tagged or boxed *) boxOrTagFloat{floatReg=V0, fixedReg=X0, workReg=X1, saveRegs=[]} ) @ [ BranchReg{regD=X23, brRegType=BRRReturn} ] val closure = makeConstantClosure() val () = generateFinalCode{instrs=instructions, name=functionName, parameters=debugSwitches, resultClosure=closure, - profileObject=createProfileObject(), labelCount=1} + profileObject=createProfileObject(), labelMaker=labelMaker} in closureAsAddress closure end fun rtsCallFast (functionName, nArgs, debugSwitches) = rtsCallFastGeneral (functionName, List.tabulate(nArgs, fn _ => FastArgFixed), FastArgFixed, debugSwitches) (* RTS call with one double-precision floating point argument and a floating point result. *) fun rtsCallFastRealtoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgDouble], FastArgDouble, debugSwitches) (* RTS call with two double-precision floating point arguments and a floating point result. *) fun rtsCallFastRealRealtoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgDouble, FastArgDouble], FastArgDouble, debugSwitches) (* RTS call with one double-precision floating point argument, one fixed point argument and a floating point result. *) fun rtsCallFastRealGeneraltoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgDouble, FastArgFixed], FastArgDouble, debugSwitches) (* RTS call with one general (i.e. ML word) argument and a floating point result. This is used only to convert arbitrary precision values to floats. *) fun rtsCallFastGeneraltoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFixed], FastArgDouble, debugSwitches) (* Operations on Real32.real values. *) fun rtsCallFastFloattoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFloat], FastArgFloat, debugSwitches) fun rtsCallFastFloatFloattoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFloat, FastArgFloat], FastArgFloat, debugSwitches) (* RTS call with one double-precision floating point argument, one fixed point argument and a floating point result. *) fun rtsCallFastFloatGeneraltoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFloat, FastArgFixed], FastArgFloat, debugSwitches) (* RTS call with one general (i.e. ML word) argument and a floating point result. This is used only to convert arbitrary precision values to floats. *) fun rtsCallFastGeneraltoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFixed], FastArgFloat, debugSwitches) (* There is only one ABI value. *) datatype abi = ARM64Abi fun abiList () = [("default", ARM64Abi)] fun alignUp(s, align) = Word.andb(s + align-0w1, ~ align) val getThreadDataCall = makeEntryPoint "PolyArm64GetThreadData" (* This must match the type in Foreign.LowLevel. Once this is bootstrapped we could use that type but note that this is the type we use within the compiler and we build Foreign.LowLevel AFTER compiling this. 
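(* Quick check of the alignUp bit trick defined above; "align" must be a power of two
   and the result is the argument rounded up to the next multiple of it. *)
val _ = alignUp(0w5,  0w8)  (* = 0w8:  5 + 7 = 12, andb with ~0w8 clears the low three bits *)
val _ = alignUp(0w16, 0w8)  (* = 0w16: already aligned values are unchanged *)
val _ = alignUp(0w17, 0w16) (* = 0w32 *)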
*) datatype cTypeForm = CTypeFloatingPt | CTypePointer | CTypeSignedInt | CTypeUnsignedInt | CTypeStruct of cType list | CTypeVoid withtype cType = { typeForm: cTypeForm, align: word, size: word } (* Load a byte, halfword, word or long *) fun loadAlignedValue(reg, base, offset, size) = let val _ = offset mod size = 0w0 orelse raise InternalError "loadAlignedValue: not aligned" val loadOp = case size of 0w8 => Load64 | 0w4 => Load32 | 0w2 => Load16 | 0w1 => Load8 | _ => raise InternalError "loadAlignedValue: invalid length" in LoadRegScaled{regT=reg, regN=base, unitOffset=Word.toInt(offset div size), loadType=loadOp} end (* Store a register into upto 8 bytes. Most values will involve a single store but odd-sized structs can require shifts and multiple stores. N.B. May modify the source register. *) and storeUpTo8(reg, base, offset, size) = let val storeOp = if size = 0w8 then Load64 else if size >= 0w4 then Load32 else if size >= 0w2 then Load16 else Load8 in [StoreRegUnscaled{regT=reg, regN=base, byteOffset=offset, loadType=storeOp, unscaledType=NoUpdate}] end @ ( if size = 0w6 orelse size = 0w7 then [ shiftConstant{direction=ShiftRightLogical, regN=reg, regD=reg, shift=0w32, opSize=OpSize64 }, StoreRegUnscaled{regT=reg, regN=base, byteOffset=offset+4, loadType=Load16, unscaledType=NoUpdate} ] else [] ) @ ( if size = 0w3 orelse size = 0w5 orelse size = 0w7 then [ shiftConstant{direction=ShiftRightLogical, regN=reg, regD=reg, shift=(size-0w1)*0w8, opSize=OpSize64 }, StoreRegUnscaled{regT=reg, regN=base, byteOffset=offset+Word.toInt(size-0w1), loadType=Load8, unscaledType=NoUpdate} ] else [] ) (* Extract the elements of structures. *) fun unwrap(CTypeStruct ctypes, _) = List.foldr(fn({typeForm, size, ...}, l) => unwrap(typeForm, size) @ l) [] ctypes | unwrap (ctype, size) = [(ctype, size)] (* Structures of up to four floating point values of the same precision are treated specially. *) datatype argClass = ArgClassHFA of Word8.word * bool (* 1 - 4 floating pt values *) | ArgLargeStruct (* > 16 bytes and not an HFA *) | ArgSmall (* Scalars or small structures *) fun classifyArg(ctype, size) = case unwrap (ctype, size) of [(CTypeFloatingPt, 0w4)] => ArgClassHFA(0w1, false) | [(CTypeFloatingPt, 0w4), (CTypeFloatingPt, 0w4)] => ArgClassHFA(0w2, false) | [(CTypeFloatingPt, 0w4), (CTypeFloatingPt, 0w4), (CTypeFloatingPt, 0w4)] => ArgClassHFA(0w3, false) | [(CTypeFloatingPt, 0w4), (CTypeFloatingPt, 0w4), (CTypeFloatingPt, 0w4), (CTypeFloatingPt, 0w4)] => ArgClassHFA(0w4, false) | [(CTypeFloatingPt, 0w8)] => ArgClassHFA(0w1, true) | [(CTypeFloatingPt, 0w8), (CTypeFloatingPt, 0w8)] => ArgClassHFA(0w2, true) | [(CTypeFloatingPt, 0w8), (CTypeFloatingPt, 0w8), (CTypeFloatingPt, 0w8)] => ArgClassHFA(0w3, true) | [(CTypeFloatingPt, 0w8), (CTypeFloatingPt, 0w8), (CTypeFloatingPt, 0w8), (CTypeFloatingPt, 0w8)] => ArgClassHFA(0w4, true) | _ => if size > 0w16 then ArgLargeStruct else ArgSmall (* Can we load this in a single instruction? *) fun alignedLoadStore(_, 0w1) = true | alignedLoadStore(addr, 0w2) = addr mod 0w2 = 0w0 | alignedLoadStore(addr, 0w4) = addr mod 0w4 = 0w0 | alignedLoadStore(addr, 0w8) = addr mod 0w8 = 0w0 | alignedLoadStore(addr, 0w16) = addr mod 0w8 = 0w0 (* Can use load-pair. *) | alignedLoadStore _ = false (* This builds a piece of code that takes three arguments and returns a unit result. All three arguments are SysWord.word values i.e. ML addresses containing the address of the actual C value. The first argument (X0) is the address of the function to call. 
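(* Example of the classification above, with hypothetical cType values: a C struct of
   three floats is a homogeneous floating-point aggregate and is passed in s0-s2,
   whereas a 24-byte struct of pointers is passed by reference. *)
val cFloat: cType = {typeForm=CTypeFloatingPt, align=0w4, size=0w4}
val threeFloats: cType = {typeForm=CTypeStruct[cFloat, cFloat, cFloat], align=0w4, size=0w12}
(* classifyArg(#typeForm threeFloats, #size threeFloats) = ArgClassHFA(0w3, false) *)
val cPointer: cType = {typeForm=CTypePointer, align=0w8, size=0w8}
val bigStruct: cType = {typeForm=CTypeStruct[cPointer, cPointer, cPointer], align=0w8, size=0w24}
(* classifyArg(#typeForm bigStruct, #size bigStruct) = ArgLargeStruct *)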
The second argument (X1) points to a struct that contains the argument(s) for the function. The arguments have to be unpacked from the struct into the appropriate registers or to the C stack. The third argument (X2) points to a piece of memory to receive the result of the call. It may be empty if the function returns void. It may only be as big as required for the result type. *) fun foreignCall(_: abi, args: cType list, result: cType): Address.machineWord = let val resultAreaPtr = X19 (* Unboxed value from X2 - This is callee save. *) val argPtrReg = X9 (* A scratch register that isn't used for arguments. *) val entryPtReg = X16 (* Contains the address of the function to call. *) val argWorkReg = X10 (* Used in loading arguments if necessary. *) and argWorkReg2 = X11 and structSpacePtr = X12 and argWorkReg3 = X13 and argWorkReg4 = X14 + val labelMaker = createLabelMaker() + fun loadArgs([], stackOffset, _, _, _, code, largeStructSpace) = (code, stackOffset, largeStructSpace) | loadArgs(arg::args, stackOffset, argOffset, gRegNo, fpRegNo, code, largeStructSpace) = let val {size, align, typeForm, ...} = arg val newArgOffset = alignUp(argOffset, align) in case classifyArg(typeForm, size) of ArgClassHFA(numItems, isDouble) => if fpRegNo + numItems <= 0w8 then let val scale = if isDouble then 0w8 else 0w4 (* Load the values to the floating point registers. *) fun loadFPRegs(0w0, _, _) = [] | loadFPRegs(0w1, fpRegNo, offset) = [LoadFPRegScaled{regT=VReg fpRegNo, regN=argPtrReg, unitOffset=offset, floatSize=if isDouble then Double64 else Float32}] | loadFPRegs(n, fpRegNo, offset) = (LoadFPRegPair{regT1=VReg fpRegNo, regT2=VReg(fpRegNo+0w1), regN=argPtrReg, unitOffset=offset, floatSize=if isDouble then Double64 else Float32, unscaledType=NoUpdate} :: loadFPRegs(n-0w2, fpRegNo+0w2, offset+2)) in loadArgs(args, stackOffset, newArgOffset+size, gRegNo, fpRegNo+numItems, loadFPRegs(numItems, fpRegNo, Word.toInt(newArgOffset div scale)) @ code, largeStructSpace) end else let (* If we have insufficient number of registers we discard any that are left and push the argument to the stack. *) (* The floating point value or structure is copied to the stack as a contiguous area. Use general registers to copy the data. It could be on a 4-byte alignment. In the typical case of a single floating point value this will just be a single load and store. *) fun copyData(0w0, _, _) = [] | copyData(n, srcOffset, stackOffset) = if isDouble then LoadRegScaled{loadType=Load64, regT=argWorkReg2, regN=argPtrReg, unitOffset=srcOffset} :: StoreRegScaled{loadType=Load64, regT=argWorkReg2, regN=XSP, unitOffset=stackOffset} :: copyData(n-0w1, srcOffset+1, stackOffset+1) else LoadRegScaled{loadType=Load32, regT=argWorkReg2, regN=argPtrReg, unitOffset=srcOffset} :: StoreRegScaled{loadType=Load32, regT=argWorkReg2, regN=XSP, unitOffset=stackOffset} :: copyData(n-0w1, srcOffset+1, stackOffset+1) val copyToStack = if isDouble then copyData(numItems, Word.toInt(newArgOffset div 0w8), stackOffset) else copyData(numItems, Word.toInt(newArgOffset div 0w4), stackOffset*2) (* The overall size is rounded up to a multiple of 8 *) val newStackOffset = stackOffset + Word.toInt(alignUp(size, 0w8) div 0w8) in loadArgs(args, newStackOffset, newArgOffset+size, gRegNo, 0w8, copyToStack @ code, largeStructSpace) end | _ => let (* Load an aligned argument into one or two registers or copy it to the stack. 
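(* Worked example of the HFA case above: for a struct of three C floats starting at
   offset 0 in the argument area, loadFPRegs(0w3, 0w0, 0) produces
       LoadFPRegPair  {regT1=V0, regT2=V1, regN=argPtrReg, unitOffset=0, floatSize=Float32, unscaledType=NoUpdate}
       LoadFPRegScaled{regT=V2, regN=argPtrReg, unitOffset=2, floatSize=Float32}
   so V0-V2 are filled two at a time and then singly. *)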
*) fun loadArgumentValues(argSize, sourceOffset, sourceBase, newStructSpace, preCode) = if gRegNo <= 0w6 orelse (size <= 0w8 andalso gRegNo <= 0w7) then (* There are sufficient registers *) let val (loadInstr, nextGReg) = if argSize = 0w16 then ([LoadRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=XReg gRegNo, regT2=XReg(gRegNo+0w1), regN=sourceBase, unitOffset=Word.toInt(sourceOffset div 0w8)}], gRegNo+0w2) else ([loadAlignedValue(XReg gRegNo, sourceBase, sourceOffset, size)], gRegNo+0w1) in loadArgs(args, stackOffset, newArgOffset+size, nextGReg, fpRegNo, preCode @ loadInstr @ code, newStructSpace) end else if argSize = 0w16 then loadArgs(args, stackOffset+2, newArgOffset+size, 0w8, fpRegNo, preCode @ LoadRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=argWorkReg2, regT2=argWorkReg3, regN=sourceBase, unitOffset=Word.toInt(sourceOffset div 0w8)} :: StoreRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=argWorkReg2, regT2=argWorkReg3, regN=XSP, unitOffset=stackOffset} :: code, newStructSpace) else loadArgs(args, stackOffset+1, newArgOffset+size, 0w8, fpRegNo, preCode @ loadAlignedValue(argWorkReg2, sourceBase, sourceOffset, argSize) :: StoreRegScaled{loadType=Load64, regT=argWorkReg2, regN=XSP, unitOffset=stackOffset} :: code, newStructSpace) in if alignedLoadStore(newArgOffset, size) then loadArgumentValues(size, newArgOffset, argPtrReg, largeStructSpace, []) else (* General case. Either a large structure or a small structure that can't easily be loaded, First copy it to the stack, and either pass the address or load it once it's aligned. *) let val newStructSpace = alignUp(largeStructSpace + size, 0w16) - val loopLabel = createLabel() + val loopLabel = createLabel labelMaker (* The address of the area we're copying to is in argRegNo. *) val argRegNo = if gRegNo < 0w8 then XReg gRegNo else argWorkReg (* Copy from the end back to the start. *) val copyToStructSpace = [ AddImmediate{opSize=OpSize64, setFlags=false, regN=structSpacePtr, regD=argRegNo, immed=largeStructSpace, shifted=false}, AddImmediate{opSize=OpSize64, setFlags=false, regN=argRegNo, regD=argWorkReg2, immed=size, shifted=false}, (* End of dest area *) AddImmediate{opSize=OpSize64, setFlags=false, regN=argPtrReg, regD=argWorkReg3, immed=newArgOffset+size, shifted=false}, (* end of source *) SetLabel loopLabel, LoadRegUnscaled{loadType=Load8, unscaledType=PreIndex, regT=argWorkReg4, regN=argWorkReg3, byteOffset= ~1}, StoreRegUnscaled{loadType=Load8, unscaledType=PreIndex, regT=argWorkReg4, regN=argWorkReg2, byteOffset= ~1}, SubShiftedReg{opSize=OpSize64, setFlags=true, regM=argWorkReg2, regN=argRegNo, regD=XZero, shift=ShiftNone}, (* At start? *) ConditionalBranch(CondNotEqual, loopLabel) ] in if size > 0w16 then (* Large struct - pass by reference *) ( if gRegNo < 0w8 then loadArgs(args, stackOffset, newArgOffset+size, gRegNo+0w1, fpRegNo, copyToStructSpace @ code, newStructSpace) else loadArgs(args, stackOffset+1, newArgOffset+size, 0w8, fpRegNo, copyToStructSpace @ StoreRegScaled{loadType=Load64, regT=argWorkReg, regN=XSP, unitOffset=stackOffset} :: code, newStructSpace) ) else (* Small struct. Since it's now in an area at least 16 bytes and properly aligned we can load it. *) (* argRegNo points to where we copied it *) loadArgumentValues(if size > 0w8 then 0w16 else 0w8, 0w0, argRegNo, newStructSpace, copyToStructSpace) end end end local val {size, typeForm, ...} = result (* Store a result register into the result area. 
In almost all cases this is very simple: the only complication is with structs of odd sizes. *) fun storeResult(reg, offset, size) = storeUpTo8(reg, resultAreaPtr, offset, size) in val (getResult, passArgAddress) = if typeForm = CTypeVoid then ([], false) else case classifyArg(typeForm, size) of (* Floating point values are returned in s0-sn, d0-dn. *) ArgClassHFA(numItems, isDouble) => let fun storeFPRegs(0w0, _, _) = [] | storeFPRegs(0w1, fpRegNo, offset) = [StoreFPRegScaled{regT=VReg fpRegNo, regN=resultAreaPtr, unitOffset=offset, floatSize=if isDouble then Double64 else Float32}] | storeFPRegs(n, fpRegNo, offset) = StoreFPRegPair{regT1=VReg fpRegNo, regT2=VReg(fpRegNo+0w1), regN=resultAreaPtr, unitOffset=offset, floatSize=if isDouble then Double64 else Float32, unscaledType=NoUpdate} :: storeFPRegs(n-0w2, fpRegNo+0w2, offset+2) in (storeFPRegs(numItems, 0w0 (* V0-Vn*), 0), false) end | ArgLargeStruct => ([], true) (* Structures larger than 16 bytes are passed by reference. *) | _ => if size = 0w16 then ([StoreRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X0, regT2=X1, regN=resultAreaPtr, unitOffset=0}], false) else if size > 0w8 then (StoreRegScaled{loadType=Load64, regT=X0, regN=resultAreaPtr, unitOffset=0} :: storeResult(X1, 8, size-0w8), false) else (storeResult(X0, 0, size), false) end val (argCode, argStack, largeStructSpace) = loadArgs(args, 0, 0w0, 0w0, 0w0, if passArgAddress (* If we have to pass the address of the result struct it goes in X8. *) then [MoveXRegToXReg{sReg=resultAreaPtr, dReg=X8}] else [], 0w0) val stackSpaceRequired = alignUp(Word.fromInt argStack * 0w8, 0w16) + largeStructSpace val instructions = [(* Push the return address to the stack. We could put it in a callee-save register but there's a very small chance that this could be the last reference to a piece of code. *) StoreRegUnscaled{loadType=Load64, unscaledType=PreIndex, regT=X30, regN=X_MLStackPtr, byteOffset= ~8}, (* Save heap ptr. Needed in case we have a callback. *) StoreRegScaled{loadType=Load64, regT=X_MLHeapAllocPtr, regN=X_MLAssemblyInt, unitOffset=heapAllocPtrOffset} ] @ indexToAbsoluteAddress(X0, X0) @ (* Load the entry point address. *) LoadRegScaled{loadType=Load64, regT=entryPtReg, regN=X0, unitOffset=0} :: ( (* Unbox the address of the result area into a callee save resgister. This is where the result will be stored on return if it is anything other than a struct. We have to put the C address in there now because an ML address wouldn't be updated by a possible GC in a callback. *) if #typeForm(result) <> CTypeVoid then indexToAbsoluteAddress(X2, X2) @ [LoadRegScaled{loadType=Load64, regT=resultAreaPtr, regN=X2, unitOffset=0}] else [] ) @ [StoreRegScaled{loadType=Load64, regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=mlStackPtrOffset}] @ (* Save the stack pointer. *) ( if stackSpaceRequired = 0w0 then [] else [SubImmediate{opSize=OpSize64, setFlags=false, regN=XSP, regD=XSP, immed=stackSpaceRequired, shifted=false}] ) @ ( (* If we need to copy a struct load a register with a pointer to the area for it. *) if largeStructSpace = 0w0 then [] else [AddImmediate{opSize=OpSize64, setFlags=false, regN=XSP, regD=structSpacePtr, immed=stackSpaceRequired-largeStructSpace, shifted=false}] ) @ ( (* The second argument is a SysWord containing the address of a malloced area of memory with the actual arguments in it. 
*) if null args then [] else indexToAbsoluteAddress(X1, X1) @ [LoadRegScaled{loadType=Load64, regT=argPtrReg, regN=X1, unitOffset=0}] ) @ argCode @ [BranchReg{regD=X16, brRegType=BRRAndLink}] @ (* Call the function. *) (* Restore the C stack value in case it's been changed by a callback. *) ( if stackSpaceRequired = 0w0 then [] else [AddImmediate{opSize=OpSize64, setFlags=false, regN=XSP, regD=XSP, immed=stackSpaceRequired, shifted=false}] ) @ [ (* Reload the ML stack pointer even though it's callee save. If we've made a callback the ML stack could have grown and so moved to a different address. *) LoadRegScaled{loadType=Load64, regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=mlStackPtrOffset}, (* Load the heap allocation ptr and limit in case of a callback. *) LoadRegScaled{loadType=Load64, regT=X_MLHeapAllocPtr, regN=X_MLAssemblyInt, unitOffset=heapAllocPtrOffset}, LoadRegScaled{loadType=Load64, regT=X_MLHeapLimit, regN=X_MLAssemblyInt, unitOffset=heapLimitPtrOffset} ] @ (* Store the result in the destination. *) getResult @ (* Pop the return address and return. *) [ LoadRegUnscaled{regT=X30, regN=X_MLStackPtr, byteOffset= 8, loadType=Load64, unscaledType=PostIndex}, BranchReg{regD=X30,brRegType=BRRReturn} ] val functionName = "foreignCall" val debugSwitches = [(*Universal.tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), Universal.tagInject Debug.assemblyCodeTag true*)] val closure = makeConstantClosure() val () = generateFinalCode{instrs=instructions, name=functionName, parameters=debugSwitches, resultClosure=closure, - profileObject=createProfileObject(), labelCount=0} + profileObject=createProfileObject(), labelMaker=labelMaker} in closureAsAddress closure end (* Build a callback function. The arguments are the abi, the list of argument types and the result type. The result is an ML function that takes an ML function, f, as its argument, registers it as a callback and returns the C function as its result. When the C function is called the arguments are copied into temporary memory and the vector passed to f along with the address of the memory for the result. "f" stores the result in it when it returns and the result is then passed back as the result of the callback. N.B. This returns a closure cell which contains the address of the code. It can be used as a SysWord.word value except that while it exists the code will not be GCed. *) fun buildCallBack(_: abi, args: cType list, result: cType): Address.machineWord = let val argWorkReg = X10 (* Used in loading arguments if necessary. *) and argWorkReg2 = X11 and argWorkReg3 = X13 and argWorkReg4 = X14 + val labelMaker = createLabelMaker() + (* The stack contains a 32-byte result area then an aligned area for the arguments. *) (* Store the argument values to the structure that will be passed to the ML callback function. *) (* Note. We've loaded the frame pointer with the original stack ptr-96 so we can access any stack arguments from that. *) fun moveArgs([], _, _, _, _, moveFromStack) = moveFromStack | moveArgs(arg::args, stackSpace, argOffset, gRegNo, fpRegNo, moveFromStack) = let val {size, align, typeForm, ...} = arg val newArgOffset = alignUp(argOffset, align) in case classifyArg(typeForm, size) of ArgClassHFA(numItems, isDouble) => if fpRegNo + numItems <= 0w8 then let val scale = if isDouble then 0w8 else 0w4 (* Store the values from the FP registers. 
*) fun storeFPRegs(0w0, _, _) = [] | storeFPRegs(0w1, fpRegNo, offset) = [StoreFPRegScaled{regT=VReg fpRegNo, regN=XSP, unitOffset=offset, floatSize=if isDouble then Double64 else Float32}] | storeFPRegs(n, fpRegNo, offset) = StoreFPRegPair{regT1=VReg fpRegNo, regT2=VReg(fpRegNo+0w1), regN=XSP, unitOffset=offset, floatSize=if isDouble then Double64 else Float32, unscaledType=NoUpdate} :: storeFPRegs(n-0w2, fpRegNo+0w2, offset+2) in moveArgs(args, stackSpace, newArgOffset+size, gRegNo, fpRegNo+numItems, storeFPRegs(numItems, fpRegNo, Word.toInt(newArgOffset div scale)) @ moveFromStack) end else let (* Load the arguments from the stack and store into the result area. *) fun copyData(0w0, _, _) = [] | copyData(n, dstOffset, stackOffset) = if isDouble then LoadRegScaled{loadType=Load64, regT=argWorkReg2, regN=X29, unitOffset=stackOffset} :: StoreRegScaled{loadType=Load64, regT=argWorkReg2, regN=XSP, unitOffset=dstOffset} :: copyData(n-0w1, dstOffset+1, stackOffset+1) else LoadRegScaled{loadType=Load32, regT=argWorkReg2, regN=X29, unitOffset=stackOffset} :: StoreRegScaled{loadType=Load32, regT=argWorkReg2, regN=XSP, unitOffset=dstOffset} :: copyData(n-0w1, dstOffset+1, stackOffset+1) val copyFromStack = if isDouble then copyData(numItems, Word.toInt(newArgOffset div 0w8), stackSpace) else copyData(numItems, Word.toInt(newArgOffset div 0w4), stackSpace*2) (* The overall size is rounded up to a multiple of 8 *) val newStackOffset = stackSpace + Word.toInt(alignUp(size, 0w8) div 0w8) in moveArgs(args, newStackOffset, newArgOffset+size, gRegNo, 0w8, copyFromStack @ moveFromStack) end | _ => if alignedLoadStore(newArgOffset, size) andalso (gRegNo <= 0w6 orelse gRegNo = 0w7 andalso size <= 0w8) then (* Usual case: argument passed in one or two registers. *) ( if size > 0w8 then moveArgs(args, stackSpace, newArgOffset+size, gRegNo + 0w2, fpRegNo, StoreRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=XReg gRegNo, regT2=XReg(gRegNo+0w1), regN=XSP, unitOffset=Word.toInt(newArgOffset div 0w8)} :: moveFromStack) else moveArgs(args, stackSpace, newArgOffset+size, gRegNo + 0w1, fpRegNo, storeUpTo8(XReg gRegNo, XSP, Word.toInt newArgOffset, size) @ moveFromStack) ) else (* General case. Store the argument registers if necessary and then use a byte copy to copy into the argument area. This sorts out any odd alignments or lengths. In some cases the source will be in memory already. *) let (* The source is either the register value or the value on the stack. 
*) val (argRegNo, nextGReg, newStack, loadArg) = if size > 0w16 then ( if gRegNo < 0w8 then (XReg gRegNo, gRegNo + 0w1, stackSpace, []) else (argWorkReg, 0w8, stackSpace+1, [LoadRegScaled{loadType=Load64, regT=argWorkReg, regN=X29, unitOffset=stackSpace}]) ) else let val regsNeeded = if size > 0w8 then 0w2 else 0w1 in if gRegNo + regsNeeded <= 0w8 then (XReg gRegNo, gRegNo+regsNeeded, stackSpace, [if size > 0w8 then StoreRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=XReg gRegNo, regT2=XReg(gRegNo+0w1), regN=XSP, unitOffset=2} else StoreRegScaled{loadType=Load64, regT=XReg gRegNo, regN=XSP, unitOffset=2}, AddImmediate{opSize=OpSize64, setFlags=false, regD=XReg gRegNo, regN=XSP, immed=0w16, shifted=false}]) else (* Being passed on the stack *) (argWorkReg, 0w8, stackSpace+Word8.toInt regsNeeded, [AddImmediate{opSize=OpSize64, setFlags=false, regD=argWorkReg, regN=X29, immed=Word.fromInt stackSpace*0w8, shifted=false}]) end - val loopLabel = createLabel() + val loopLabel = createLabel labelMaker val copyCode = [ AddImmediate{opSize=OpSize64, setFlags=false, regN=argRegNo, regD=argWorkReg3, immed=size, shifted=false}, (* End of source area *) AddImmediate{opSize=OpSize64, setFlags=false, regN=XSP, regD=argWorkReg2, immed=newArgOffset+size, shifted=false}, (* end of dest *) SetLabel loopLabel, LoadRegUnscaled{loadType=Load8, unscaledType=PreIndex, regT=argWorkReg4, regN=argWorkReg3, byteOffset= ~1}, StoreRegUnscaled{loadType=Load8, unscaledType=PreIndex, regT=argWorkReg4, regN=argWorkReg2, byteOffset= ~1}, SubShiftedReg{opSize=OpSize64, setFlags=true, regM=argWorkReg3, regN=argRegNo, regD=XZero, shift=ShiftNone}, (* At start? *) ConditionalBranch(CondNotEqual, loopLabel) ] in moveArgs(args, newStack, newArgOffset+size, nextGReg, fpRegNo, loadArg @ copyCode @ moveFromStack) end end val copyArgsFromRegsAndStack = moveArgs(args, 12 (* Offset to first stack arg *), 0w32 (* Size of result area *), 0w0, 0w0, []) local fun getNextSize (arg, argOffset) = let val {size, align, ...} = arg in alignUp(argOffset, align) + size end in val argumentSpace = alignUp(List.foldl getNextSize 0w0 args, 0w16) end local val {size, typeForm, ...} = result in (* Load the results from the result area except that if we're passing the result structure by reference this is done by the caller. Generally similar to how arguments are passed in a call. *) val (loadResults, resultByReference) = if typeForm = CTypeVoid then ([], false) else case classifyArg(typeForm, size) of ArgClassHFA(numItems, isDouble) => let (* Load the values to the floating point registers. *) fun loadFPRegs(0w0, _, _) = [] | loadFPRegs(0w1, fpRegNo, offset) = [LoadFPRegScaled{regT=VReg fpRegNo, regN=XSP, unitOffset=offset, floatSize=if isDouble then Double64 else Float32}] | loadFPRegs(n, fpRegNo, offset) = LoadFPRegPair{regT1=VReg fpRegNo, regT2=VReg(fpRegNo+0w1), regN=XSP, unitOffset=offset, unscaledType=NoUpdate, floatSize=if isDouble then Double64 else Float32} :: loadFPRegs(n-0w2, fpRegNo+0w2, offset+2) in (loadFPRegs(numItems, 0w0, 0 (* result area *)), false) end | ArgLargeStruct => ([], true) (* Structures larger than 16 bytes are passed by reference. *) | _ => (* We've allocated a 32-byte area aligned onto a 16-byte boundary so we can simply load one or two registers. 
*) if size > 0w8 then ([LoadRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X0, regT2=X1, regN=XSP, unitOffset=0}], false) else ([LoadRegScaled{loadType=Load64, regT=X0, regN=XSP, unitOffset=0}], false) end val instructions = [ (* Push LR, FP and the callee-save registers. *) StoreRegPair{loadType=Load64, unscaledType=PreIndex, regT1=X29, regT2=X30, regN=XSP, unitOffset= ~12}, MoveXRegToXReg{sReg=XSP, dReg=X29}, StoreRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X19, regT2=X20, regN=X29, unitOffset=2}, StoreRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X21, regT2=X22, regN=X29, unitOffset=4}, StoreRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X23, regT2=X24, regN=X29, unitOffset=6}, StoreRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X25, regT2=X26, regN=X29, unitOffset=8}, StoreRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X27, regT2=X28, regN=X29, unitOffset=10}, (* Reserve space for the arguments and results. *) SubImmediate{opSize=OpSize64, setFlags=false, regN=XSP, regD=XSP, immed=argumentSpace+0w32, shifted=false}, (* We passed the function we're calling in X9 but we need to move it to a callee-save register before we call the RTS. *) MoveXRegToXReg{sReg=X9, dReg=X20} ] @ (* Save X8 if we're going to need it. *) (if resultByReference then [StoreRegScaled{loadType=Load64, regT=X8, regN=XSP, unitOffset=0}] else []) @ (* Now we've saved X24 we can move the global heap base into it. *) (if is32in64 then [MoveXRegToXReg{sReg=X10, dReg=X_Base32in64}] else []) @ copyArgsFromRegsAndStack @ [LoadAddr(X0, getThreadDataCall)] @ ( if is32in64 then [AddShiftedReg{setFlags=false, opSize=OpSize64, regM=X0, regN=X_Base32in64, regD=X0, shift=ShiftLSL 0w2}] else [] ) @ [ (* Call into the RTS to get the thread data ptr. *) LoadRegScaled{loadType=Load64, regT=X0, regN=X0, unitOffset=0}, BranchReg{regD=X0, brRegType=BRRAndLink}, MoveXRegToXReg{sReg=X0, dReg=X_MLAssemblyInt}, (* Load the ML regs. *) LoadRegScaled{loadType=Load64, regT=X_MLHeapLimit, regN=X_MLAssemblyInt, unitOffset=heapLimitPtrOffset}, LoadRegScaled{loadType=Load64, regT=X_MLHeapAllocPtr, regN=X_MLAssemblyInt, unitOffset=heapAllocPtrOffset}, LoadRegScaled{loadType=Load64, regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=mlStackPtrOffset}, (* Prepare the arguments. They are both syswords so have to be boxed. First load the address of the argument area which is after the 32-byte result area. *) AddImmediate{opSize=OpSize64, setFlags=false, regN=XSP, regD=X2, immed=0w32, shifted=false} ] @ List.rev(boxSysWord({source=X2, destination=X0, workReg=X3, saveRegs=[]}, [])) @ (* Address of arguments. *) ( (* Result area pointer. If we're returning by reference this is the original value of X8 otherwise it's the address of the 32 bytes we've reserved. *) if resultByReference then [LoadRegScaled{loadType=Load64, regT=X2, regN=XSP, unitOffset=0}] else [MoveXRegToXReg{sReg=XSP, dReg=X2}] ) @ List.rev(boxSysWord({source=X2, destination=X1, workReg=X3, saveRegs=[]}, [])) @ (* Put the ML closure pointer, originally in X9 now in X20, into the ML closure pointer register, X8. Then call the ML code. *) [MoveXRegToXReg{sReg=X20, dReg=X8}] @ ( if is32in64 then [ AddShiftedReg{regM=X8, regN=X_Base32in64, regD=X16, shift=ShiftLSL 0w2, opSize=OpSize64, setFlags=false}, LoadRegScaled{loadType=Load64, regT=X16, regN=X16, unitOffset=0} ] else [LoadRegScaled{loadType=Load64, regT=X16, regN=X8, unitOffset=0}] ) @ [ BranchReg{regD=X16, brRegType=BRRAndLink}, (* Save the ML stack and heap pointers. 
We could have allocated or grown the stack. The limit pointer is maintained by the RTS. *) StoreRegScaled{loadType=Load64, regT=X_MLHeapAllocPtr, regN=X_MLAssemblyInt, unitOffset=heapAllocPtrOffset}, StoreRegScaled{loadType=Load64, regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=mlStackPtrOffset} ] @ loadResults @ (* Load the return values *) [ (* Restore the callee-save registers and return. *) MoveXRegToXReg{sReg=X29, dReg=XSP}, LoadRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X19, regT2=X20, regN=X29, unitOffset=2}, LoadRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X21, regT2=X22, regN=X29, unitOffset=4}, LoadRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X23, regT2=X24, regN=X29, unitOffset=6}, LoadRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X25, regT2=X26, regN=X29, unitOffset=8}, LoadRegPair{loadType=Load64, unscaledType=NoUpdate, regT1=X27, regT2=X28, regN=X29, unitOffset=10}, LoadRegPair{loadType=Load64, unscaledType=PostIndex, regT1=X29, regT2=X30, regN=XSP, unitOffset=12}, BranchReg{regD=X30, brRegType=BRRReturn} ] val functionName = "foreignCallBack(2)" val debugSwitches = [(*Universal.tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), Universal.tagInject Debug.assemblyCodeTag true*)] val closure = makeConstantClosure() val () = generateFinalCode{instrs=instructions, name=functionName, parameters=debugSwitches, - resultClosure=closure, profileObject=createProfileObject(), labelCount=0} + resultClosure=closure, profileObject=createProfileObject(), labelMaker=labelMaker} val stage2Code = closureAsAddress closure fun resultFunction f = let (* Generate a small function to load the address of f into a register and then jump to stage2. The idea is that it should be possible to generate this eventually in a single RTS call. That could be done by using a version of this as a model. *) val instructions = if is32in64 then (* Get the global heap base into X10. *) [ LoadGlobalHeapBaseInCallback X10, LoadAddr(X9, Address.toMachineWord f), (* Have to load the actual address at run-time. *) LoadAddr(X16, stage2Code), AddShiftedReg{setFlags=false, opSize=OpSize64, regM=X16, regN=X10, regD=X16, shift=ShiftLSL 0w2}, LoadRegScaled{loadType=Load64, regT=X16, regN=X16, unitOffset=0}, BranchReg{regD=X16, brRegType=BRRBranch} ] else let (* We can extract the actual code address in the native address version. 
*) val codeAddress = Address.loadWord(Address.toAddress stage2Code, 0w0) in [ LoadAddr(X9, Address.toMachineWord f), LoadAddr(X16, codeAddress), BranchReg{regD=X16, brRegType=BRRBranch} ] end val functionName = "foreignCallBack(1)" val debugSwitches = [(*Universal.tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), Universal.tagInject Debug.assemblyCodeTag true*)] val closure = makeConstantClosure() val () = generateFinalCode{instrs=instructions, name=functionName, parameters=debugSwitches, - resultClosure=closure, profileObject=createProfileObject(), labelCount=0} + resultClosure=closure, profileObject=createProfileObject(), + labelMaker=createLabelMaker()} val res = closureAsAddress closure (*val _ = print("Address is " ^ (LargeWord.toString(RunCall.unsafeCast res)) ^ "\n")*) in res end in Address.toMachineWord resultFunction end end; diff --git a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64ICodeToArm64Code.ML b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64ICodeToArm64Code.ML index 0e34bcfd..99da294c 100644 --- a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64ICodeToArm64Code.ML +++ b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64ICodeToArm64Code.ML @@ -1,1217 +1,1219 @@ (* Copyright David C. J. Matthews 2021-2 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor Arm64ICodeToArm64Code( structure Arm64PreAssembly: ARM64PREASSEMBLY structure Debug: DEBUG structure Arm64ICode: ARM64ICODE structure Identify: ARM64IDENTIFYREFERENCES structure IntSet: INTSET structure Pretty: PRETTY structure Strongly: sig val stronglyConnectedComponents: {nodeAddress: 'a -> int, arcs: 'a -> int list } -> 'a list -> 'a list list end sharing Arm64PreAssembly.Sharing = Arm64ICode.Sharing = Identify.Sharing = IntSet ): ARM64ICODEGENERATE = struct open Identify open Arm64ICode open Arm64PreAssembly open Address exception InternalError = Misc.InternalError (* Reversed cons and append to make the code easier to read. *) infix 5 <::> <@> fun tl <::> hd = hd :: tl (*and snd <@> fst = fst @ snd*) (* These aren't currently used for anything. *) val workReg1 = X16 and workReg2 = X17 fun icodeToArm64Code {blocks: basicBlockConcrete vector, functionName, stackRequired, debugSwitches, resultClosure, profileObject, ...} = let val numBlocks = Vector.length blocks (* Load from and store to stack. 
*) fun loadFromStack(destReg, wordOffset, code) = if wordOffset >= 4096 then (LoadRegIndexed{regT=destReg, regN=X_MLStackPtr, regM=destReg, option=ExtUXTX ScaleOrShift, loadType=Load64}) :: [LoadNonAddr(destReg, Word64.fromInt wordOffset)] @ code else (LoadRegScaled{regT=destReg, regN=X_MLStackPtr, unitOffset=wordOffset, loadType=Load64}) :: code and storeToStack(sourceReg, wordOffset, workReg, code) = if wordOffset >= 4096 then (StoreRegIndexed{regT=sourceReg, regN=X_MLStackPtr, regM=workReg, option=ExtUXTX ScaleOrShift, loadType=Load64}) :: [LoadNonAddr(workReg, Word64.fromInt wordOffset)] @ code else (StoreRegScaled{regT=sourceReg, regN=X_MLStackPtr, unitOffset=wordOffset, loadType=Load64}) :: code datatype srcAndDest = IsInReg of xReg | IsOnStack of int local (* The registers are numbered from 0. Choose values that don't conflict with the stack addresses. *) fun regNo(XReg r) = ~1 - Word8.toInt r | regNo _ = ~1 - 31 type node = {src: srcAndDest, dst: srcAndDest } fun nodeAddress({dst=IsInReg r, ...}: node) = regNo r | nodeAddress({dst=IsOnStack a, ...}) = a fun arcs({src=IsOnStack wordOffset, ...}: node) = [wordOffset] | arcs{src=IsInReg r, ...} = [regNo r] in val stronglyConnected = Strongly.stronglyConnectedComponents { nodeAddress=nodeAddress, arcs=arcs } end (* This is a general function for moving values into registers or to the stack where it is possible that the source values might also be in use as destinations. The stack is used for destinations only for tail recursive calls. *) fun moveMultipleValues(moves, code) = let fun moveValues ([], code) = code (* We're done. *) | moveValues (arguments, code) = let (* stronglyConnectedComponents does two things. It detects loops where it's not possible to move items without breaking the loop but more importantly it orders the dependencies so that if there are no loops we can load the source and store it in the destination knowing that we won't overwrite anything we might later need. *) val ordered = stronglyConnected arguments fun loadIntoReg(IsInReg sReg, dReg, code) = if sReg = dReg then code else (MoveXRegToXReg{sReg=sReg, dReg=dReg}) :: code | loadIntoReg(IsOnStack wordOffset, dReg, code) = loadFromStack(dReg, wordOffset, code) fun moveEachValue ([], code) = code | moveEachValue ([{dst=IsInReg dReg, src}] :: rest, code) = moveEachValue(rest, loadIntoReg(src, dReg, code)) | moveEachValue ([{dst=IsOnStack wordOffset, src=IsInReg sReg}] :: rest, code) = (* Storing into the stack. *) moveEachValue(rest, storeToStack(sReg, wordOffset, workReg1, code)) | moveEachValue ([{dst=IsOnStack dstOffset, src=IsOnStack srcOffset}] :: rest, code) = (* Copy a stack location - needs a load and store unless the address is the same. *) if dstOffset = srcOffset then moveEachValue(rest, code) else moveEachValue(rest, storeToStack(workReg2, dstOffset, workReg1, loadFromStack(workReg2, srcOffset, code))) | moveEachValue((cycle as first :: _ :: _) :: rest, code) = (* We have a cycle. *) let (* We need to exchange some of the arguments. Doing an exchange here will set the destination with the correct source. However we have to process every subsequent entry with the swapped registers. That may well mean that one of those entries becomes trivial. We also need to rerun stronglyConnectedComponents on at least the rest of this cycle. It's easiest to flatten the rest and do everything. *) (* Exchange the source and destination. We don't have an exchange instruction and there's a further complication. 
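(* Concretely, using the srcAndDest type above:
   [{src=IsInReg X1, dst=IsInReg X2}, {src=IsInReg X0, dst=IsInReg X1}] has no cycle,
   and the ordering guarantees X1 is stored into X2 before it is overwritten from X0;
   [{src=IsInReg X0, dst=IsInReg X1}, {src=IsInReg X1, dst=IsInReg X0}] is a genuine
   cycle and is broken by going through workReg2 (X17), giving, in execution order,
       mov x17, x0 ; mov x0, x1 ; mov x1, x17 *)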
We could be copying between stack locations and their offsets could be > 4096. Since we've only got two work registers we need to use the hardware stack as an extra location. Stack-stack exchange is very rare so the extra overhead to handle the general case is worth it. *) local fun storeToDest(sReg, IsInReg dReg, _, code) = (MoveXRegToXReg{sReg=sReg, dReg=dReg}) :: code | storeToDest(sReg, IsOnStack wordOffset, work, code) = storeToStack(sReg, wordOffset, work, code) in fun exchange(IsInReg arg1Reg, arg2, code) = (MoveXRegToXReg{sReg=workReg2, dReg=arg1Reg}) :: storeToDest(arg1Reg, arg2, workReg1, loadIntoReg(arg2, workReg2, code)) | exchange(arg1, IsInReg arg2Reg, code) = (MoveXRegToXReg{sReg=workReg2, dReg=arg2Reg}) :: storeToDest(arg2Reg, arg1, workReg1, loadIntoReg(arg1, workReg2, code)) | exchange(arg1, arg2, code) = (* The hardware stack must be 16-byte aligned. *) storeToDest(workReg2, arg2, workReg1, (LoadRegUnscaled{regT=workReg2, regN=XSP, byteOffset=16, loadType=Load64, unscaledType=PostIndex}) :: storeToDest(workReg2, arg1, workReg1, loadIntoReg(arg2, workReg2, (StoreRegUnscaled{regT=workReg2, regN=XSP, byteOffset= ~16, loadType=Load64, unscaledType=PreIndex}) :: loadIntoReg(arg1, workReg2, code)))) end (* Try to find either a register-register move or a register-stack move. If not use the first. If there's a stack-register move there will also be a register-stack so we don't need to look for both. *) val {dst=selectDst, src=selectSrc} = first (* This includes this entry but after the swap we'll eliminate it. *) val flattened = List.foldl(fn (a, b) => a @ b) [] (cycle :: rest) val destAsSource = selectDst fun match(s1: srcAndDest, s2) = s1 = s2 fun swapSources{src, dst} = if match(src, selectSrc) then {src=destAsSource, dst=dst} else if match(src, destAsSource) then {src=selectSrc, dst=dst} else {src=src, dst=dst} val exchangeCode = exchange(selectDst, selectSrc, code) in moveValues(List.map swapSources flattened, exchangeCode) end | moveEachValue(([]) :: _, _) = (* This should not happen - avoid warning. *) raise InternalError "moveEachValue - empty set" in moveEachValue(ordered, code) end in moveValues(moves, code) end (* Where we have multiple specific registers as either source or destination there is the potential that a destination register if currently in use as a source. *) fun moveMultipleRegisters(regPairList, code) = let val regPairsAsDests = List.map(fn {src, dst} => {src=IsInReg src, dst=IsInReg dst}) regPairList in moveMultipleValues(regPairsAsDests, code) end fun moveIfNecessary({src, dst}, code) = if src = dst then code else MoveXRegToXReg{sReg=src, dReg=dst} :: code (* Add a constant word to the source register and put the result in the destination. regW is used as a work register if necessary. This is used both for addition and subtraction. *) fun addConstantWord({regS, regD, value=0w0, ...}, code) = if regS = regD then code else MoveXRegToXReg{sReg=regS, dReg=regD} :: code | addConstantWord({regS, regD, regW, value}, code) = let (* If we have to load the constant it's better if the top 32-bits are zero if possible. *) val (isSub, unsigned) = if value > Word64.<<(0w1, 0w63) then (true, ~ value) else (false, value) in if unsigned < Word64.<<(0w1, 0w24) then (* We can put up to 24 in a shifted and an unshifted constant. 
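(* For example (values chosen purely for illustration): ARM64 ADD/SUB take a
   12-bit immediate, optionally shifted left by 12, so a constant below 2^24
   needs at most two instructions.

       val v    : word = 0wx12345
       val high = Word.andb (Word.>> (v, 0w12), 0wxfff)   (* 0wx12: the shifted add *)
       val low  = Word.andb (v, 0wxfff)                   (* 0wx345: the plain add  *)
       (* high * 0wx1000 + low = v, i.e. ADD #0x12, LSL #12 followed by ADD #0x345 *)
*)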
*) let val w = Word.fromLarge(Word64.toLarge unsigned) val high = Word.andb(Word.>>(w, 0w12), 0wxfff) val low = Word.andb(w, 0wxfff) val addSub = if isSub then SubImmediate else AddImmediate in if high <> 0w0 then ( (if low <> 0w0 then [addSub{regN=regD, regD=regD, immed=low, shifted=false, setFlags=false, opSize=OpSize64}] else []) @ addSub{regN=regS, regD=regD, immed=high, shifted=true, setFlags=false, opSize=OpSize64} :: code ) else addSub{regN=regS, regD=regD, immed=low, shifted=false, setFlags=false, opSize=OpSize64} :: code end else let (* To minimise the constant and increase the chances that it will fit in a single word look to see if we can shift it. *) fun getShift(value, shift) = if Word64.andb(value, 0w1) = 0w0 then getShift(Word64.>>(value, 0w1), shift+0w1) else (value, shift) val (shifted, shift) = getShift(unsigned, 0w0) in code <::> LoadNonAddr(regW, shifted) <::> (if isSub then SubShiftedReg else AddShiftedReg) {regM=regW, regN=regS, regD=regD, shift=ShiftLSL shift, setFlags=false, opSize=OpSize64} end end - val startOfFunctionLabel = createLabel() (* Used for recursive calls/jumps *) + val labelMaker = createLabelMaker() - val blockToLabelMap = Vector.tabulate(numBlocks, fn _ => createLabel()) + val startOfFunctionLabel = createLabel labelMaker (* Used for recursive calls/jumps *) + + val blockToLabelMap = Vector.tabulate(numBlocks, fn _ => createLabel labelMaker) fun getBlockLabel blockNo = Vector.sub(blockToLabelMap, blockNo) fun codeExtended _ (MoveRegister{source, dest, ...}, code) = moveIfNecessary({src=source, dst=dest}, code) | codeExtended _ (LoadNonAddressConstant{source, dest, ...}, code) = code <::> LoadNonAddr(dest, source) | codeExtended _ (LoadAddressConstant{source, dest, ...}, code) = code <::> LoadAddr(dest, source) | codeExtended _ (LoadWithConstantOffset{dest, base, byteOffset, loadType, ...}, code) = if byteOffset < 0 then LoadRegUnscaled{regT=dest, regN=base, byteOffset=byteOffset, loadType=loadType, unscaledType=NoUpdate} :: code else let val unitOffset = case loadType of Load64 => Int.quot(byteOffset, 8) | Load32 => Int.quot(byteOffset, 4) | Load16 => Int.quot(byteOffset, 2) | Load8 => byteOffset in LoadRegScaled{regT=dest, regN=base, unitOffset=unitOffset, loadType=loadType} :: code end | codeExtended _ (LoadFPWithConstantOffset{dest, base, byteOffset, floatSize, ...}, code) = if byteOffset < 0 then (LoadFPRegUnscaled{regT=dest, regN=base, byteOffset=byteOffset, floatSize=floatSize, unscaledType=NoUpdate}) :: code else let val unitOffset = Int.quot(byteOffset, case floatSize of Float32 => 4 | Double64 => 8) in (LoadFPRegScaled{regT=dest, regN=base, unitOffset=unitOffset, floatSize=floatSize}) :: code end | codeExtended _ (LoadWithIndexedOffset{dest, base, index, loadType, signExtendIndex, ...}, code) = let val baseReg = base and indexReg = index (* In 32-in-64 the index is a 32-bit value in the low order 32-bits. It may be signed if it is a C address. *) val scaleType = if is32in64 andalso signExtendIndex then ExtSXTW else ExtUXTX val scale = case loadType of Load8 => NoScale | _ => ScaleOrShift in (LoadRegIndexed{regT=dest, regN=baseReg, regM=indexReg, option=scaleType scale, loadType=loadType}) :: code end | codeExtended _ (LoadFPWithIndexedOffset{dest, base, index, floatSize, signExtendIndex, ...}, code) = let val baseReg = base and indexReg = index (* In 32-in-64 the index is a 32-bit value in the low order 32-bits. It may be signed if it is a C address. 
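(* Concretely (an illustrative bit pattern): the same low 32 bits can denote
   either a small negative C offset or a large unsigned ML index, which is why
   the choice of extension matters:

       val bits : Word32.word = 0wxFFFFFFFC
       val asSignedCOffset = Word32.toIntX bits   (* ~4 : the ExtSXTW case *)
       val asUnsignedIndex = Word32.toLarge bits  (* 0wxFFFFFFFC = 4294967292 : the ExtUXTX case *)
*)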
*) val scaleType = if is32in64 andalso signExtendIndex then ExtSXTW else ExtUXTX in (LoadFPRegIndexed{regT=dest, regN=baseReg, regM=indexReg, option=scaleType ScaleOrShift, floatSize=floatSize}) :: code end | codeExtended _ (GetThreadId { dest}, code) = (* Load the thread id. This is always a 64-bit value. *) (LoadRegScaled{regT=dest, regN=X_MLAssemblyInt, unitOffset=threadIdOffset, loadType=Load64}) :: code | codeExtended _ (ObjectIndexAddressToAbsolute{source, dest, ...}, code) = (AddShiftedReg{regM=source, regN=X_Base32in64, regD=dest, shift=ShiftLSL 0w2, setFlags=false, opSize=OpSize64}) :: code | codeExtended _ (AbsoluteToObjectIndex{source, dest, ...}, code) = let val destReg = dest in code <::> (SubShiftedReg{regM=X_Base32in64, regN=source, regD=destReg, shift=ShiftNone, setFlags=false, opSize=OpSize64}) <::> (shiftConstant{shift=0w2, regN=destReg, regD=destReg, direction=ShiftRightLogical, opSize=OpSize64}) end | codeExtended _ (AllocateMemoryFixed{ bytesRequired, dest, saveRegs }, code) = code <::> AllocateMemoryFixedSize{ bytes=Word.fromLarge bytesRequired, dest=dest, save=saveRegs, work=workReg1 } | codeExtended _ (AllocateMemoryVariable{ size, dest, saveRegs }, code) = code <::> AllocateMemoryVariableSize{ sizeReg=size, dest=dest, save=saveRegs, work=workReg1 } | codeExtended _ (InitialiseMem{ size, addr, init}, code) = let val sizeReg = size and addrReg = addr and initReg = init - val exitLabel = createLabel() and loopLabel = createLabel() + val exitLabel = createLabel labelMaker and loopLabel = createLabel labelMaker (* This uses a loop to initialise. It's possible the size is zero so we have to check at the top of the loop. *) val (bShift, offset, loadType) = if is32in64 then (0w2, ~4, Load32) else (0w3, ~8, Load64) in code <::> (* Add the length in bytes so we point at the end. *) AddShiftedReg{regM=sizeReg, regN=addrReg, regD=workReg1, shift=ShiftLSL bShift, setFlags=false, opSize=OpSize64} <::> SetLabel loopLabel <::> (* Are we at the start? *) SubShiftedReg{regM=workReg1, regN=addrReg, regD=XZero, shift=ShiftNone, setFlags=true, opSize=OpSize64} <::> ConditionalBranch(CondEqual, exitLabel) <::> StoreRegUnscaled{regT=initReg, regN=workReg1, byteOffset=offset, loadType=loadType, unscaledType=PreIndex } <::> UnconditionalBranch loopLabel <::> SetLabel exitLabel end | codeExtended _ (BeginLoop, code) = code | codeExtended _ (JumpLoop{regArgs, stackArgs, checkInterrupt}, code) = let (* TODO: We could have a single list and use ArgOnStack and ArgInReg to distinguish. *) fun convertArg(ArgOnStack{wordOffset, ...}) = IsOnStack wordOffset | convertArg(ArgInReg reg) = IsInReg(reg) val extStackArgs = map (fn {wordOffset, src, ...} => {src=convertArg src, dst=IsOnStack wordOffset}) stackArgs val extRegArgs = map (fn {dst, src} => {src=convertArg src, dst=convertArg(ArgInReg dst)}) regArgs val code2 = moveMultipleValues(extStackArgs @ extRegArgs, code) in case checkInterrupt of NONE => code2 | SOME saveRegs => let - val skipCheck = createLabel() + val skipCheck = createLabel labelMaker in code2 <::> (* Put in stack-check code to allow this to be interrupted. 
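(* The check emitted below is just an unsigned comparison of the ML stack
   pointer against the limit kept in the assembly-interface block: carry set
   after the SUBS (CondCarrySet) means no borrow, i.e. there is still room and
   the trap is skipped.  As a plain SML model, with invented names:

       fun mustTrapToRTS {mlStackPtr: Word64.word, stackLimit: Word64.word} =
           mlStackPtr < stackLimit    (* carry clear: take the RTSTrap path *)
*)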
*) LoadRegScaled{regT=workReg1, regN=X_MLAssemblyInt, unitOffset=stackLimitOffset, loadType=Load64} <::> SubShiftedReg{regM=workReg1, regN=X_MLStackPtr, regD=XZero, shift=ShiftNone, setFlags=true, opSize=OpSize64} <::> ConditionalBranch(CondCarrySet, skipCheck) <::> RTSTrap{rtsEntry=stackOverflowCallOffset, work=workReg1, save=saveRegs} <::> SetLabel skipCheck end end | codeExtended _ (StoreWithConstantOffset{source, base, byteOffset, loadType, ...}, code) = if byteOffset < 0 then (StoreRegUnscaled{regT=source, regN=base, byteOffset=byteOffset, loadType=loadType, unscaledType=NoUpdate}) :: code else let val unitOffset = case loadType of Load64 => Int.quot(byteOffset, 8) | Load32 => Int.quot(byteOffset, 4) | Load16 => Int.quot(byteOffset, 2) | Load8 => byteOffset in (StoreRegScaled{regT=source, regN=base, unitOffset=unitOffset, loadType=loadType}) :: code end | codeExtended _ (StoreFPWithConstantOffset{source, base, byteOffset, floatSize, ...}, code) = if byteOffset < 0 then (StoreFPRegUnscaled{regT=source, regN=base, byteOffset=byteOffset, floatSize=floatSize, unscaledType=NoUpdate}) :: code else let val unitOffset = Int.quot(byteOffset, case floatSize of Float32 => 4 | Double64 => 8) in (StoreFPRegScaled{regT=source, regN=base, unitOffset=unitOffset, floatSize=floatSize}) :: code end | codeExtended _ (StoreWithIndexedOffset{source, base, index, loadType, signExtendIndex, ...}, code) = let val baseReg = base and indexReg = index (* In 32-in-64 the index is a 32-bit value in the low order 32-bits. It may be signed if it is a C address. *) val scaleType = if is32in64 andalso signExtendIndex then ExtSXTW else ExtUXTX val scale = case loadType of Load8 => NoScale | _ => ScaleOrShift in (StoreRegIndexed{regT=source, regN=baseReg, regM=indexReg, option=scaleType scale, loadType=loadType}) :: code end | codeExtended _ (StoreFPWithIndexedOffset{source, base, index, floatSize, signExtendIndex, ...}, code) = let val baseReg = base and indexReg = index (* In 32-in-64 the index is a 32-bit value in the low order 32-bits. It may be signed if it is a C address. *) val scaleType = if is32in64 andalso signExtendIndex then ExtSXTW else ExtUXTX in (StoreFPRegIndexed{regT=source, regN=baseReg, regM=indexReg, option=scaleType ScaleOrShift, floatSize=floatSize}) :: code end | codeExtended _ (AddSubImmediate{ source, dest, immed, isAdd, length, ccRef}, code) = let val destReg = dest in ((if isAdd then AddImmediate else SubImmediate) {regN=source, regD=destReg, immed=immed, shifted=false, opSize=length, setFlags=isSome ccRef}) :: code end | codeExtended _ (AddSubRegister{ base, shifted, dest, isAdd, length, ccRef, shift}, code) = let val destReg = dest in ( (if isAdd then AddShiftedReg else SubShiftedReg) {regN=base, regM=shifted, regD=destReg, shift=shift, opSize=length, setFlags=isSome ccRef}) :: code end | codeExtended _ (LogicalImmediate{ source, dest, immed, logOp, length, ccRef}, code) = let val destReg = dest in (BitwiseLogical{regN=source, regD=destReg, bits=immed, opSize=length, logOp=logOp, setFlags=isSome ccRef}) :: code end | codeExtended _ (LogicalRegister{ base, shifted, dest, logOp, length, ccRef, shift}, code) = let (* There are also versions of AND/OR/XOR which operate on a complement (NOT) of the shifted register. It's probably not worth looking for a use for them. 
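(* Those complemented forms are the BIC/ORN/EON encodings; BIC, for example,
   computes

       Word64.andb (x, Word64.notb y)

   and the code generator currently has no use for any of them. *)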
*) val destReg = dest in (LogicalShiftedReg{regN=base, regM=shifted, regD=destReg, shift=shift, opSize=length, logOp=logOp, setFlags=isSome ccRef}) :: code end | codeExtended _ (ShiftRegister{ direction, dest, source, shift, opSize }, code) = (ShiftRegisterVariable{regN=source, regM=shift, regD=dest, shiftDirection=direction, opSize=opSize}) :: code | codeExtended _ (Multiplication{ kind, dest, sourceA, sourceM, sourceN }, code) = let val destReg = dest and srcAReg = sourceA and srcNReg = sourceN and srcMReg = sourceM in (MultiplyAndAddSub{regM=srcMReg, regN=srcNReg, regA=srcAReg, regD=destReg, multKind=kind}) :: code end | codeExtended _ (Division{ isSigned, dest, dividend, divisor, opSize }, code) = (DivideRegs{regN=dividend, regM=divisor, regD=dest, isSigned=isSigned, opSize=opSize}) :: code | codeExtended _ (BeginFunction{regArgs, ...}, code) = let - val skipCheck = createLabel() + val skipCheck = createLabel labelMaker val defaultWords = 10 (* This is wired into the RTS. *) val workRegister = workReg1 val debugTrapAlways = false (* Can be set to true for debugging *) (* Test with either the stack-pointer or a high-water value. The RTS assumes that X9 has been used as the high-water if it is called through stackOverflowXCallOffset rather than stackOverflowCallOffset *) val (testReg, entryPt, code1) = if stackRequired <= defaultWords then (X_MLStackPtr, stackOverflowCallOffset, code) else (X9, stackOverflowXCallOffset, addConstantWord({regS=X_MLStackPtr, regD=X9, regW=workRegister, value= ~ (Word64.fromLarge(Word.toLarge nativeWordSize)) * Word64.fromInt stackRequired}, code)) (* Skip the RTS call if there is enough stack. N.B. The RTS can modify the end-of-stack value to force a trap here even if there is really enough stack. *) val code2 = (if debugTrapAlways then [] else [ConditionalBranch(CondCarrySet, skipCheck), SubShiftedReg{regM=workRegister, regN=testReg, regD=XZero, shift=ShiftNone, setFlags=true, opSize=OpSize64}]) @ (* Load the end-of-stack value. *) LoadRegScaled{regT=workRegister, regN=X_MLAssemblyInt, unitOffset=stackLimitOffset, loadType=Load64} :: code1 val code3 = code2 <::> RTSTrap{rtsEntry=entryPt, work=workReg1, save=List.map #2 regArgs} <::> SetLabel skipCheck val usedRegs = regArgs fun mkPair(pr, rr) = {src=rr,dst=pr} val regPairs = List.map mkPair usedRegs in moveMultipleRegisters(regPairs, code3) end | codeExtended _ (TailRecursiveCall{callKind, regArgs, stackArgs, stackAdjust, currStackSize}, code) = let fun convertArg(ArgOnStack{wordOffset, ...}) = IsOnStack wordOffset | convertArg(ArgInReg reg) = IsInReg(reg) val extStackArgs = map (fn {stack, src} => {dst=IsOnStack(stack+currStackSize), src=convertArg src}) stackArgs val extRegArgs = map (fn (a, r) => {src=convertArg a, dst=IsInReg r}) regArgs (* Tail recursive calls are complicated because we generally have to overwrite the existing stack. That means storing the arguments in the right order to avoid overwriting a value that we are using for a different argument. *) fun codeTailCall(arguments, stackAdjust, code) = if stackAdjust < 0 then let (* If the function we're calling takes more arguments on the stack than the current function we will have to extend the stack. Do that by pushing the argument whose offset is at -1. Then adjust all the offsets and repeat. *) val {src=argM1, ...} = valOf(List.find(fn {dst=IsOnStack ~1, ...} => true | _ => false) arguments) fun renumberArgs [] = [] | renumberArgs ({dst=IsOnStack ~1, ...} :: args) = renumberArgs args (* Remove the one we've done. 
*) | renumberArgs ({dst, src} :: args) = let val newDest = case dst of IsOnStack d => IsOnStack(d+1) | regDest => regDest val newSrc = case src of IsOnStack wordOffset => IsOnStack(wordOffset+1) | other => other in {dst=newDest, src=newSrc} :: renumberArgs args end val pushCode = case argM1 of IsOnStack wordOffset => (StoreRegUnscaled{regT=workReg2, regN=X_MLStackPtr, byteOffset= ~8, unscaledType=PreIndex, loadType=Load64}) :: loadFromStack(workReg2, wordOffset, code) | IsInReg reg => (StoreRegUnscaled{regT=reg, regN=X_MLStackPtr, byteOffset= ~8, unscaledType=PreIndex, loadType=Load64}) :: code in codeTailCall(renumberArgs arguments, stackAdjust+1, pushCode) end else let val loadArgs = moveMultipleValues(arguments, code) in if stackAdjust = 0 then loadArgs else addConstantWord({regS=X_MLStackPtr, regD=X_MLStackPtr, regW=workReg1, value=Word64.fromInt stackAdjust * Word.toLarge nativeWordSize}, loadArgs) end val setArgumentsCode = codeTailCall(extStackArgs @ extRegArgs, stackAdjust+currStackSize, code) val jumpToFunctionCode = case callKind of Recursive => [(UnconditionalBranch startOfFunctionLabel)] | ConstantCode m => if is32in64 then raise InternalError "ConstantCode" (* Not currently *) else [(BranchReg{regD=workReg1, brRegType=BRRBranch}), (LoadAddr(workReg1, m))] | FullCall => if is32in64 then [BranchReg{regD=workReg1, brRegType=BRRBranch}, LoadRegScaled{regT=workReg1, regN=workReg1, unitOffset=0, loadType=Load64}, AddShiftedReg{regM=X8, regN=X_Base32in64, regD=workReg1, shift=ShiftLSL 0w2, setFlags=false, opSize=OpSize64}] else [BranchReg{regD=workReg1, brRegType=BRRBranch}, LoadRegScaled{regT=workReg1, regN=X8, unitOffset=0, loadType=Load64}] in jumpToFunctionCode @ setArgumentsCode end | codeExtended _ (FunctionCall{callKind, regArgs=regArgs, stackArgs=stackArgs, dests, saveRegs, ...}, code) = let local fun pushStackArgs ([], _, code) = code | pushStackArgs (ArgOnStack {wordOffset, ...} ::args, argNum, code) = let (* Have to adjust the offsets of stack arguments. *) val adjustedOffset = wordOffset+argNum in pushStackArgs(args, argNum+1, loadFromStack(workReg1, adjustedOffset, code) <::> StoreRegUnscaled{regT=workReg1, regN=X_MLStackPtr, byteOffset= ~8, unscaledType=PreIndex, loadType=Load64}) end | pushStackArgs (ArgInReg reg ::args, argNum, code) = pushStackArgs(args, argNum+1, code <::> (StoreRegUnscaled{regT=reg, regN=X_MLStackPtr, byteOffset= ~8, unscaledType=PreIndex, loadType=Load64})) val pushedArgs = pushStackArgs(stackArgs, 0, code (* Initial code *)) (* We have to adjust any stack offset to account for the arguments we've pushed. *) val numStackArgs = List.length stackArgs fun convertArg(ArgOnStack{wordOffset, ...}) = IsOnStack(wordOffset+numStackArgs) | convertArg(ArgInReg reg) = IsInReg(reg) in val extRegArgs = map (fn (a, r) => {src=convertArg a, dst=IsInReg r}) regArgs val loadArgs = moveMultipleValues(extRegArgs, pushedArgs) end (* Push the registers before the call and pop them afterwards. 
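(* Because the instruction list is accumulated in reverse with <::>, the
   recursion in makeSavesAndCall below comes out, in execution order, as
   push r1 .. push rn, the call, then pop rn .. pop r1.  Schematically (the
   string representation is invented purely for this sketch, it is not the
   real precode form):

       fun bracket ([], call) = call
         | bracket (r :: rs, call) = "push " ^ r :: bracket (rs, call) @ ["pop " ^ r]
       (* bracket (["x20", "x21"], ["blr x16"]) =
            ["push x20", "push x21", "blr x16", "pop x21", "pop x20"] *)
*)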
*) fun makeSavesAndCall([], code) = ( case callKind of Recursive => code <::> (BranchAndLink startOfFunctionLabel) | ConstantCode m => if is32in64 then raise InternalError "ConstantCode" (* Not currently *) else code <::> (LoadAddr(workReg1, m)) <::> (BranchReg{regD=workReg1, brRegType=BRRAndLink}) | FullCall => if is32in64 then code <::> AddShiftedReg{regM=X8, regN=X_Base32in64, regD=workReg1, shift=ShiftLSL 0w2, setFlags=false, opSize=OpSize64} <::> LoadRegScaled{regT=workReg1, regN=workReg1, unitOffset=0, loadType=Load64} <::> BranchReg{regD=workReg1, brRegType=BRRAndLink} else code <::> LoadRegScaled{regT=workReg1, regN=X8, unitOffset=0, loadType=Load64} <::> BranchReg{regD=workReg1, brRegType=BRRAndLink} ) | makeSavesAndCall(reg::regs, code) = let val areg = reg in makeSavesAndCall(regs, code <::> StoreRegUnscaled{regT=areg, regN=X_MLStackPtr, byteOffset= ~8, loadType=Load64, unscaledType=PreIndex}) <::> LoadRegUnscaled{regT=areg, regN=X_MLStackPtr, byteOffset= 8, loadType=Load64, unscaledType=PostIndex} end (* Results. These go from the specific result register into the allocated register. *) val resultPairs = List.map(fn (pr, rr) => {src=rr,dst=pr}) dests in moveMultipleRegisters(resultPairs, makeSavesAndCall(saveRegs, loadArgs)) end | codeExtended _ (ReturnResultFromFunction { results, returnReg, numStackArgs }, code) = let fun resetStack(0, code) = code | resetStack(nItems, code) = addConstantWord({regS=X_MLStackPtr, regD=X_MLStackPtr, regW=X3, value=Word64.fromLarge(Word.toLarge nativeWordSize) * Word64.fromInt nItems}, code) (* Return results. This goes from the allocated register into the specific register rr. *) val resultPairs = List.map(fn (pr, rr) => {src=pr,dst=rr}) results in BranchReg{regD=returnReg, brRegType=BRRReturn} :: resetStack(numStackArgs, moveMultipleRegisters(resultPairs, code)) end | codeExtended _ (RaiseExceptionPacket{ packetReg }, code) = (* We need a work register here. It can be any register other than X0 since we don't preserve registers across calls. *) (* Copy the handler "register" into the stack pointer. Then jump to the address in the first word. The second word is the next handler. This is set up in the handler. We have a lot more raises than handlers since most raises are exceptional conditions such as overflow so it makes sense to minimise the code in each raise. *) moveIfNecessary({src=packetReg, dst=X0}, code) <::> LoadRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset, loadType=Load64} <::> LoadRegScaled{regT=workReg1, regN=X_MLStackPtr, unitOffset=0, loadType=Load64} <::> BranchReg{regD=workReg1, brRegType=BRRBranch } | codeExtended _ (PushToStack{ source, copies, ... }, code) = let val reg = source val _ = copies > 0 orelse raise InternalError "PushToStack: copies<1" fun pushn(0, c) = c | pushn(n, c) = pushn(n-1, (StoreRegUnscaled{regT=reg, regN=X_MLStackPtr, byteOffset= ~8, loadType=Load64, unscaledType=PreIndex}) :: c) in pushn(copies, code) end | codeExtended _ (LoadStack{ dest, wordOffset, ... }, code) = loadFromStack(dest, wordOffset, code) | codeExtended _ (StoreToStack{ source, stackOffset, ... }, code) = (* Store into the stack to set a field of a container. Always 64-bits. *) storeToStack(source, stackOffset, workReg1, code) | codeExtended _ (ContainerAddress{ dest, stackOffset, ... }, code) = (* Set the register to an offset in the stack. 
*) let val _ = stackOffset >= 0 orelse raise InternalError "codeGenICode: ContainerAddress - negative offset" val byteOffset = stackOffset * Word.toInt nativeWordSize in if byteOffset >= 4096 then code <::> LoadNonAddr(dest, Word64.fromInt byteOffset) <::> AddShiftedReg{regN=X_MLStackPtr, regM=dest, regD=dest, shift=ShiftNone, setFlags=false, opSize=OpSize64} else code <::> AddImmediate{regN=X_MLStackPtr, regD=dest, immed=Word.fromInt byteOffset, shifted=false, setFlags=false, opSize=OpSize64} end | codeExtended _ (ResetStackPtr{ numWords, ... }, code) = addConstantWord({regS=X_MLStackPtr, regD=X_MLStackPtr, regW=workReg1, value=Word64.fromInt numWords * Word.toLarge nativeWordSize}, code) | codeExtended _ (TagValue{ source, dest, isSigned=_, opSize }, code) = (* Shift left by one bit and add one. *) code <::> shiftConstant{ direction=ShiftLeft, regD=dest, regN=source, shift=0w1, opSize=opSize } <::> BitwiseLogical{ bits=0w1, regN=dest, regD=dest, opSize=opSize, setFlags=false, logOp=LogOr} | codeExtended _ (UntagValue{ source, dest, isSigned, opSize }, code) = code <::> shiftConstant{ direction=if isSigned then ShiftRightArithmetic else ShiftRightLogical, regD=dest, regN=source, shift=0w1, opSize=opSize } | codeExtended _ (BoxLarge{ source, dest, saveRegs }, code) = boxSysWord({source=source, destination=dest, workReg=workReg1, saveRegs=saveRegs}, code) | codeExtended _ (UnboxLarge{ source, dest }, code) = let (* Unbox a large word. The argument is a poly word. *) val destReg = dest and srcReg = source in if is32in64 then LoadRegScaled{regT=destReg, regN=destReg, unitOffset=0, loadType=Load64} :: AddShiftedReg{regM=srcReg, regN=X_Base32in64, regD=destReg, shift=ShiftLSL 0w2, setFlags=false, opSize=OpSize64} :: code else LoadRegScaled{regT=destReg, regN=srcReg, unitOffset=0, loadType=Load64} :: code end | codeExtended _ (BoxTagFloat{ floatSize=Double64, source, dest, saveRegs }, code) = boxDouble({source=source, destination=dest, workReg=workReg1, saveRegs=saveRegs}, code) | codeExtended _ (BoxTagFloat{ floatSize=Float32, source, dest, saveRegs }, code) = let val floatReg = source and fixedReg = dest in if is32in64 then boxFloat({source=floatReg, destination=fixedReg, workReg=workReg1, saveRegs=saveRegs}, code) else code <::> MoveFPToGeneral{regN=floatReg, regD=fixedReg, floatSize=Float32} <::> shiftConstant{ direction=ShiftLeft, shift=0w32, regN=fixedReg, regD=fixedReg, opSize=OpSize64} <::> BitwiseLogical{ bits=0w1, regN=fixedReg, regD=fixedReg, opSize=OpSize64, setFlags=false, logOp=LogOr} end | codeExtended _ (UnboxTagFloat { floatSize=Double64, source, dest }, code) = if is32in64 then code <::> AddShiftedReg{regM=source, regN=X_Base32in64, regD=workReg1, shift=ShiftLSL 0w2, setFlags=false, opSize=OpSize64} <::> LoadFPRegScaled{regT=dest, regN=workReg1, unitOffset=0, floatSize=Double64} else code <::> LoadFPRegScaled{regT=dest, regN=source, unitOffset=0, floatSize=Double64} | codeExtended _ (UnboxTagFloat { floatSize=Float32, source, dest }, code) = (* This is tagged in native 64-bits. In 32-in-64 we're loading 32-bits so we can use an indexed load directly. 
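(* The representation behind TagValue/UntagValue and the float boxing above: a
   short ML integer n is held as 2*n + 1, so bit 0 distinguishes it from an
   aligned pointer, and in native 64-bit code a tagged Float32 keeps its bits
   in the upper 32 bits with the tag in bit 0.  For the integer case:

       fun tag n           = Word64.orb (Word64.<< (n, 0w1), 0w1)
       fun untagUnsigned w = Word64.>> (w, 0w1)
       fun untagSigned w   = Word64.~>> (w, 0w1)
       (* tag 0w21 = 0w43; untagUnsigned 0w43 = 0w21 *)
*)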
*) if is32in64 then code <::> LoadFPRegIndexed{regN=X_Base32in64, regM=source, regT=dest, option=ExtUXTX ScaleOrShift, floatSize=Float32} else code <::> shiftConstant{direction=ShiftRightLogical, shift=0w32, regN=source, regD=workReg1, opSize=OpSize64} <::> MoveGeneralToFP{regN=workReg1, regD=dest, floatSize=Float32} | codeExtended _ (LoadAcquire{dest, base, loadType, ...}, code) = LoadAcquireReg{regT=dest, regN=base, loadType=loadType} :: code | codeExtended _ (StoreRelease{source, base, loadType, ...}, code) = StoreReleaseReg{regT=source, regN=base, loadType=loadType} :: code | codeExtended _ (BitFieldShift{ source, dest, isSigned, length, immr, imms }, code) = BitField{immr=immr, imms=imms, regN=source, regD=dest, bitfieldKind=if isSigned then BFSigned else BFUnsigned, opSize=length} :: code | codeExtended _ (BitFieldInsert{ source, destAsSource, dest, length, immr, imms }, code) = let (* If we're using BitFieldMove we retain some of the bits of the destination. The higher levels require us to treat that as a source. *) val _ = source = dest andalso raise InternalError "codeExtended: bitfield: dest=source" in BitField{immr=immr, imms=imms, regN=source, regD=dest, bitfieldKind=BFInsert, opSize=length} :: moveIfNecessary({src=destAsSource, dst=dest}, code) end | codeExtended {flow} (IndexedCaseOperation{testReg}, code) = let (* testReg contains the original value after the lowest value has been subtracted. Since both the original value and the lowest value were tagged it contains a shifted but untagged value. *) (* This should only be within a block with an IndexedBr flow type. *) val cases = case flow of IndexedBr cases => cases | _ => raise InternalError "codeGenICode: IndexedCaseOperation" val caseLabels = map getBlockLabel cases - val tableLabel = createLabel() + val tableLabel = createLabel labelMaker in code <::> LoadLabelAddress(workReg1, tableLabel) <::> (* Add the value shifted by one since it's already shifted. *) AddShiftedReg{regN=workReg1, regD=workReg1, regM=testReg, shift=ShiftLSL 0w1, setFlags=false, opSize=OpSize64} <::> BranchReg{regD=workReg1, brRegType=BRRBranch} <::> BranchTable{ startLabel=tableLabel, brTable=caseLabels } end | codeExtended {flow} (PushExceptionHandler, code) = let (* This should only be within a block with a SetHandler flow type. *) val handleLabel = case flow of SetHandler{ handler, ...} => handler | _ => raise InternalError "codeGenICode: PushExceptionHandler" val labelRef = getBlockLabel handleLabel in (* Push the old handler and the handler entry point and set the "current handler" to point to the stack after we've pushed these. *) code <::> LoadRegScaled{regT=workReg1, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset, loadType=Load64} <::> LoadLabelAddress(workReg2, labelRef) <::> StoreRegPair{regT1=workReg2, regT2=workReg1, regN=X_MLStackPtr, unitOffset= ~2, unscaledType=PreIndex, loadType=Load64} <::> StoreRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset, loadType=Load64} end | codeExtended _ (PopExceptionHandler, code) = (* Remove and discard the handler we've set up. Pop the previous handler and put into "current handler". 
*) code <::> LoadRegPair{regT1=XZero, regT2=workReg2, regN=X_MLStackPtr, unitOffset=2, unscaledType=PostIndex, loadType=Load64} <::> StoreRegScaled{regT=workReg2, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset, loadType=Load64} | codeExtended _ (BeginHandler{packetReg}, code) = let val beginHandleCode = code <::> (* The exception raise code resets the stack pointer to the value in the exception handler so this is probably redundant. Leave it for the moment, *) LoadRegScaled{regT=X_MLStackPtr, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset, loadType=Load64} <::> LoadRegPair{regT1=XZero, regT2=workReg2, regN=X_MLStackPtr, unitOffset=2, unscaledType=PostIndex, loadType=Load64} <::> StoreRegScaled{regT=workReg2, regN=X_MLAssemblyInt, unitOffset=exceptionHandlerOffset, loadType=Load64} in moveIfNecessary({src=X0, dst=packetReg }, beginHandleCode) end | codeExtended _ (CompareByteVectors{vec1Addr, vec2Addr, length, ...}, code) = let (* Construct a loop to compare two vectors of bytes. *) val vec1Reg = vec1Addr and vec2Reg = vec2Addr and lenReg = length - val loopLabel = createLabel() and exitLabel = createLabel() + val loopLabel = createLabel labelMaker and exitLabel = createLabel labelMaker in code <::> (* Set the CC to Equal before we start in case length = 0 *) SubShiftedReg{regM=lenReg, regN=lenReg, regD=XZero, shift=ShiftNone, setFlags=true, opSize=OpSize64} <::> SetLabel loopLabel <::> (* Start of loop *) CompareBranch{ test=lenReg, label=exitLabel, onZero=true, opSize=OpSize64} <::> (* Go to the end when len = zero *) (* Load the bytes for the comparison and increment each. *) LoadRegUnscaled{regT=workReg1, regN=vec1Reg, byteOffset=1, unscaledType=PostIndex, loadType=Load8} <::> LoadRegUnscaled{regT=workReg2, regN=vec2Reg, byteOffset=1, unscaledType=PostIndex, loadType=Load8} <::> SubImmediate{regN=lenReg, regD=lenReg, immed=0w1, shifted=false, setFlags=false, opSize=OpSize64} <::> (* Decr len *) (* Compare *) SubShiftedReg{regM=workReg2, regN=workReg1, regD=XZero, shift=ShiftNone, setFlags=true, opSize=OpSize64} <::> ConditionalBranch(CondEqual, loopLabel) <::> (* Loop if they're equal *) SetLabel exitLabel end | codeExtended _ (BlockMove{srcAddr, destAddr, length, isByteMove}, code) = let (* Construct a loop to move the data. 
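(* In effect the loop constructed here behaves like this SML model over arrays
   (the names are invented for the illustration):

       fun blockMove (src, dst, len) =
           let
               fun loop i =
                   if i >= len then ()
                   else (Array.update (dst, i, Array.sub (src, i)); loop (i + 1))
           in
               loop 0
           end

   with the item width chosen below as a byte, a 32-bit word (32-in-64) or a
   64-bit word. *)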
*) val srcReg = srcAddr and destReg = destAddr and lenReg = length - val loopLabel = createLabel() and exitLabel = createLabel() + val loopLabel = createLabel labelMaker and exitLabel = createLabel labelMaker val (offset, loadType) = if isByteMove then (1, Load8) else if is32in64 then (4, Load32) else (8, Load64) in code <::> SetLabel loopLabel (* Start of loop *) <::> CompareBranch{ test=lenReg, label=exitLabel, onZero=true, opSize=OpSize64} <::> (* Exit when length = 0 *) LoadRegUnscaled{regT=workReg1, regN=srcReg, byteOffset=offset, loadType=loadType, unscaledType=PostIndex} <::> StoreRegUnscaled{regT=workReg1, regN=destReg, byteOffset=offset, loadType=loadType, unscaledType=PostIndex} <::> SubImmediate{regN=lenReg, regD=lenReg, immed=0w1, shifted=false, setFlags=false, opSize=OpSize64} <::> (* Decr len *) UnconditionalBranch loopLabel <::> (* Back to the start *) SetLabel exitLabel end | codeExtended _ (AddSubXSP{ source, dest, isAdd }, code) = let val allocFreeCode = (if isAdd then AddExtendedReg else SubExtendedReg) {regM=source, regN=XSP, regD=XSP, extend=ExtUXTX 0w0, setFlags=false, opSize=OpSize64} :: code in case dest of XZero => allocFreeCode | destReg => (* We have to use add here to get the SP into the destination instead of the usual move. *) AddImmediate{regN=XSP, regD=destReg, immed=0w0, shifted=false, setFlags=false, opSize=OpSize64} :: allocFreeCode end | codeExtended _ (TouchValue _, code) = code (* Don't need to do anything now. *) (* Used in mutex operations. *) | codeExtended _ (LoadAcquireExclusive{ base, dest }, code) = LoadAcquireExclusiveRegister{regN=base, regT=dest} :: code | codeExtended _ (StoreReleaseExclusive{ base, source, result }, code) = StoreReleaseExclusiveRegister{regS=result, regT=source, regN=base} :: code | codeExtended _ (MemoryBarrier, code) = code <::> MemBarrier | codeExtended _ (ConvertIntToFloat{ source, dest, srcSize, destSize}, code) = (CvtIntToFP{regN=source, regD=dest, floatSize=destSize, opSize=srcSize}) :: code | codeExtended _ (ConvertFloatToInt{ source, dest, srcSize, destSize, rounding}, code) = (CvtFloatToInt{regN=source, regD=dest, round=rounding, floatSize=srcSize, opSize=destSize}) :: code | codeExtended _ (UnaryFloatingPt{ source, dest, fpOp}, code) = (FPUnaryOp{regN=source, regD=dest, fpOp=fpOp}) :: code | codeExtended _ (BinaryFloatingPoint{ arg1, arg2, dest, fpOp, opSize}, code) = (FPBinaryOp{regN=arg1, regM=arg2, regD=dest, floatSize=opSize, fpOp=fpOp}) :: code | codeExtended _ (CompareFloatingPoint{ arg1, arg2, opSize, ...}, code) = (FPComparison{regN=arg1, regM=arg2, floatSize=opSize}) :: code | codeExtended _ (CacheCheck{ arg1, arg2 }, code) = let - val okLabel = createLabel() + val okLabel = createLabel labelMaker in (code <::> SubShiftedReg {regM=arg1, regN=arg2, regD=XZero, shift=ShiftNone, opSize=OpSize64, setFlags=true} <::> ConditionalBranch(CondEqual, okLabel) <::> MoveXRegToXReg{sReg=XZero, dReg=X16} <::> LoadRegScaled{regT=X16, regN=X16, unitOffset=0, loadType=Load16} <::> SetLabel okLabel) end local (* processed - set to true when a block has been processed. *) val processed = Array.array(numBlocks, false) fun haveProcessed n = Array.sub(processed, n) (* Find the blocks that reference this one. This isn't essential but allows us to try to generate blocks in the order of the control flow. This in turn may allow us to use short branches rather than long ones. 
*) val labelRefs = Array.array(numBlocks, []) datatype flowCode = FlowCodeSimple of int | FlowCodeCMove of {code: precode list, trueJump: int, falseJump: int} (* Process this recursively to set the references. If we have unreachable blocks, perhaps because they've been merged, we don't want to include them in the reference counting. This shouldn't happen now that IdentifyReferences removes unreferenced blocks. *) fun setReferences fromLabel toLabel = case Array.sub(labelRefs, toLabel) of [] => (* Not yet visited at all. *) let val BasicBlock{ flow, ...} = Vector.sub(blocks, toLabel) val refs = case flow of ExitCode => [] | Unconditional lab => [lab] | Conditional{trueJump, falseJump, ... } => [trueJump, falseJump] | IndexedBr labs => labs | SetHandler { handler, continue } => [handler, continue] | UnconditionalHandle _ => [] | ConditionalHandle { continue, ...} => [continue] val () = if fromLabel >= 0 then Array.update(labelRefs, toLabel, [fromLabel]) else () in List.app (setReferences toLabel) refs end | refs => (* We've visiting this at least once. Just add us to the list. *) Array.update(labelRefs, toLabel, fromLabel :: refs) val _ = setReferences 0 0 (* Process the blocks. We keep the "stack" explicit rather than using recursion because this allows us to select both arms of a conditional branch sooner. *) fun genCode(toDo, lastFlow, code) = case List.filter (not o haveProcessed) toDo of [] => let (* There's nothing left to do. We may need to add a final branch to the end. *) val finalBranch = case lastFlow of ExitCode => [] | IndexedBr _ => [] | Unconditional dest => [(UnconditionalBranch(getBlockLabel dest))] | Conditional { condition, trueJump, falseJump, ...} => [ (UnconditionalBranch(getBlockLabel falseJump)), (ConditionalBranch(condition, getBlockLabel trueJump)) ] | SetHandler { continue, ...} => [(UnconditionalBranch(getBlockLabel continue))] | UnconditionalHandle _ => [] | ConditionalHandle { continue, ...} => [(UnconditionalBranch(getBlockLabel continue))] in finalBranch @ code (* Done. *) end | stillToDo as head :: _ => let local (* Check the references. If all the sources that lead up to this have already been we won't have any backward jumps. *) fun available dest = List.all haveProcessed (Array.sub(labelRefs, dest)) val continuation = case lastFlow of ExitCode => NONE | IndexedBr _ => NONE (* We could put the last branch in here. *) | Unconditional dest => if not (haveProcessed dest) andalso available dest then SOME(FlowCodeSimple dest) else NONE | Conditional {trueJump, falseJump, condition, ...} => (* We can usually choose either destination and in nearly all cases it won't matter. The default branch is not to take forward jumps so if there is reason to believe that one branch is more likely we should follow that branch now and leave the other. If we have Cond(No)Overflow we assume that overflow is unusual. If one branch raises an exception we assume that that is unusual. *) let val (first, second) = case (condition, Vector.sub(blocks, falseJump)) of (CondNoOverflow, _) => (trueJump, falseJump) | (_, BasicBlock{ flow=ExitCode, block, ...}) => if List.exists(fn RaiseExceptionPacket _ => true | _ => false) block then (trueJump, falseJump) else (falseJump, trueJump) | _ => (falseJump, trueJump) in if not (haveProcessed first) andalso available first then SOME(FlowCodeSimple first) else if not (haveProcessed second) andalso available second then SOME(FlowCodeSimple second) else NONE end | SetHandler { continue, ... } => (* We want the continuation if possible. 
We'll need a branch round the handler so that won't help. *) if not (haveProcessed continue) andalso available continue then SOME(FlowCodeSimple continue) else NONE | UnconditionalHandle _ => NONE | ConditionalHandle _ => NONE in (* First choice - continue the existing block. Second choice - the first item whose sources have all been processed. Third choice - something from the list. *) val picked = case continuation of SOME c => c | NONE => case List.find available stillToDo of SOME c => FlowCodeSimple c | NONE => FlowCodeSimple head end in case picked of FlowCodeSimple picked => let val () = Array.update(processed, picked, true) (* Code to terminate the previous block. *) val startCode = case lastFlow of ExitCode => [] | IndexedBr _ => [] | UnconditionalHandle _ => [] | Unconditional dest => if dest = picked then [] else [(UnconditionalBranch(getBlockLabel dest))] | ConditionalHandle { continue, ...} => if continue = picked then [] else [(UnconditionalBranch(getBlockLabel continue))] | SetHandler { continue, ... } => if continue = picked then [] else [(UnconditionalBranch(getBlockLabel continue))] | Conditional { condition, trueJump, falseJump, ...} => if picked = falseJump (* Usual case. *) then [(ConditionalBranch(condition, getBlockLabel trueJump))] else if picked = trueJump then (* We have a jump to the true condition. Invert the jump. This is more than an optimisation. Because this immediately precedes the true block we're not going to generate a label. *) [(ConditionalBranch(invertTest condition, getBlockLabel falseJump))] else [ (UnconditionalBranch(getBlockLabel falseJump)), (ConditionalBranch(condition, getBlockLabel trueJump)) ] (* Code-generate the body with the code we've done so far at the end. Add a label at the start if necessary. *) local (* If the previous block dropped through to this and this was the only reference then we don't need a label. *) fun onlyJumpingHere (lab: int) = if lab <> picked then false else case Array.sub(labelRefs, picked) of [singleton] => singleton = lab | _ => false val noLabel = case lastFlow of ExitCode => picked = 0 (* Unless this was the first block. *) | Unconditional dest => onlyJumpingHere dest | Conditional { trueJump, falseJump, ...} => onlyJumpingHere trueJump orelse onlyJumpingHere falseJump | IndexedBr _ => false | SetHandler _ => false | UnconditionalHandle _ => false | ConditionalHandle { continue, ...} => onlyJumpingHere continue in val startLabel = if noLabel then [] else [(SetLabel(getBlockLabel picked))] end val BasicBlock { flow, block, ...} = Vector.sub(blocks, picked) local fun genCodeBlock(instr, code) = codeExtended {flow=flow} (instr, code) in val bodyCode = List.foldl genCodeBlock (startLabel @ startCode @ code) block end val addSet = case flow of ExitCode => [] | IndexedBr cases => cases | Unconditional dest => [dest] | Conditional {trueJump, falseJump, ...} => [falseJump, trueJump] | SetHandler { handler, continue } => [handler, continue] | UnconditionalHandle _ => [] | ConditionalHandle { continue, ...} => [continue] in genCode(addSet @ stillToDo, flow, bodyCode) end | FlowCodeCMove{code, trueJump, falseJump} => let (* We've generated a conditional move and possibly a return. If the trueJump and falseJump are only ever referenced from this block they're done, otherwise we still need to do them. 
*) val _ = case Array.sub(labelRefs, trueJump) of [_] => Array.update(processed, trueJump, true) | _ => () val _ = case Array.sub(labelRefs, falseJump) of [_] => Array.update(processed, falseJump, true) | _ => () val BasicBlock { flow, ...} = Vector.sub(blocks, trueJump) val addSet = case flow of ExitCode => [] | Unconditional dest => [dest] | _ => raise InternalError "FlowCodeCMove" in genCode(addSet @ stillToDo, flow, code) end end in val ops = genCode([0], ExitCode, [(SetLabel startOfFunctionLabel)]) end in generateFinalCode{instrs=List.rev ops, name=functionName, resultClosure=resultClosure, - parameters=debugSwitches, profileObject=profileObject, labelCount=0} + parameters=debugSwitches, profileObject=profileObject, labelMaker= labelMaker} end structure Sharing = struct type ('genReg, 'optGenReg, 'fpReg) basicBlock = ('genReg, 'optGenReg, 'fpReg) basicBlock and xReg = xReg and vReg = vReg and closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64PreAssembly.ML b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64PreAssembly.ML index 00d24425..f1bda622 100644 --- a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64PreAssembly.ML +++ b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64PreAssembly.ML @@ -1,895 +1,973 @@ (* Copyright (c) 2021-2 David C. J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public Licence version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public Licence for more details. You should have received a copy of the GNU Lesser General Public Licence along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* The pre-assembly layer goes below the icode and allows peep-hole optimisation. *) functor Arm64PreAssembly( structure Arm64Assembly: ARM64ASSEMBLY structure Debug: DEBUG structure Pretty: PRETTY ): ARM64PREASSEMBLY = struct open Arm64Assembly exception InternalError = Misc.InternalError (* Reversed cons and append to make the code easier to read. 
*) infix 5 <::> <@> fun tl <::> hd = hd :: tl and snd <@> fst = fst @ snd (* Many of the datatypes are inherited from Arm64Assembly *) datatype loadType = Load64 | Load32 | Load16 | Load8 and opSize = OpSize32 | OpSize64 and logicalOp = LogAnd | LogOr | LogXor and floatSize = Float32 | Double64 and shiftDirection = ShiftLeft | ShiftRightLogical | ShiftRightArithmetic and multKind = MultAdd32 | MultSub32 | MultAdd64 | MultSub64 | SignedMultAddLong (* 32bit*32bit + 64bit => 64Bit *) | SignedMultHigh (* High order part of 64bit*64Bit *) and fpUnary = NegFloat | NegDouble | AbsFloat | AbsDouble | ConvFloatToDble | ConvDbleToFloat and fpBinary = MultiplyFP | DivideFP | AddFP | SubtractFP and unscaledType = NoUpdate | PreIndex | PostIndex and condSet = CondSet | CondSetIncr | CondSetInvert | CondSetNegate and bitfieldKind = BFUnsigned | BFSigned | BFInsert and brRegType = BRRBranch | BRRAndLink | BRRReturn - type precodeLabel = labels + datatype label = Label of int + type labelMaker = int ref + fun createLabelMaker() = ref 0 + fun createLabel(r as ref n) = Label n before r := n+1 datatype precode = (* Basic instructions *) AddImmediate of {regN: xReg, regD: xReg, immed: word, shifted: bool, opSize: opSize, setFlags: bool} | SubImmediate of {regN: xReg, regD: xReg, immed: word, shifted: bool, opSize: opSize, setFlags: bool} | AddShiftedReg of {regM: xReg, regN: xReg, regD: xReg, shift: shiftType, opSize: opSize, setFlags: bool} | SubShiftedReg of {regM: xReg, regN: xReg, regD: xReg, shift: shiftType, opSize: opSize, setFlags: bool} | AddExtendedReg of {regM: xReg, regN: xReg, regD: xReg, extend: Word8.word extend, opSize: opSize, setFlags: bool} | SubExtendedReg of {regM: xReg, regN: xReg, regD: xReg, extend: Word8.word extend, opSize: opSize, setFlags: bool} | MultiplyAndAddSub of {regM: xReg, regN: xReg, regA: xReg, regD: xReg, multKind: multKind} | DivideRegs of {regM: xReg, regN: xReg, regD: xReg, isSigned: bool, opSize: opSize} | LogicalShiftedReg of {regM: xReg, regN: xReg, regD: xReg, shift: shiftType, logOp: logicalOp, opSize: opSize, setFlags: bool} | LoadRegScaled of {regT: xReg, regN: xReg, unitOffset: int, loadType: loadType} | LoadFPRegScaled of {regT: vReg, regN: xReg, unitOffset: int, floatSize: floatSize} | StoreRegScaled of {regT: xReg, regN: xReg, unitOffset: int, loadType: loadType} | StoreFPRegScaled of {regT: vReg, regN: xReg, unitOffset: int, floatSize: floatSize} | LoadRegUnscaled of {regT: xReg, regN: xReg, byteOffset: int, loadType: loadType, unscaledType: unscaledType} | StoreRegUnscaled of {regT: xReg, regN: xReg, byteOffset: int, loadType: loadType, unscaledType: unscaledType} | LoadFPRegUnscaled of {regT: vReg, regN: xReg, byteOffset: int, floatSize: floatSize, unscaledType: unscaledType} | StoreFPRegUnscaled of {regT: vReg, regN: xReg, byteOffset: int, floatSize: floatSize, unscaledType: unscaledType} | LoadRegIndexed of {regT: xReg, regN: xReg, regM: xReg, loadType: loadType, option: scale extend} | StoreRegIndexed of {regT: xReg, regN: xReg, regM: xReg, loadType: loadType, option: scale extend} | LoadFPRegIndexed of {regT: vReg, regN: xReg, regM: xReg, floatSize: floatSize, option: scale extend} | StoreFPRegIndexed of {regT: vReg, regN: xReg, regM: xReg, floatSize: floatSize, option: scale extend} (* LoadAcquire and StoreRelease are used for mutables. 
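(* With the label allocator introduced above, each code-generation run creates
   its own maker and draws labels from it rather than passing a separate
   labelCount to generateFinalCode; for instance

       val maker = createLabelMaker ()
       val l1 = createLabel maker    (* Label 0 *)
       val l2 = createLabel maker    (* Label 1 *)

   leaves the number of labels allocated recorded in the maker itself. *)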
*) | LoadAcquireReg of {regN: xReg, regT: xReg, loadType: loadType} | StoreReleaseReg of {regN: xReg, regT: xReg, loadType: loadType} (* LoadAcquireExclusiveRegister and StoreReleaseExclusiveRegister are used for mutexes. *) | LoadAcquireExclusiveRegister of {regN: xReg, regT: xReg} | StoreReleaseExclusiveRegister of {regS: xReg, regT: xReg, regN: xReg} | MemBarrier | LoadRegPair of { regT1: xReg, regT2: xReg, regN: xReg, unitOffset: int, loadType: loadType, unscaledType: unscaledType} | StoreRegPair of { regT1: xReg, regT2: xReg, regN: xReg, unitOffset: int, loadType: loadType, unscaledType: unscaledType} | LoadFPRegPair of { regT1: vReg, regT2: vReg, regN: xReg, unitOffset: int, floatSize: floatSize, unscaledType: unscaledType} | StoreFPRegPair of { regT1: vReg, regT2: vReg, regN: xReg, unitOffset: int, floatSize: floatSize, unscaledType: unscaledType} | ConditionalSet of {regD: xReg, regTrue: xReg, regFalse: xReg, cond: condition, condSet: condSet, opSize: opSize} | BitField of {immr: word, imms: word, regN: xReg, regD: xReg, opSize: opSize, bitfieldKind: bitfieldKind} | ShiftRegisterVariable of {regM: xReg, regN: xReg, regD: xReg, opSize: opSize, shiftDirection: shiftDirection} | BitwiseLogical of { bits: Word64.word, regN: xReg, regD: xReg, opSize: opSize, setFlags: bool, logOp: logicalOp} (* Floating point *) | MoveGeneralToFP of { regN: xReg, regD: vReg, floatSize: floatSize} | MoveFPToGeneral of {regN: vReg, regD: xReg, floatSize: floatSize} | CvtIntToFP of { regN: xReg, regD: vReg, floatSize: floatSize, opSize: opSize} | CvtFloatToInt of { round: IEEEReal.rounding_mode, regN: vReg, regD: xReg, floatSize: floatSize, opSize: opSize} | FPBinaryOp of { regM: vReg, regN: vReg, regD: vReg, floatSize: floatSize, fpOp: fpBinary} | FPComparison of { regM: vReg, regN: vReg, floatSize: floatSize} | FPUnaryOp of {regN: vReg, regD: vReg, fpOp: fpUnary} (* Branches and Labels. *) - | SetLabel of precodeLabel - | ConditionalBranch of condition * precodeLabel - | UnconditionalBranch of precodeLabel - | BranchAndLink of precodeLabel + | SetLabel of label + | ConditionalBranch of condition * label + | UnconditionalBranch of label + | BranchAndLink of label | BranchReg of {regD: xReg, brRegType: brRegType } - | LoadLabelAddress of xReg * precodeLabel - | TestBitBranch of { test: xReg, bit: Word8.word, label: precodeLabel, onZero: bool } - | CompareBranch of { test: xReg, label: precodeLabel, onZero: bool, opSize: opSize } + | LoadLabelAddress of xReg * label + | TestBitBranch of { test: xReg, bit: Word8.word, label: label, onZero: bool } + | CompareBranch of { test: xReg, label: label, onZero: bool, opSize: opSize } (* Composite instructions *) | MoveXRegToXReg of {sReg: xReg, dReg: xReg} | LoadNonAddr of xReg * Word64.word | LoadAddr of xReg * machineWord | RTSTrap of { rtsEntry: int, work: xReg, save: xReg list } | AllocateMemoryFixedSize of { bytes: word, dest: xReg, save: xReg list, work: xReg } | AllocateMemoryVariableSize of { sizeReg: xReg, dest: xReg, save: xReg list, work: xReg } (* Branch table for indexed case. startLabel is the address of the first label in the list. The branch table is a sequence of unconditional branches. 
*) - | BranchTable of { startLabel: precodeLabel, brTable: precodeLabel list } + | BranchTable of { startLabel: label, brTable: label list } | LoadGlobalHeapBaseInCallback of xReg - fun toAssembler([], code) = code - - | toAssembler(AddImmediate{regN, regD, immed, shifted, opSize, setFlags} :: rest, code) = - let - val instr = - case (opSize, setFlags) of - (OpSize64, false) => addImmediate - | (OpSize32, false) => addImmediate32 - | (OpSize64, true) => addSImmediate - | (OpSize32, true) => addSImmediate32 - in - toAssembler(rest, code <::> instr{regN=regN, regD=regD, immed=immed, shifted=shifted}) - end - - | toAssembler(SubImmediate{regN, regD, immed, shifted, opSize, setFlags} :: rest, code) = - let - val instr = - case (opSize, setFlags) of - (OpSize64, false) => subImmediate - | (OpSize32, false) => subImmediate32 - | (OpSize64, true) => subSImmediate - | (OpSize32, true) => subSImmediate32 - in - toAssembler(rest, code <::> instr{regN=regN, regD=regD, immed=immed, shifted=shifted}) - end - - | toAssembler(AddShiftedReg{regM, regN, regD, shift, opSize, setFlags} :: rest, code) = - let - val instr = - case (opSize, setFlags) of - (OpSize64, false) => addShiftedReg - | (OpSize32, false) => addShiftedReg32 - | (OpSize64, true) => addSShiftedReg - | (OpSize32, true) => addSShiftedReg32 - in - toAssembler(rest, code <::> instr{regM=regM, regN=regN, regD=regD, shift=shift}) - end - - | toAssembler(SubShiftedReg{regM, regN, regD, shift, opSize, setFlags} :: rest, code) = - let - val instr = - case (opSize, setFlags) of - (OpSize64, false) => subShiftedReg - | (OpSize32, false) => subShiftedReg32 - | (OpSize64, true) => subSShiftedReg - | (OpSize32, true) => subSShiftedReg32 - in - toAssembler(rest, code <::> instr{regM=regM, regN=regN, regD=regD, shift=shift}) - end - - | toAssembler(AddExtendedReg{regM, regN, regD, extend, opSize, setFlags} :: rest, code) = - (* Add/SubExtended are only used to access XSP. 
*) - let - val instr = - case (opSize, setFlags) of - (OpSize64, false) => addExtendedReg - | (OpSize32, false) => raise InternalError "AddExtendedReg; 32" - | (OpSize64, true) => addSExtendedReg - | (OpSize32, true) => raise InternalError "AddExtendedReg; 32" - in - toAssembler(rest, code <::> instr{regM=regM, regN=regN, regD=regD, extend=extend}) - end - - | toAssembler(SubExtendedReg{regM, regN, regD, extend, opSize, setFlags} :: rest, code) = - let - val instr = - case (opSize, setFlags) of - (OpSize64, false) => subExtendedReg - | (OpSize32, false) => raise InternalError "AddExtendedReg; 32" - | (OpSize64, true) => subSExtendedReg - | (OpSize32, true) => raise InternalError "AddExtendedReg; 32" - in - toAssembler(rest, code <::> instr{regM=regM, regN=regN, regD=regD, extend=extend}) - end - - | toAssembler(MultiplyAndAddSub{regM, regN, regA, regD, multKind} :: rest, code) = - let - val instr = - case multKind of - MultAdd32 => multiplyAndAdd32{regM=regM, regN=regN, regA=regA, regD=regD} - | MultSub32 => multiplyAndSub32{regM=regM, regN=regN, regA=regA, regD=regD} - | MultAdd64 => multiplyAndAdd{regM=regM, regN=regN, regA=regA, regD=regD} - | MultSub64 => multiplyAndSub{regM=regM, regN=regN, regA=regA, regD=regD} - | SignedMultAddLong => signedMultiplyAndAddLong{regM=regM, regN=regN, regA=regA, regD=regD} - | SignedMultHigh => signedMultiplyHigh{regM=regM, regN=regN, regD=regD} - in - toAssembler(rest, code <::> instr) - end - - | toAssembler(DivideRegs{regM, regN, regD, isSigned, opSize} :: rest, code) = - let - val instr = - case (isSigned, opSize) of - (true, OpSize64) => signedDivide - | (true, OpSize32) => signedDivide32 - | (false, OpSize64) => unsignedDivide - | (false, OpSize32) => unsignedDivide32 - in - toAssembler(rest, code <::> instr{regN=regN, regM=regM, regD=regD}) - end - - | toAssembler(LogicalShiftedReg{regM, regN, regD, shift, logOp, opSize, setFlags} :: rest, code) = - let - val instr = - case (logOp, setFlags, opSize) of - (LogAnd, false, OpSize64) => andShiftedReg - | (LogAnd, true, OpSize64) => andsShiftedReg - | (LogOr, false, OpSize64) => orrShiftedReg - | (LogXor, false, OpSize64) => eorShiftedReg - - | (LogAnd, false, OpSize32) => andShiftedReg32 - | (LogAnd, true, OpSize32) => andsShiftedReg32 - | (LogOr, false, OpSize32) => orrShiftedReg32 - | (LogXor, false, OpSize32) => eorShiftedReg32 - - | _ => raise InternalError "setFlags not valid with OR or XOR" - (* There are also versions of AND/OR/XOR which operate on a complement (NOT) - of the shifted register. It's probably not worth looking for a use for them. 
*) - in - toAssembler(rest, code <::> instr{regN=regN, regM=regM, regD=regD, shift=shift}) - end - - | toAssembler(LoadRegScaled{regT, regN, unitOffset, loadType} :: rest, code) = - let - val instr = - case loadType of - Load64 => loadRegScaled - | Load32 => loadRegScaled32 - | Load16 => loadRegScaled16 - | Load8 => loadRegScaledByte - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, unitOffset=unitOffset}) - end - - | toAssembler(StoreRegScaled{regT, regN, unitOffset, loadType} :: rest, code) = - let - val instr = - case loadType of - Load64 => storeRegScaled - | Load32 => storeRegScaled32 - | Load16 => storeRegScaled16 - | Load8 => storeRegScaledByte - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, unitOffset=unitOffset}) - end - - | toAssembler(LoadFPRegScaled{regT, regN, unitOffset, floatSize} :: rest, code) = - let - val instr = - case floatSize of - Float32 => loadRegScaledFloat - | Double64 => loadRegScaledDouble - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, unitOffset=unitOffset}) - end - - | toAssembler(StoreFPRegScaled{regT, regN, unitOffset, floatSize} :: rest, code) = - let - val instr = - case floatSize of - Float32 => storeRegScaledFloat - | Double64 => storeRegScaledDouble - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, unitOffset=unitOffset}) - end - - | toAssembler(LoadRegUnscaled{regT, regN, byteOffset, loadType, unscaledType} :: rest, code) = - let - val instr = - case (loadType, unscaledType) of - (Load64, NoUpdate) => loadRegUnscaled - | (Load32, NoUpdate) => loadRegUnscaled32 - | (Load16, NoUpdate) => loadRegUnscaled16 - | (Load8, NoUpdate) => loadRegUnscaledByte - | (Load64, PreIndex) => loadRegPreIndex - | (Load32, PreIndex) => loadRegPreIndex32 - | (Load16, PreIndex) => raise InternalError "loadRegPreIndex16" - | (Load8, PreIndex) => loadRegPreIndexByte - | (Load64, PostIndex) => loadRegPostIndex - | (Load32, PostIndex) => loadRegPostIndex32 - | (Load16, PostIndex) => raise InternalError "loadRegPostIndex16" - | (Load8, PostIndex) => loadRegPostIndexByte - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, byteOffset=byteOffset}) - end - - | toAssembler(LoadFPRegUnscaled{regT, regN, byteOffset, floatSize, unscaledType} :: rest, code) = - let - val instr = - case (floatSize, unscaledType) of - (Float32, NoUpdate) => loadRegUnscaledFloat - | (Double64, NoUpdate) => loadRegUnscaledDouble - | _ => raise InternalError "LoadFPRegUnscaled: pre/post indexed" - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, byteOffset=byteOffset}) - end - - | toAssembler(StoreRegUnscaled{regT, regN, byteOffset, loadType, unscaledType} :: rest, code) = - let - val instr = - case (loadType, unscaledType) of - (Load64, NoUpdate) => storeRegUnscaled - | (Load32, NoUpdate) => storeRegUnscaled32 - | (Load16, NoUpdate) => storeRegUnscaled16 - | (Load8, NoUpdate) => storeRegUnscaledByte - | (Load64, PreIndex) => storeRegPreIndex - | (Load32, PreIndex) => storeRegPreIndex32 - | (Load16, PreIndex) => raise InternalError "storeRegPreIndex16" - | (Load8, PreIndex) => storeRegPreIndexByte - | (Load64, PostIndex) => storeRegPostIndex - | (Load32, PostIndex) => storeRegPostIndex32 - | (Load16, PostIndex) => raise InternalError "storeRegPostIndex16" - | (Load8, PostIndex) => storeRegPostIndexByte - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, byteOffset=byteOffset}) - end - - | toAssembler(StoreFPRegUnscaled{regT, regN, byteOffset, floatSize, unscaledType} :: rest, code) = - let - val instr = - case 
(floatSize, unscaledType) of - (Float32, NoUpdate) => storeRegUnscaledFloat - | (Double64, NoUpdate) => storeRegUnscaledDouble - | _ => raise InternalError "StoreFPRegUnscaled: pre/post indexed" - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, byteOffset=byteOffset}) - end - - | toAssembler(LoadRegIndexed{regT, regN, regM, loadType, option} :: rest, code) = - let - val instr = - case loadType of - Load64 => loadRegIndexed - | Load32 => loadRegIndexed32 - | Load16 => loadRegIndexed16 - | Load8 => loadRegIndexedByte - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, regM=regM, option=option}) - end - - | toAssembler(StoreRegIndexed{regT, regN, regM, loadType, option} :: rest, code) = - let - val instr = - case loadType of - Load64 => storeRegIndexed - | Load32 => storeRegIndexed32 - | Load16 => storeRegIndexed16 - | Load8 => storeRegIndexedByte - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, regM=regM, option=option}) - end - - | toAssembler(LoadFPRegIndexed{regT, regN, regM, floatSize, option} :: rest, code) = - let - val instr = - case floatSize of - Float32 => loadRegIndexedFloat - | Double64 => loadRegIndexedDouble - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, regM=regM, option=option}) - end - - | toAssembler(StoreFPRegIndexed{regT, regN, regM, floatSize, option} :: rest, code) = - let - val instr = - case floatSize of - Float32 => storeRegIndexedFloat - | Double64 => storeRegIndexedDouble - in - toAssembler(rest, code <::> instr{regT=regT, regN=regN, regM=regM, option=option}) - end - - | toAssembler(LoadAcquireReg{regN, regT, loadType} :: rest, code) = - let - val loadInstr = - case loadType of - Load64 => loadAcquire - | Load32 => loadAcquire32 - | Load8 => loadAcquireByte - | _ => raise InternalError "LoadAcquire: Unsupported size" (* Not used *) - in - toAssembler(rest, code <::> loadInstr{regT=regT, regN=regN}) - end - - | toAssembler(StoreReleaseReg{regN, regT, loadType} :: rest, code) = - let - val storeInstr = - case loadType of - Load64 => storeRelease - | Load32 => storeRelease32 - | Load8 => storeReleaseByte - | _ => raise InternalError "StoreRelease: Unsupported size" (* Not used *) - in - toAssembler(rest, code <::> storeInstr{regT=regT, regN=regN}) - end - - | toAssembler(LoadAcquireExclusiveRegister{regN, regT} :: rest, code) = - toAssembler(rest, code <::> loadAcquireExclusiveRegister{regN=regN, regT=regT}) - - | toAssembler(StoreReleaseExclusiveRegister{regN, regT, regS} :: rest, code) = - toAssembler(rest, code <::> storeReleaseExclusiveRegister{regN=regN, regT=regT, regS=regS}) - - | toAssembler(MemBarrier :: rest, code) = - toAssembler(rest, code <::> dmbIsh) - - | toAssembler(LoadRegPair{ regT1, regT2, regN, unitOffset, loadType, unscaledType} :: rest, code) = - let - val instr = - case (loadType, unscaledType) of - (Load64, NoUpdate) => loadPairOffset - | (Load64, PreIndex) => loadPairPreIndexed - | (Load64, PostIndex) => loadPairPostIndexed - | (Load32, NoUpdate) => loadPairOffset32 - | (Load32, PreIndex) => loadPairPreIndexed32 - | (Load32, PostIndex) => loadPairPostIndexed32 - | _ => raise InternalError "LoadRegPair: unimplemented" - in - toAssembler(rest, code <::> instr{regT1=regT1, regT2=regT2, regN=regN, unitOffset=unitOffset}) - end - - | toAssembler(StoreRegPair{ regT1, regT2, regN, unitOffset, loadType, unscaledType} :: rest, code) = - let - val instr = - case (loadType, unscaledType) of - (Load64, NoUpdate) => storePairOffset - | (Load64, PreIndex) => storePairPreIndexed - | (Load64, PostIndex) 
=> storePairPostIndexed - | (Load32, NoUpdate) => storePairOffset32 - | (Load32, PreIndex) => storePairPreIndexed32 - | (Load32, PostIndex) => storePairPostIndexed32 - | _ => raise InternalError "StoreRegPair: unimplemented" - in - toAssembler(rest, code <::> instr{regT1=regT1, regT2=regT2, regN=regN, unitOffset=unitOffset}) - end - - | toAssembler(LoadFPRegPair{ regT1, regT2, regN, unitOffset, floatSize, unscaledType} :: rest, code) = - let - val instr = - case (floatSize, unscaledType) of - (Double64, NoUpdate) => loadPairOffsetDouble - | (Double64, PreIndex) => loadPairPreIndexedDouble - | (Double64, PostIndex) => loadPairPostIndexedDouble - | (Float32, NoUpdate) => loadPairOffsetFloat - | (Float32, PreIndex) => loadPairPreIndexedFloat - | (Float32, PostIndex) => loadPairPostIndexedFloat - in - toAssembler(rest, code <::> instr{regT1=regT1, regT2=regT2, regN=regN, unitOffset=unitOffset}) - end - - | toAssembler(StoreFPRegPair{ regT1, regT2, regN, unitOffset, floatSize, unscaledType} :: rest, code) = - let - val instr = - case (floatSize, unscaledType) of - (Double64, NoUpdate) => storePairOffsetDouble - | (Double64, PreIndex) => storePairPreIndexedDouble - | (Double64, PostIndex) => storePairPostIndexedDouble - | (Float32, NoUpdate) => storePairOffsetFloat - | (Float32, PreIndex) => storePairPreIndexedFloat - | (Float32, PostIndex) => storePairPostIndexedFloat - in - toAssembler(rest, code <::> instr{regT1=regT1, regT2=regT2, regN=regN, unitOffset=unitOffset}) - end - - | toAssembler(ConditionalSet{regD, regTrue, regFalse, cond, condSet, opSize} :: rest, code) = - let - val instr = - case (condSet, opSize) of - (CondSet, OpSize64) => conditionalSet - | (CondSetIncr, OpSize64) => conditionalSetIncrement - | (CondSetInvert, OpSize64) => conditionalSetInverted - | (CondSetNegate, OpSize64) => conditionalSetNegated - | (CondSet, OpSize32) => conditionalSet32 - | (CondSetIncr, OpSize32) => conditionalSetIncrement32 - | (CondSetInvert, OpSize32) => conditionalSetInverted32 - | (CondSetNegate, OpSize32) => conditionalSetNegated32 - in - toAssembler(rest, code <::> instr{regD=regD, regTrue=regTrue, regFalse=regFalse, cond=cond}) - end - - | toAssembler(BitField{immr, imms, regN, regD, opSize, bitfieldKind} :: rest, code) = - let - val bfInstr = - case (bitfieldKind, opSize) of - (BFSigned, OpSize64) => signedBitfieldMove64 - | (BFUnsigned, OpSize64) => unsignedBitfieldMove64 - | (BFInsert, OpSize64) => bitfieldMove64 - | (BFSigned, OpSize32) => signedBitfieldMove32 - | (BFUnsigned, OpSize32) => unsignedBitfieldMove32 - | (BFInsert, OpSize32) => bitfieldMove32 - in - toAssembler(rest, code <::> bfInstr{immr=immr, imms=imms, regN=regN, regD=regD}) - end - - | toAssembler(ShiftRegisterVariable{regM, regN, regD, opSize, shiftDirection} :: rest, code) = - let - val instr = - case (shiftDirection, opSize) of - (ShiftLeft, OpSize64) => logicalShiftLeftVariable - | (ShiftLeft, OpSize32) => logicalShiftLeftVariable32 - | (ShiftRightLogical, OpSize64) => logicalShiftRightVariable - | (ShiftRightLogical, OpSize32) => logicalShiftRightVariable32 - | (ShiftRightArithmetic, OpSize64) => arithmeticShiftRightVariable - | (ShiftRightArithmetic, OpSize32) => arithmeticShiftRightVariable32 - in - toAssembler(rest, code <::> instr{regN=regN, regM=regM, regD=regD}) - end - - | toAssembler(BitwiseLogical{ bits, regN, regD, opSize, setFlags, logOp} :: rest, code) = - let - val instr = - case (logOp, setFlags, opSize) of - (LogAnd, false, OpSize64) => bitwiseAndImmediate - | (LogAnd, true, OpSize64) => 
bitwiseAndSImmediate - | (LogOr, false, OpSize64) => bitwiseOrImmediate - | (LogXor, false, OpSize64) => bitwiseXorImmediate - - | (LogAnd, false, OpSize32) => bitwiseAndImmediate32 - | (LogAnd, true, OpSize32) => bitwiseAndSImmediate32 - | (LogOr, false, OpSize32) => bitwiseOrImmediate32 - | (LogXor, false, OpSize32) => bitwiseXorImmediate32 - - | _ => raise InternalError "flags not valid with OR or XOR" - in - toAssembler(rest, code <::> instr{regN=regN, regD=regD, bits=bits}) - end - - | toAssembler(MoveGeneralToFP{ regN, regD, floatSize=Float32} :: rest, code) = - toAssembler(rest, code <::> moveGeneralToFloat{regN=regN, regD=regD}) - | toAssembler(MoveGeneralToFP{ regN, regD, floatSize=Double64} :: rest, code) = - toAssembler(rest, code <::> moveGeneralToDouble{regN=regN, regD=regD}) - - | toAssembler(MoveFPToGeneral{ regN, regD, floatSize=Float32} :: rest, code) = - toAssembler(rest, code <::> moveFloatToGeneral{regN=regN, regD=regD}) - | toAssembler(MoveFPToGeneral{ regN, regD, floatSize=Double64} :: rest, code) = - toAssembler(rest, code <::> moveDoubleToGeneral{regN=regN, regD=regD}) - - | toAssembler(CvtIntToFP{ regN, regD, floatSize, opSize} :: rest, code) = - let - val instr = - case (opSize, floatSize) of - (OpSize32, Float32) => convertInt32ToFloat - | (OpSize64, Float32) => convertIntToFloat - | (OpSize32, Double64) => convertInt32ToDouble - | (OpSize64, Double64) => convertIntToDouble - in - toAssembler(rest, code <::> instr{regN=regN, regD=regD}) - end - - | toAssembler(CvtFloatToInt{ round, regN, regD, floatSize, opSize} :: rest, code) = - let - val instr = - case (floatSize, opSize) of - (Float32, OpSize32) => convertFloatToInt32 - | (Float32, OpSize64) => convertFloatToInt - | (Double64, OpSize32) => convertDoubleToInt32 - | (Double64, OpSize64) => convertDoubleToInt - in - toAssembler(rest, code <::> instr round {regN=regN, regD=regD}) - end - - | toAssembler(FPBinaryOp{ regM, regN, regD, floatSize, fpOp} :: rest, code) = - let - val instr = - case (fpOp, floatSize) of - (MultiplyFP, Float32) => multiplyFloat - | (DivideFP, Float32) => divideFloat - | (AddFP, Float32) => addFloat - | (SubtractFP, Float32) => subtractFloat - | (MultiplyFP, Double64) => multiplyDouble - | (DivideFP, Double64) => divideDouble - | (AddFP, Double64) => addDouble - | (SubtractFP, Double64) => subtractDouble - in - toAssembler(rest, code <::> instr {regN=regN, regM=regM, regD=regD}) - end - - | toAssembler(FPComparison{ regM, regN, floatSize} :: rest, code) = - toAssembler(rest, code <::> (case floatSize of Float32 => compareFloat | Double64 => compareDouble){regN=regN, regM=regM}) - - | toAssembler(FPUnaryOp{ regN, regD, fpOp} :: rest, code) = - let - val instr = - case fpOp of - NegFloat => negFloat | NegDouble => negDouble - | AbsFloat => absFloat | AbsDouble => absDouble - | ConvFloatToDble => convertFloatToDouble - | ConvDbleToFloat => convertDoubleToFloat - in - toAssembler(rest, code <::> instr {regN=regN, regD=regD}) - end - - | toAssembler(SetLabel label :: rest, code) = toAssembler(rest, code <::> setLabel label) - - | toAssembler(ConditionalBranch(cond, label) :: rest, code) = toAssembler(rest, code <::> conditionalBranch(cond, label)) - - | toAssembler(UnconditionalBranch label :: rest, code) = toAssembler(rest, code <::> unconditionalBranch label) - - | toAssembler(BranchAndLink label :: rest, code) = toAssembler(rest, code <::> branchAndLink label) - - | toAssembler(BranchReg{regD, brRegType=BRRBranch} :: rest, code) = toAssembler(rest, code <::> branchRegister regD) - | 
toAssembler(BranchReg{regD, brRegType=BRRAndLink} :: rest, code) = toAssembler(rest, code <::> branchAndLinkReg regD) - | toAssembler(BranchReg{regD, brRegType=BRRReturn} :: rest, code) = toAssembler(rest, code <::> returnRegister regD) - - | toAssembler(LoadLabelAddress(reg, label) :: rest, code) = toAssembler(rest, code <::> loadLabelAddress(reg, label)) - - | toAssembler(TestBitBranch{ test, bit, label, onZero } :: rest, code) = - toAssembler(rest, code <::> (if onZero then testBitBranchZero else testBitBranchNonZero)(test, bit, label)) + (* Optimise the pre-assembler code and then generate the final code. *) + fun generateFinalCode {instrs, name, parameters, resultClosure, profileObject, labelMaker=ref labelCount} = + let + val labelTargets = Array.tabulate(labelCount, fn i => (Arm64Assembly.createLabel(), i) ) + + (* Follow the chain of forwarded labels. *) + local + fun forwardLab(labelNo, labels) = + let + val dest as (_, dNo) = Array.sub(labelTargets, labelNo) + in + if dNo = labelNo + then dest + (* This should not happen but just in case... *) + else if List.exists(fn i => i = dNo) labels + then raise InternalError "Infinite loop" + else forwardLab(dNo, dNo::labels) + end + in + fun getLabel labelNo = forwardLab(labelNo, [labelNo]) + val getLabelTarget = #1 o getLabel + end + + fun toAssembler([], code) = code + + | toAssembler(AddImmediate{regN, regD, immed, shifted, opSize, setFlags} :: rest, code) = + let + val instr = + case (opSize, setFlags) of + (OpSize64, false) => addImmediate + | (OpSize32, false) => addImmediate32 + | (OpSize64, true) => addSImmediate + | (OpSize32, true) => addSImmediate32 + in + toAssembler(rest, code <::> instr{regN=regN, regD=regD, immed=immed, shifted=shifted}) + end + + | toAssembler(SubImmediate{regN, regD, immed, shifted, opSize, setFlags} :: rest, code) = + let + val instr = + case (opSize, setFlags) of + (OpSize64, false) => subImmediate + | (OpSize32, false) => subImmediate32 + | (OpSize64, true) => subSImmediate + | (OpSize32, true) => subSImmediate32 + in + toAssembler(rest, code <::> instr{regN=regN, regD=regD, immed=immed, shifted=shifted}) + end + + | toAssembler(AddShiftedReg{regM, regN, regD, shift, opSize, setFlags} :: rest, code) = + let + val instr = + case (opSize, setFlags) of + (OpSize64, false) => addShiftedReg + | (OpSize32, false) => addShiftedReg32 + | (OpSize64, true) => addSShiftedReg + | (OpSize32, true) => addSShiftedReg32 + in + toAssembler(rest, code <::> instr{regM=regM, regN=regN, regD=regD, shift=shift}) + end + + | toAssembler(SubShiftedReg{regM, regN, regD, shift, opSize, setFlags} :: rest, code) = + let + val instr = + case (opSize, setFlags) of + (OpSize64, false) => subShiftedReg + | (OpSize32, false) => subShiftedReg32 + | (OpSize64, true) => subSShiftedReg + | (OpSize32, true) => subSShiftedReg32 + in + toAssembler(rest, code <::> instr{regM=regM, regN=regN, regD=regD, shift=shift}) + end + + | toAssembler(AddExtendedReg{regM, regN, regD, extend, opSize, setFlags} :: rest, code) = + (* Add/SubExtended are only used to access XSP. 
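The labelTargets array introduced above gives the label-forwarding scheme its shape: every label slot starts by pointing at itself, a later peephole rewrite may redirect one slot at another label, and resolution chases the chain while guarding against cycles. Below is a minimal standalone sketch of the same idea using bare label numbers (the real table pairs each slot with an Arm64Assembly label so that getLabelTarget can return it); the names redirect and resolve are illustrative only, not part of the patch.

    val nLabels = 4
    val targets = Array.tabulate(nLabels, fn i => i)    (* each label initially maps to itself *)

    fun redirect (from, dest) = Array.update(targets, from, dest)

    fun resolve labelNo =
    let
        fun follow (n, seen) =
        let
            val d = Array.sub(targets, n)
        in
            if d = n then n                              (* reached the final target *)
            else if List.exists (fn i => i = d) seen
            then raise Fail "label cycle"                (* should not happen, as noted above *)
            else follow (d, d :: seen)
        end
    in
        follow (labelNo, [labelNo])
    end

    (* e.g. redirect(1, 2); redirect(2, 3); then resolve 1 = 3 and resolve 0 = 0 *)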
*) + let + val instr = + case (opSize, setFlags) of + (OpSize64, false) => addExtendedReg + | (OpSize32, false) => raise InternalError "AddExtendedReg; 32" + | (OpSize64, true) => addSExtendedReg + | (OpSize32, true) => raise InternalError "AddExtendedReg; 32" + in + toAssembler(rest, code <::> instr{regM=regM, regN=regN, regD=regD, extend=extend}) + end + + | toAssembler(SubExtendedReg{regM, regN, regD, extend, opSize, setFlags} :: rest, code) = + let + val instr = + case (opSize, setFlags) of + (OpSize64, false) => subExtendedReg + | (OpSize32, false) => raise InternalError "AddExtendedReg; 32" + | (OpSize64, true) => subSExtendedReg + | (OpSize32, true) => raise InternalError "AddExtendedReg; 32" + in + toAssembler(rest, code <::> instr{regM=regM, regN=regN, regD=regD, extend=extend}) + end + + | toAssembler(MultiplyAndAddSub{regM, regN, regA, regD, multKind} :: rest, code) = + let + val instr = + case multKind of + MultAdd32 => multiplyAndAdd32{regM=regM, regN=regN, regA=regA, regD=regD} + | MultSub32 => multiplyAndSub32{regM=regM, regN=regN, regA=regA, regD=regD} + | MultAdd64 => multiplyAndAdd{regM=regM, regN=regN, regA=regA, regD=regD} + | MultSub64 => multiplyAndSub{regM=regM, regN=regN, regA=regA, regD=regD} + | SignedMultAddLong => signedMultiplyAndAddLong{regM=regM, regN=regN, regA=regA, regD=regD} + | SignedMultHigh => signedMultiplyHigh{regM=regM, regN=regN, regD=regD} + in + toAssembler(rest, code <::> instr) + end + + | toAssembler(DivideRegs{regM, regN, regD, isSigned, opSize} :: rest, code) = + let + val instr = + case (isSigned, opSize) of + (true, OpSize64) => signedDivide + | (true, OpSize32) => signedDivide32 + | (false, OpSize64) => unsignedDivide + | (false, OpSize32) => unsignedDivide32 + in + toAssembler(rest, code <::> instr{regN=regN, regM=regM, regD=regD}) + end + + | toAssembler(LogicalShiftedReg{regM, regN, regD, shift, logOp, opSize, setFlags} :: rest, code) = + let + val instr = + case (logOp, setFlags, opSize) of + (LogAnd, false, OpSize64) => andShiftedReg + | (LogAnd, true, OpSize64) => andsShiftedReg + | (LogOr, false, OpSize64) => orrShiftedReg + | (LogXor, false, OpSize64) => eorShiftedReg + + | (LogAnd, false, OpSize32) => andShiftedReg32 + | (LogAnd, true, OpSize32) => andsShiftedReg32 + | (LogOr, false, OpSize32) => orrShiftedReg32 + | (LogXor, false, OpSize32) => eorShiftedReg32 + + | _ => raise InternalError "setFlags not valid with OR or XOR" + (* There are also versions of AND/OR/XOR which operate on a complement (NOT) + of the shifted register. It's probably not worth looking for a use for them. 
*) + in + toAssembler(rest, code <::> instr{regN=regN, regM=regM, regD=regD, shift=shift}) + end + + | toAssembler(LoadRegScaled{regT, regN, unitOffset, loadType} :: rest, code) = + let + val instr = + case loadType of + Load64 => loadRegScaled + | Load32 => loadRegScaled32 + | Load16 => loadRegScaled16 + | Load8 => loadRegScaledByte + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, unitOffset=unitOffset}) + end + + | toAssembler(StoreRegScaled{regT, regN, unitOffset, loadType} :: rest, code) = + let + val instr = + case loadType of + Load64 => storeRegScaled + | Load32 => storeRegScaled32 + | Load16 => storeRegScaled16 + | Load8 => storeRegScaledByte + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, unitOffset=unitOffset}) + end + + | toAssembler(LoadFPRegScaled{regT, regN, unitOffset, floatSize} :: rest, code) = + let + val instr = + case floatSize of + Float32 => loadRegScaledFloat + | Double64 => loadRegScaledDouble + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, unitOffset=unitOffset}) + end + + | toAssembler(StoreFPRegScaled{regT, regN, unitOffset, floatSize} :: rest, code) = + let + val instr = + case floatSize of + Float32 => storeRegScaledFloat + | Double64 => storeRegScaledDouble + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, unitOffset=unitOffset}) + end + + | toAssembler(LoadRegUnscaled{regT, regN, byteOffset, loadType, unscaledType} :: rest, code) = + let + val instr = + case (loadType, unscaledType) of + (Load64, NoUpdate) => loadRegUnscaled + | (Load32, NoUpdate) => loadRegUnscaled32 + | (Load16, NoUpdate) => loadRegUnscaled16 + | (Load8, NoUpdate) => loadRegUnscaledByte + | (Load64, PreIndex) => loadRegPreIndex + | (Load32, PreIndex) => loadRegPreIndex32 + | (Load16, PreIndex) => raise InternalError "loadRegPreIndex16" + | (Load8, PreIndex) => loadRegPreIndexByte + | (Load64, PostIndex) => loadRegPostIndex + | (Load32, PostIndex) => loadRegPostIndex32 + | (Load16, PostIndex) => raise InternalError "loadRegPostIndex16" + | (Load8, PostIndex) => loadRegPostIndexByte + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, byteOffset=byteOffset}) + end + + | toAssembler(LoadFPRegUnscaled{regT, regN, byteOffset, floatSize, unscaledType} :: rest, code) = + let + val instr = + case (floatSize, unscaledType) of + (Float32, NoUpdate) => loadRegUnscaledFloat + | (Double64, NoUpdate) => loadRegUnscaledDouble + | _ => raise InternalError "LoadFPRegUnscaled: pre/post indexed" + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, byteOffset=byteOffset}) + end + + | toAssembler(StoreRegUnscaled{regT, regN, byteOffset, loadType, unscaledType} :: rest, code) = + let + val instr = + case (loadType, unscaledType) of + (Load64, NoUpdate) => storeRegUnscaled + | (Load32, NoUpdate) => storeRegUnscaled32 + | (Load16, NoUpdate) => storeRegUnscaled16 + | (Load8, NoUpdate) => storeRegUnscaledByte + | (Load64, PreIndex) => storeRegPreIndex + | (Load32, PreIndex) => storeRegPreIndex32 + | (Load16, PreIndex) => raise InternalError "storeRegPreIndex16" + | (Load8, PreIndex) => storeRegPreIndexByte + | (Load64, PostIndex) => storeRegPostIndex + | (Load32, PostIndex) => storeRegPostIndex32 + | (Load16, PostIndex) => raise InternalError "storeRegPostIndex16" + | (Load8, PostIndex) => storeRegPostIndexByte + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, byteOffset=byteOffset}) + end + + | toAssembler(StoreFPRegUnscaled{regT, regN, byteOffset, floatSize, unscaledType} :: rest, code) = + let + val instr = + case 
(floatSize, unscaledType) of + (Float32, NoUpdate) => storeRegUnscaledFloat + | (Double64, NoUpdate) => storeRegUnscaledDouble + | _ => raise InternalError "StoreFPRegUnscaled: pre/post indexed" + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, byteOffset=byteOffset}) + end + + | toAssembler(LoadRegIndexed{regT, regN, regM, loadType, option} :: rest, code) = + let + val instr = + case loadType of + Load64 => loadRegIndexed + | Load32 => loadRegIndexed32 + | Load16 => loadRegIndexed16 + | Load8 => loadRegIndexedByte + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, regM=regM, option=option}) + end + + | toAssembler(StoreRegIndexed{regT, regN, regM, loadType, option} :: rest, code) = + let + val instr = + case loadType of + Load64 => storeRegIndexed + | Load32 => storeRegIndexed32 + | Load16 => storeRegIndexed16 + | Load8 => storeRegIndexedByte + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, regM=regM, option=option}) + end + + | toAssembler(LoadFPRegIndexed{regT, regN, regM, floatSize, option} :: rest, code) = + let + val instr = + case floatSize of + Float32 => loadRegIndexedFloat + | Double64 => loadRegIndexedDouble + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, regM=regM, option=option}) + end + + | toAssembler(StoreFPRegIndexed{regT, regN, regM, floatSize, option} :: rest, code) = + let + val instr = + case floatSize of + Float32 => storeRegIndexedFloat + | Double64 => storeRegIndexedDouble + in + toAssembler(rest, code <::> instr{regT=regT, regN=regN, regM=regM, option=option}) + end + + | toAssembler(LoadAcquireReg{regN, regT, loadType} :: rest, code) = + let + val loadInstr = + case loadType of + Load64 => loadAcquire + | Load32 => loadAcquire32 + | Load8 => loadAcquireByte + | _ => raise InternalError "LoadAcquire: Unsupported size" (* Not used *) + in + toAssembler(rest, code <::> loadInstr{regT=regT, regN=regN}) + end + + | toAssembler(StoreReleaseReg{regN, regT, loadType} :: rest, code) = + let + val storeInstr = + case loadType of + Load64 => storeRelease + | Load32 => storeRelease32 + | Load8 => storeReleaseByte + | _ => raise InternalError "StoreRelease: Unsupported size" (* Not used *) + in + toAssembler(rest, code <::> storeInstr{regT=regT, regN=regN}) + end + + | toAssembler(LoadAcquireExclusiveRegister{regN, regT} :: rest, code) = + toAssembler(rest, code <::> loadAcquireExclusiveRegister{regN=regN, regT=regT}) + + | toAssembler(StoreReleaseExclusiveRegister{regN, regT, regS} :: rest, code) = + toAssembler(rest, code <::> storeReleaseExclusiveRegister{regN=regN, regT=regT, regS=regS}) + + | toAssembler(MemBarrier :: rest, code) = + toAssembler(rest, code <::> dmbIsh) + + | toAssembler(LoadRegPair{ regT1, regT2, regN, unitOffset, loadType, unscaledType} :: rest, code) = + let + val instr = + case (loadType, unscaledType) of + (Load64, NoUpdate) => loadPairOffset + | (Load64, PreIndex) => loadPairPreIndexed + | (Load64, PostIndex) => loadPairPostIndexed + | (Load32, NoUpdate) => loadPairOffset32 + | (Load32, PreIndex) => loadPairPreIndexed32 + | (Load32, PostIndex) => loadPairPostIndexed32 + | _ => raise InternalError "LoadRegPair: unimplemented" + in + toAssembler(rest, code <::> instr{regT1=regT1, regT2=regT2, regN=regN, unitOffset=unitOffset}) + end + + | toAssembler(StoreRegPair{ regT1, regT2, regN, unitOffset, loadType, unscaledType} :: rest, code) = + let + val instr = + case (loadType, unscaledType) of + (Load64, NoUpdate) => storePairOffset + | (Load64, PreIndex) => storePairPreIndexed + | (Load64, PostIndex) 
=> storePairPostIndexed + | (Load32, NoUpdate) => storePairOffset32 + | (Load32, PreIndex) => storePairPreIndexed32 + | (Load32, PostIndex) => storePairPostIndexed32 + | _ => raise InternalError "StoreRegPair: unimplemented" + in + toAssembler(rest, code <::> instr{regT1=regT1, regT2=regT2, regN=regN, unitOffset=unitOffset}) + end + + | toAssembler(LoadFPRegPair{ regT1, regT2, regN, unitOffset, floatSize, unscaledType} :: rest, code) = + let + val instr = + case (floatSize, unscaledType) of + (Double64, NoUpdate) => loadPairOffsetDouble + | (Double64, PreIndex) => loadPairPreIndexedDouble + | (Double64, PostIndex) => loadPairPostIndexedDouble + | (Float32, NoUpdate) => loadPairOffsetFloat + | (Float32, PreIndex) => loadPairPreIndexedFloat + | (Float32, PostIndex) => loadPairPostIndexedFloat + in + toAssembler(rest, code <::> instr{regT1=regT1, regT2=regT2, regN=regN, unitOffset=unitOffset}) + end + + | toAssembler(StoreFPRegPair{ regT1, regT2, regN, unitOffset, floatSize, unscaledType} :: rest, code) = + let + val instr = + case (floatSize, unscaledType) of + (Double64, NoUpdate) => storePairOffsetDouble + | (Double64, PreIndex) => storePairPreIndexedDouble + | (Double64, PostIndex) => storePairPostIndexedDouble + | (Float32, NoUpdate) => storePairOffsetFloat + | (Float32, PreIndex) => storePairPreIndexedFloat + | (Float32, PostIndex) => storePairPostIndexedFloat + in + toAssembler(rest, code <::> instr{regT1=regT1, regT2=regT2, regN=regN, unitOffset=unitOffset}) + end + + | toAssembler(ConditionalSet{regD, regTrue, regFalse, cond, condSet, opSize} :: rest, code) = + let + val instr = + case (condSet, opSize) of + (CondSet, OpSize64) => conditionalSet + | (CondSetIncr, OpSize64) => conditionalSetIncrement + | (CondSetInvert, OpSize64) => conditionalSetInverted + | (CondSetNegate, OpSize64) => conditionalSetNegated + | (CondSet, OpSize32) => conditionalSet32 + | (CondSetIncr, OpSize32) => conditionalSetIncrement32 + | (CondSetInvert, OpSize32) => conditionalSetInverted32 + | (CondSetNegate, OpSize32) => conditionalSetNegated32 + in + toAssembler(rest, code <::> instr{regD=regD, regTrue=regTrue, regFalse=regFalse, cond=cond}) + end + + | toAssembler(BitField{immr, imms, regN, regD, opSize, bitfieldKind} :: rest, code) = + let + val bfInstr = + case (bitfieldKind, opSize) of + (BFSigned, OpSize64) => signedBitfieldMove64 + | (BFUnsigned, OpSize64) => unsignedBitfieldMove64 + | (BFInsert, OpSize64) => bitfieldMove64 + | (BFSigned, OpSize32) => signedBitfieldMove32 + | (BFUnsigned, OpSize32) => unsignedBitfieldMove32 + | (BFInsert, OpSize32) => bitfieldMove32 + in + toAssembler(rest, code <::> bfInstr{immr=immr, imms=imms, regN=regN, regD=regD}) + end + + | toAssembler(ShiftRegisterVariable{regM, regN, regD, opSize, shiftDirection} :: rest, code) = + let + val instr = + case (shiftDirection, opSize) of + (ShiftLeft, OpSize64) => logicalShiftLeftVariable + | (ShiftLeft, OpSize32) => logicalShiftLeftVariable32 + | (ShiftRightLogical, OpSize64) => logicalShiftRightVariable + | (ShiftRightLogical, OpSize32) => logicalShiftRightVariable32 + | (ShiftRightArithmetic, OpSize64) => arithmeticShiftRightVariable + | (ShiftRightArithmetic, OpSize32) => arithmeticShiftRightVariable32 + in + toAssembler(rest, code <::> instr{regN=regN, regM=regM, regD=regD}) + end + + | toAssembler(BitwiseLogical{ bits, regN, regD, opSize, setFlags, logOp} :: rest, code) = + let + val instr = + case (logOp, setFlags, opSize) of + (LogAnd, false, OpSize64) => bitwiseAndImmediate + | (LogAnd, true, OpSize64) => 
bitwiseAndSImmediate + | (LogOr, false, OpSize64) => bitwiseOrImmediate + | (LogXor, false, OpSize64) => bitwiseXorImmediate + + | (LogAnd, false, OpSize32) => bitwiseAndImmediate32 + | (LogAnd, true, OpSize32) => bitwiseAndSImmediate32 + | (LogOr, false, OpSize32) => bitwiseOrImmediate32 + | (LogXor, false, OpSize32) => bitwiseXorImmediate32 + + | _ => raise InternalError "flags not valid with OR or XOR" + in + toAssembler(rest, code <::> instr{regN=regN, regD=regD, bits=bits}) + end + + | toAssembler(MoveGeneralToFP{ regN, regD, floatSize=Float32} :: rest, code) = + toAssembler(rest, code <::> moveGeneralToFloat{regN=regN, regD=regD}) + | toAssembler(MoveGeneralToFP{ regN, regD, floatSize=Double64} :: rest, code) = + toAssembler(rest, code <::> moveGeneralToDouble{regN=regN, regD=regD}) + + | toAssembler(MoveFPToGeneral{ regN, regD, floatSize=Float32} :: rest, code) = + toAssembler(rest, code <::> moveFloatToGeneral{regN=regN, regD=regD}) + | toAssembler(MoveFPToGeneral{ regN, regD, floatSize=Double64} :: rest, code) = + toAssembler(rest, code <::> moveDoubleToGeneral{regN=regN, regD=regD}) + + | toAssembler(CvtIntToFP{ regN, regD, floatSize, opSize} :: rest, code) = + let + val instr = + case (opSize, floatSize) of + (OpSize32, Float32) => convertInt32ToFloat + | (OpSize64, Float32) => convertIntToFloat + | (OpSize32, Double64) => convertInt32ToDouble + | (OpSize64, Double64) => convertIntToDouble + in + toAssembler(rest, code <::> instr{regN=regN, regD=regD}) + end + + | toAssembler(CvtFloatToInt{ round, regN, regD, floatSize, opSize} :: rest, code) = + let + val instr = + case (floatSize, opSize) of + (Float32, OpSize32) => convertFloatToInt32 + | (Float32, OpSize64) => convertFloatToInt + | (Double64, OpSize32) => convertDoubleToInt32 + | (Double64, OpSize64) => convertDoubleToInt + in + toAssembler(rest, code <::> instr round {regN=regN, regD=regD}) + end + + | toAssembler(FPBinaryOp{ regM, regN, regD, floatSize, fpOp} :: rest, code) = + let + val instr = + case (fpOp, floatSize) of + (MultiplyFP, Float32) => multiplyFloat + | (DivideFP, Float32) => divideFloat + | (AddFP, Float32) => addFloat + | (SubtractFP, Float32) => subtractFloat + | (MultiplyFP, Double64) => multiplyDouble + | (DivideFP, Double64) => divideDouble + | (AddFP, Double64) => addDouble + | (SubtractFP, Double64) => subtractDouble + in + toAssembler(rest, code <::> instr {regN=regN, regM=regM, regD=regD}) + end + + | toAssembler(FPComparison{ regM, regN, floatSize} :: rest, code) = + toAssembler(rest, code <::> (case floatSize of Float32 => compareFloat | Double64 => compareDouble){regN=regN, regM=regM}) + + | toAssembler(FPUnaryOp{ regN, regD, fpOp} :: rest, code) = + let + val instr = + case fpOp of + NegFloat => negFloat | NegDouble => negDouble + | AbsFloat => absFloat | AbsDouble => absDouble + | ConvFloatToDble => convertFloatToDouble + | ConvDbleToFloat => convertDoubleToFloat + in + toAssembler(rest, code <::> instr {regN=regN, regD=regD}) + end + + | toAssembler(SetLabel(Label lab) :: rest, code) = toAssembler(rest, code <::> setLabel(getLabelTarget lab)) + + | toAssembler(ConditionalBranch(cond, Label lab) :: rest, code) = toAssembler(rest, code <::> conditionalBranch(cond, getLabelTarget lab)) + + | toAssembler(UnconditionalBranch(Label lab) :: rest, code) = toAssembler(rest, code <::> unconditionalBranch(getLabelTarget lab)) + + | toAssembler(BranchAndLink(Label lab) :: rest, code) = toAssembler(rest, code <::> branchAndLink(getLabelTarget lab)) + + | toAssembler(BranchReg{regD, brRegType=BRRBranch} :: 
rest, code) = toAssembler(rest, code <::> branchRegister regD) + | toAssembler(BranchReg{regD, brRegType=BRRAndLink} :: rest, code) = toAssembler(rest, code <::> branchAndLinkReg regD) + | toAssembler(BranchReg{regD, brRegType=BRRReturn} :: rest, code) = toAssembler(rest, code <::> returnRegister regD) + + | toAssembler(LoadLabelAddress(reg, Label lab) :: rest, code) = toAssembler(rest, code <::> loadLabelAddress(reg, getLabelTarget lab)) + + | toAssembler(TestBitBranch{ test, bit, label=Label lab, onZero } :: rest, code) = + toAssembler(rest, code <::> (if onZero then testBitBranchZero else testBitBranchNonZero)(test, bit, getLabelTarget lab)) + + | toAssembler(CompareBranch{ test, label=Label lab, onZero, opSize } :: rest, code) = + let + val instr = + case (onZero, opSize) of + (true, OpSize64) => compareBranchZero + | (false, OpSize64) => compareBranchNonZero + | (true, OpSize32) => compareBranchZero32 + | (false, OpSize32) => compareBranchNonZero32 + in + toAssembler(rest, code <::> instr(test, getLabelTarget lab)) + end + + (* Register-register moves - special case for XSP. *) + | toAssembler(MoveXRegToXReg{sReg=XSP, dReg} :: rest, code) = + toAssembler(rest, code <::> addImmediate{regN=XSP, regD=dReg, immed=0w0, shifted=false}) + | toAssembler(MoveXRegToXReg{sReg, dReg=XSP} :: rest, code) = + toAssembler(rest, code <::> addImmediate{regN=sReg, regD=XSP, immed=0w0, shifted=false}) + | toAssembler(MoveXRegToXReg{sReg, dReg} :: rest, code) = + toAssembler(rest, code <::> orrShiftedReg{regN=XZero, regM=sReg, regD=dReg, shift=ShiftNone}) + + | toAssembler(LoadNonAddr(xReg, value) :: rest, code) = + let + (* Load a non-address constant. Tries to use movz/movn/movk if + that can be done easily, othewise uses loadNonAddressConstant to + load the value from the non-address constant area. *) + fun extW (v, h) = Word.andb(Word.fromLarge(LargeWord.>>(Word64.toLarge v, h*0w16)), 0wxffff) + val hw0 = extW(value, 0w3) and hw1 = extW(value, 0w2) + and hw2 = extW(value, 0w1) and hw3 = extW(value, 0w0) + val nextCode = + if value < 0wx100000000 + then + let + (* 32-bit constants can be loaded using at most a movz and movk but + various cases can be reduced since all 32-bit operations set + the top word to zero. *) + val hi = hw2 + and lo = hw3 + in + (* 32-bit constants can be loaded with at most a movz and a movk but + it may be that there is something shorter. 
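A condensed standalone sketch of the decision the following case analysis makes for a 32-bit value, with strings standing in for the movz/movn/movk constructors (planLoad32 and the strings are illustrative only; the single-ORR case for encodable bit patterns is omitted here):

    fun extW (v, h) = Word.andb(Word.fromLarge(LargeWord.>>(Word64.toLarge v, h * 0w16)), 0wxffff)

    fun planLoad32 (value: Word64.word) =
    let
        val hi = extW(value, 0w1) and lo = extW(value, 0w0)
    in
        if hi = 0w0 then ["movz lo"]                     (* top halfword already zero *)
        else if hi = 0wxffff then ["movn (inverted lo)"] (* all-ones top halfword: invert the low half *)
        else if lo = 0w0 then ["movz hi, lsl #16"]
        else ["movz lo", "movk hi, lsl #16"]             (* general case: two instructions *)
    end

    (* planLoad32 0wxffff0003 = ["movn (inverted lo)"]
       planLoad32 0wx12340 = ["movz lo", "movk hi, lsl #16"] *)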
*) + if hi = 0w0 + then code <::> moveZero32{regD=xReg, immediate=lo, shift=0w0} + else if hi = 0wxffff + then code <::> moveNot32{regD=xReg, immediate=Word.xorb(0wxffff, lo), shift=0w0} + else if lo = 0w0 + then code <::> moveZero32{regD=xReg, immediate=hi, shift=0w16} + else if isEncodableBitPattern(value, WordSize32) + then code <::> bitwiseOrImmediate32{bits=value, regN=XZero, regD=xReg} + else (* Have to use two instructions *) + code <::> + moveZero32{regD=xReg, immediate=lo, shift=0w0} <::> + moveKeep{regD=xReg, immediate=hi, shift=0w16} + end + else if hw0 = 0wxffff andalso hw1 = 0wxffff andalso hw2 = 0wxffff + then code <::> moveNot{regD=xReg, immediate=Word.xorb(0wxffff, hw3), shift=0w0} + else if hw1 = 0w0 andalso hw2 = 0w0 + then (* This is common for length words with a flags byte *) + code <::> moveZero32{regD=xReg, immediate=hw3, shift=0w0} <::> + moveKeep{regD=xReg, immediate=hw0, shift=0w48} + else code <::> loadNonAddressConstant(xReg, value) + in + toAssembler(rest, nextCode) + end + + | toAssembler(LoadAddr(dReg, source) :: rest, code) = toAssembler(rest, loadAddressConstant(dReg, source) :: code) + + | toAssembler(RTSTrap{ rtsEntry, work, save } :: rest, code) = + let + (* Because X30 is used in the branchAndLink it has to be pushed + across any trap. *) + val saveX30 = List.exists (fn r => r = X30) save + val preserve = List.filter (fn r => r <> X30) save + in + toAssembler(rest, + code <@> + (if saveX30 then [storeRegPreIndex{regT=X30, regN=X_MLStackPtr, byteOffset= ~8}] else []) <::> + loadRegScaled{regT=work, regN=X_MLAssemblyInt, unitOffset=rtsEntry} <::> + branchAndLinkReg work <::> + registerMask preserve <@> + (if saveX30 then [loadRegPostIndex{regT=X30, regN=X_MLStackPtr, byteOffset= 8}] else []) + ) + end + + | toAssembler(AllocateMemoryFixedSize{ bytes, dest, save, work } :: rest, code) = + let + val label = Arm64Assembly.createLabel() + val saveX30 = List.exists (fn r => r = X30) save + val preserve = List.filter (fn r => r <> X30) save + + val allocCode = + code <::> + (* Subtract the number of bytes required from the heap pointer. *) + subImmediate{regN=X_MLHeapAllocPtr, regD=dest, immed=bytes, shifted=false} <::> + (* Compare the result with the heap limit. *) + subSShiftedReg{regM=X_MLHeapLimit, regN=dest, regD=XZero, shift=ShiftNone} <::> + conditionalBranch(CondCarrySet, label) <@> + (if saveX30 then [storeRegPreIndex{regT=X30, regN=X_MLStackPtr, byteOffset= ~8}] else []) <::> + loadRegScaled{regT=work, regN=X_MLAssemblyInt, unitOffset=heapOverflowCallOffset} <::> + branchAndLinkReg work <::> + registerMask preserve <@> + (if saveX30 then [loadRegPostIndex{regT=X30, regN=X_MLStackPtr, byteOffset= 8}] else []) <::> + setLabel label <::> + (* Update the heap pointer. *) + orrShiftedReg{regN=XZero, regM=dest, regD=X_MLHeapAllocPtr, shift=ShiftNone} + in + toAssembler(rest, allocCode) + end + + | toAssembler(AllocateMemoryVariableSize{ sizeReg, dest, save, work } :: rest, code) = + let + val trapLabel = Arm64Assembly.createLabel() and noTrapLabel = Arm64Assembly.createLabel() + val saveX30 = List.exists (fn r => r = X30) save + val preserve = List.filter (fn r => r <> X30) save + + val allocCode = + ( + (* Subtract the size into the result register. Subtract a further word for + the length word and round down in 32-in-64. 
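The RTS-trap and allocation sequences above share one pattern: X30 holds the return address for branchAndLinkReg, so if the caller asked for it to be preserved it is pushed with a pre-indexed store before the call and popped with a post-indexed load afterwards, while the remaining registers go into the register mask. A hedged sketch of just that wrapping, with strings standing in for instructions (wrapRtsCall, mlsp and the mnemonic strings are illustrative, not the code generator's API):

    fun wrapRtsCall (save: string list, call: string list -> string list) =
    let
        val saveX30 = List.exists (fn r => r = "x30") save
        val preserve = List.filter (fn r => r <> "x30") save   (* x30 is restored separately *)
    in
        (if saveX30 then ["str x30, [mlsp, #-8]!"] else []) @
        call preserve @
        (if saveX30 then ["ldr x30, [mlsp], #8"] else [])
    end

    (* wrapRtsCall(["x0", "x30"], fn regs => ["bl rts_entry", "mask " ^ String.concatWith "," regs]) *)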
*) + if is32in64 + then code <::> + subShiftedReg{regM=sizeReg, regN=X_MLHeapAllocPtr, regD=dest, shift=ShiftLSL 0w2} <::> + subImmediate{regN=dest, regD=dest, immed=0w4, shifted=false} <::> + bitwiseAndImmediate{bits= ~ 0w8, regN=dest, regD=dest} + else code <::> + subShiftedReg{regM=sizeReg, regN=X_MLHeapAllocPtr, regD=dest, shift=ShiftLSL 0w3} <::> + subImmediate{regN=dest, regD=dest, immed=0w8, shifted=false} + ) <::> + (* Check against the limit. If the size is large enough it is possible that this could wrap round. + To check for that we trap if either the result is less than the limit or if it is + now greater than the allocation pointer. *) + subSShiftedReg{regM=X_MLHeapLimit, regN=dest, regD=XZero, shift=ShiftNone} <::> + conditionalBranch(CondCarryClear, trapLabel) <::> + subSShiftedReg{regM=X_MLHeapAllocPtr, regN=dest, regD=XZero, shift=ShiftNone} <::> + conditionalBranch(CondCarryClear, noTrapLabel) <::> + setLabel trapLabel <@> + (if saveX30 then [storeRegPreIndex{regT=X30, regN=X_MLStackPtr, byteOffset= ~8}] else []) <::> + loadRegScaled{regT=work, regN=X_MLAssemblyInt, unitOffset=heapOverflowCallOffset} <::> + branchAndLinkReg work <::> + registerMask preserve <@> + (if saveX30 then [loadRegPostIndex{regT=X30, regN=X_MLStackPtr, byteOffset= 8}] else []) <::> + setLabel noTrapLabel <::> + (* Update the heap pointer. *) + orrShiftedReg{regN=XZero, regM=dest, regD=X_MLHeapAllocPtr, shift=ShiftNone} + in + toAssembler(rest, allocCode) + end + + | toAssembler(BranchTable{ startLabel=Label lab, brTable } :: rest, code) = + toAssembler(rest, + List.foldl (fn (Label lab, code) => (unconditionalBranch(getLabelTarget lab)) :: code) + (code <::> setLabel(getLabelTarget lab)) brTable) + + | toAssembler(LoadGlobalHeapBaseInCallback dest :: rest, code) = + toAssembler(rest, + code <@> List.rev(loadGlobalHeapBaseInCallback dest)) + + (* Optimisation passes. *) + + fun forward([], list, rep) = reverse(list, [], rep) + + | forward(SetLabel(Label srcLab) :: (ubr as UnconditionalBranch(Label destLab)) :: tl, list, _) = + if srcLab = destLab + (* We should never get this because there should always be a stack-check to + allow a loop to be broken. If that ever changes we need to retain the label. *) + then raise InternalError "Infinite loop detected" + else (* Mark this to forward to its destination. *) + ( + Array.update(labelTargets, srcLab, getLabel destLab); + forward(ubr :: tl, list, true) + ) - | toAssembler(CompareBranch{ test, label, onZero, opSize } :: rest, code) = - let - val instr = - case (onZero, opSize) of - (true, OpSize64) => compareBranchZero - | (false, OpSize64) => compareBranchNonZero - | (true, OpSize32) => compareBranchZero32 - | (false, OpSize32) => compareBranchNonZero32 - in - toAssembler(rest, code <::> instr(test, label)) - end + | forward(SetLabel(Label jmpLab1) :: (tl as SetLabel(Label jmpLab2) :: _), list, _) = + (* Eliminate adjacent labels. They complicate the other tests although they + don't incur any run-time cost. *) + ( + (* Any reference to the first label is forwarded to the second. *) + Array.update(labelTargets, jmpLab1, getLabel jmpLab2); + forward(tl, list, true) + ) - (* Register-register moves - special case for XSP. 
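The limit test in the variable-size allocation above amounts to two unsigned comparisons on the (possibly wrapped) new allocation pointer. A tiny model of just that check, assuming 64-bit word arithmetic (allocOk is an illustrative name, not part of the code generator):

    fun allocOk {allocPtr: Word64.word, heapLimit: Word64.word, bytes: Word64.word} =
    let
        val newAlloc = allocPtr - bytes      (* wraps round for a sufficiently large request *)
    in
        (* Trap, i.e. not OK, if the result is below the limit or above the old allocation pointer. *)
        newAlloc >= heapLimit andalso newAlloc <= allocPtr
    end

    (* allocOk {allocPtr=0wx1000, heapLimit=0wx800, bytes=0wx100} = true
       allocOk {allocPtr=0wx1000, heapLimit=0wx800, bytes=0wx2000} = false, because the subtraction wrapped *)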
*) - | toAssembler(MoveXRegToXReg{sReg=XSP, dReg} :: rest, code) = - toAssembler(rest, code <::> addImmediate{regN=XSP, regD=dReg, immed=0w0, shifted=false}) - | toAssembler(MoveXRegToXReg{sReg, dReg=XSP} :: rest, code) = - toAssembler(rest, code <::> addImmediate{regN=sReg, regD=XSP, immed=0w0, shifted=false}) - | toAssembler(MoveXRegToXReg{sReg, dReg} :: rest, code) = - toAssembler(rest, code <::> orrShiftedReg{regN=XZero, regM=sReg, regD=dReg, shift=ShiftNone}) + | forward((ubr as UnconditionalBranch(Label ubrLab)) :: (tl as SetLabel(Label jumpLab) :: _), list, rep) = + (* Eliminate unconditional jumps to the next instruction. *) + if ubrLab = jumpLab + then forward(tl, list, true) + else forward(tl, ubr :: list, rep) - | toAssembler(LoadNonAddr(xReg, value) :: rest, code) = - let - (* Load a non-address constant. Tries to use movz/movn/movk if - that can be done easily, othewise uses loadNonAddressConstant to - load the value from the non-address constant area. *) - fun extW (v, h) = Word.andb(Word.fromLarge(LargeWord.>>(Word64.toLarge v, h*0w16)), 0wxffff) - val hw0 = extW(value, 0w3) and hw1 = extW(value, 0w2) - and hw2 = extW(value, 0w1) and hw3 = extW(value, 0w0) - val nextCode = - if value < 0wx100000000 - then - let - (* 32-bit constants can be loaded using at most a movz and movk but - various cases can be reduced since all 32-bit operations set - the top word to zero. *) - val hi = hw2 - and lo = hw3 - in - (* 32-bit constants can be loaded with at most a movz and a movk but - it may be that there is something shorter. *) - if hi = 0w0 - then code <::> moveZero32{regD=xReg, immediate=lo, shift=0w0} - else if hi = 0wxffff - then code <::> moveNot32{regD=xReg, immediate=Word.xorb(0wxffff, lo), shift=0w0} - else if lo = 0w0 - then code <::> moveZero32{regD=xReg, immediate=hi, shift=0w16} - else if isEncodableBitPattern(value, WordSize32) - then code <::> bitwiseOrImmediate32{bits=value, regN=XZero, regD=xReg} - else (* Have to use two instructions *) - code <::> - moveZero32{regD=xReg, immediate=lo, shift=0w0} <::> - moveKeep{regD=xReg, immediate=hi, shift=0w16} - end - else if hw0 = 0wxffff andalso hw1 = 0wxffff andalso hw2 = 0wxffff - then code <::> moveNot{regD=xReg, immediate=Word.xorb(0wxffff, hw3), shift=0w0} - else if hw1 = 0w0 andalso hw2 = 0w0 - then (* This is common for length words with a flags byte *) - code <::> moveZero32{regD=xReg, immediate=hw3, shift=0w0} <::> - moveKeep{regD=xReg, immediate=hw0, shift=0w48} - else code <::> loadNonAddressConstant(xReg, value) - in - toAssembler(rest, nextCode) - end + | forward((cbr as ConditionalBranch(test, Label cbrLab)) :: (ubr as UnconditionalBranch(Label ubrLab)) :: + (tl as SetLabel(Label jumpLab) :: _), list, rep) = + if cbrLab = jumpLab + then (* We have a conditional branch followed by an unconditional branch followed by the destination of + the conditional branch. Eliminate the unconditional branch by reversing the test. + This can often happen if one branch of an if-then-else has been reduced to zero + because the same register has been chosen for the input and output. *) + forward(tl (* Leave the label just in case it's used elsewhere*), + ConditionalBranch(invertTest test, Label ubrLab) :: list, true) - | toAssembler(LoadAddr(dReg, source) :: rest, code) = toAssembler(rest, loadAddressConstant(dReg, source) :: code) + else forward(ubr :: tl, cbr :: list, rep) - | toAssembler(RTSTrap{ rtsEntry, work, save } :: rest, code) = - let - (* Because X30 is used in the branchAndLink it has to be pushed - across any trap. 
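The conditional-over-unconditional rewrite above is the central peephole step: a conditional branch that skips over an unconditional branch to the very next label can be replaced by a single branch with the test inverted. A toy version over a made-up instruction datatype (toyInstr, Lab, CondBr, Br, Other and peephole are illustrative names, not the compiler's):

    datatype toyInstr =
        Lab of int
    |   CondBr of bool * int    (* branch to the label if the flag equals the bool *)
    |   Br of int               (* unconditional branch to the label *)
    |   Other of string

    fun peephole (CondBr(sense, cLab) :: Br uLab :: (rest as (Lab lLab :: _))) =
            if cLab = lLab
            then CondBr(not sense, uLab) :: peephole rest    (* invert the test, drop the Br, keep the label *)
            else CondBr(sense, cLab) :: peephole (Br uLab :: rest)
    |   peephole (instr :: rest) = instr :: peephole rest
    |   peephole [] = []

    (* peephole [CondBr(true, 1), Br 2, Lab 1, Other "x"] = [CondBr(false, 2), Lab 1, Other "x"] *)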
*) - val saveX30 = List.exists (fn r => r = X30) save - val preserve = List.filter (fn r => r <> X30) save - in - toAssembler(rest, - code <@> - (if saveX30 then [storeRegPreIndex{regT=X30, regN=X_MLStackPtr, byteOffset= ~8}] else []) <::> - loadRegScaled{regT=work, regN=X_MLAssemblyInt, unitOffset=rtsEntry} <::> - branchAndLinkReg work <::> - registerMask preserve <@> - (if saveX30 then [loadRegPostIndex{regT=X30, regN=X_MLStackPtr, byteOffset= 8}] else []) - ) - end + | forward(hd :: tl, list, rep) = forward(tl, hd :: list, rep) + + and reverse([], list, rep) = (list, rep) - | toAssembler(AllocateMemoryFixedSize{ bytes, dest, save, work } :: rest, code) = - let - val label = createLabel() - val saveX30 = List.exists (fn r => r = X30) save - val preserve = List.filter (fn r => r <> X30) save - - val allocCode = - code <::> - (* Subtract the number of bytes required from the heap pointer. *) - subImmediate{regN=X_MLHeapAllocPtr, regD=dest, immed=bytes, shifted=false} <::> - (* Compare the result with the heap limit. *) - subSShiftedReg{regM=X_MLHeapLimit, regN=dest, regD=XZero, shift=ShiftNone} <::> - conditionalBranch(CondCarrySet, label) <@> - (if saveX30 then [storeRegPreIndex{regT=X30, regN=X_MLStackPtr, byteOffset= ~8}] else []) <::> - loadRegScaled{regT=work, regN=X_MLAssemblyInt, unitOffset=heapOverflowCallOffset} <::> - branchAndLinkReg work <::> - registerMask preserve <@> - (if saveX30 then [loadRegPostIndex{regT=X30, regN=X_MLStackPtr, byteOffset= 8}] else []) <::> - setLabel label <::> - (* Update the heap pointer. *) - orrShiftedReg{regN=XZero, regM=dest, regD=X_MLHeapAllocPtr, shift=ShiftNone} - in - toAssembler(rest, allocCode) - end + | reverse(hd :: tl, list, rep) = reverse(tl, hd :: list, rep) - | toAssembler(AllocateMemoryVariableSize{ sizeReg, dest, save, work } :: rest, code) = - let - val trapLabel = createLabel() and noTrapLabel = createLabel() - val saveX30 = List.exists (fn r => r = X30) save - val preserve = List.filter (fn r => r <> X30) save - - val allocCode = - ( - (* Subtract the size into the result register. Subtract a further word for - the length word and round down in 32-in-64. *) - if is32in64 - then code <::> - subShiftedReg{regM=sizeReg, regN=X_MLHeapAllocPtr, regD=dest, shift=ShiftLSL 0w2} <::> - subImmediate{regN=dest, regD=dest, immed=0w4, shifted=false} <::> - bitwiseAndImmediate{bits= ~ 0w8, regN=dest, regD=dest} - else code <::> - subShiftedReg{regM=sizeReg, regN=X_MLHeapAllocPtr, regD=dest, shift=ShiftLSL 0w3} <::> - subImmediate{regN=dest, regD=dest, immed=0w8, shifted=false} - ) <::> - (* Check against the limit. If the size is large enough it is possible that this could wrap round. - To check for that we trap if either the result is less than the limit or if it is - now greater than the allocation pointer. *) - subSShiftedReg{regM=X_MLHeapLimit, regN=dest, regD=XZero, shift=ShiftNone} <::> - conditionalBranch(CondCarryClear, trapLabel) <::> - subSShiftedReg{regM=X_MLHeapAllocPtr, regN=dest, regD=XZero, shift=ShiftNone} <::> - conditionalBranch(CondCarryClear, noTrapLabel) <::> - setLabel trapLabel <@> - (if saveX30 then [storeRegPreIndex{regT=X30, regN=X_MLStackPtr, byteOffset= ~8}] else []) <::> - loadRegScaled{regT=work, regN=X_MLAssemblyInt, unitOffset=heapOverflowCallOffset} <::> - branchAndLinkReg work <::> - registerMask preserve <@> - (if saveX30 then [loadRegPostIndex{regT=X30, regN=X_MLStackPtr, byteOffset= 8}] else []) <::> - setLabel noTrapLabel <::> - (* Update the heap pointer. 
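The driver defined just below (repeat) has this generic shape: run one forward/reverse sweep, which reports whether it rewrote anything, and iterate until a sweep changes nothing. A small sketch of that fixpoint loop (untilFixed is an illustrative name):

    fun untilFixed (sweep: 'a list -> 'a list * bool) ops =
        case sweep ops of
            (ops', false) => ops'            (* nothing changed: done *)
        |   (ops', true) => untilFixed sweep ops'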
*) - orrShiftedReg{regN=XZero, regM=dest, regD=X_MLHeapAllocPtr, shift=ShiftNone} - in - toAssembler(rest, allocCode) - end + (* Repeat scans through the code until there are no further changes. *) + fun repeat ops = + case forward(ops, [], false) of + (list, false) => list + | (list, true) => repeat list - | toAssembler(BranchTable{ startLabel, brTable } :: rest, code) = - toAssembler(rest, - List.foldl (fn (label, code) => (unconditionalBranch label) :: code) - (code <::> setLabel startLabel) brTable) + val optimised = repeat instrs - | toAssembler(LoadGlobalHeapBaseInCallback dest :: rest, code) = - toAssembler(rest, - code <@> List.rev(loadGlobalHeapBaseInCallback dest)) + in + generateCode{instrs=List.rev(toAssembler(optimised, [])), name=name, parameters=parameters, + resultClosure=resultClosure, profileObject=profileObject} + end (* Constant shifts are encoded in the immr and imms fields of the bit-field instruction. *) fun shiftConstant{ direction, regD, regN, shift, opSize } = let val (bitfieldKind, immr, imms) = case (direction, opSize) of (ShiftLeft, OpSize64) => (BFUnsigned, Word.~ shift mod 0w64, 0w64-0w1-shift) | (ShiftLeft, OpSize32) => (BFUnsigned, Word.~ shift mod 0w32, 0w32-0w1-shift) | (ShiftRightLogical, OpSize64) => (BFUnsigned, shift, 0wx3f) | (ShiftRightLogical, OpSize32) => (BFUnsigned, shift, 0wx1f) | (ShiftRightArithmetic, OpSize64) => (BFSigned, shift, 0wx3f) | (ShiftRightArithmetic, OpSize32) => (BFSigned, shift, 0wx1f) in BitField{ regN=regN, regD=regD, opSize=opSize, immr=immr, imms=imms, bitfieldKind=bitfieldKind } end (* These sequences are used both in the ML code-generator and in the FFI code so it is convenient to have them here and share the code. *) local fun allocateWords(fixedReg, workReg, words, bytes, regMask, code) = let val (lengthWord, setLength, flagShift) = if is32in64 then (~4, Load32, 0w24) else (~8, Load64, 0w56) in code <::> AllocateMemoryFixedSize{ bytes=bytes, dest=fixedReg, save=regMask, work=X16 } <::> LoadNonAddr(workReg, Word64.orb(words, Word64.<<(Word64.fromLarge(Word8.toLarge Address.F_bytes), flagShift))) <::> (* Store the length word. Have to use the unaligned version because offset is -ve. *) StoreRegUnscaled{regT=workReg, regN=fixedReg, byteOffset= lengthWord, loadType=setLength, unscaledType=NoUpdate} end fun absoluteAddressToIndex(reg, code) = if is32in64 then code <::> SubShiftedReg{regM=X_Base32in64, regN=reg, regD=reg, shift=ShiftNone, opSize=OpSize64, setFlags=false} <::> shiftConstant{direction=ShiftRightLogical, regN=reg, regD=reg, shift=0w2, opSize=OpSize64} else code in fun boxDouble({source, destination, workReg, saveRegs}, code) = absoluteAddressToIndex(destination, allocateWords(destination, workReg, if is32in64 then 0w2 else 0w1, 0w16, saveRegs, code) <::> StoreFPRegScaled{regT=source, regN=destination, unitOffset=0, floatSize=Double64}) and boxSysWord({source, destination, workReg, saveRegs}, code) = absoluteAddressToIndex(destination, allocateWords(destination, workReg, if is32in64 then 0w2 else 0w1, 0w16, saveRegs, code) <::> StoreRegScaled{regT=source, regN=destination, unitOffset=0, loadType=Load64}) and boxFloat({source, destination, workReg, saveRegs}, code) = absoluteAddressToIndex(destination, allocateWords(destination, workReg, 0w1, 0w8, saveRegs, code) <::> StoreFPRegScaled{regT=source, regN=destination, unitOffset=0, floatSize=Float32}) end - (* Optimise the pre-assembler code and then generate the final code. 
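A quick worked check of the shiftConstant encodings above, assuming the usual UBFM/SBFM shift aliases: a 64-bit logical shift left by 3 should become a bitfield move with immr = (~3) mod 64 and imms = 64 - 1 - 3, while a logical shift right by 3 uses immr = 3 and imms = 0x3f. Evaluating the same expressions directly:

    val lslImmr = Word.~ 0w3 mod 0w64        (* = 0w61 *)
    val lslImms = 0w64 - 0w1 - 0w3           (* = 0w60 *)
    val lsrImmr = 0w3                        (* = 0w3 *)
    val lsrImms = 0wx3f                      (* = 0w63 *)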
*) - fun generateFinalCode {instrs, name, parameters, resultClosure, profileObject, labelCount=_} = - let - - in - generateCode{instrs=List.rev(toAssembler(instrs, [])), name=name, parameters=parameters, - resultClosure=resultClosure, profileObject=profileObject} - end - - structure Sharing = struct type closureRef = closureRef type loadType = loadType type opSize = opSize type logicalOp = logicalOp type floatSize = floatSize type shiftDirection = shiftDirection type multKind = multKind type fpUnary = fpUnary type fpBinary = fpBinary type unscaledType = unscaledType type condSet = condSet type bitfieldKind = bitfieldKind type brRegType = brRegType type precode = precode type xReg = xReg type vReg = vReg - type precodeLabel = precodeLabel + type label = label + type labelMaker = labelMaker type condition = condition type shiftType = shiftType type wordSize = wordSize type 'a extend = 'a extend type scale = scale type instr = instr end end;