diff --git a/libpolyml/arm64.cpp b/libpolyml/arm64.cpp index 92dd8360..30414887 100644 --- a/libpolyml/arm64.cpp +++ b/libpolyml/arm64.cpp @@ -1,813 +1,825 @@ /* Machine-dependent code for ARM64 Copyright David C.J. Matthews 2020-21. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ // Currently this is just copied from the interpreted version. #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #ifdef HAVE_ASSERT_H #include <assert.h> #define ASSERT(x) assert(x) #else #define ASSERT(x) 0 #endif #ifdef HAVE_STRING_H #include <string.h> #endif #include "globals.h" #include "machine_dep.h" #include "sys.h" #include "profiling.h" #include "arb.h" #include "processes.h" #include "run_time.h" #include "gc.h" #include "diagnostics.h" #include "polystring.h" #include "save_vec.h" #include "memmgr.h" #include "scanaddrs.h" #include "rtsentry.h" #include "bytecode.h" #include "int_opcodes.h" /* * ARM64 register use: * X0 First argument and return value * X1-X7 Second-eighth argument * X8 Indirect result (C), ML closure pointer on entry * X9-X15 Volatile scratch registers * X16-X17 Intra-procedure-call (C). Only used for special cases in ML. * X18 Platform register. Not used in ML. * X19-X24 Non-volatile (C). Scratch registers (ML). * X25 ML Heap limit pointer * X26 ML assembly interface pointer. Non-volatile (C). * X27 ML Heap allocation pointer. Non-volatile (C). * X28 ML Stack pointer. Non-volatile (C). * X29 Frame pointer (C). Not used in ML. * X30 Link register. Not used in ML. * X31 Stack pointer (C). Not used in ML. Also zero register. * * Floating point registers: * V0 First argument and return value * V1-V7 Second-eighth argument * V8-V15 Non-volatile. Not currently used in ML. * V16-V31 Volatile. Not currently used in ML. * * The ML calling conventions generally follow the C ABI except that * all registers are volatile and X28 is used for the stack. */ /* The amount of ML stack space to reserve for registers, C exception handling etc. The compiler requires us to reserve 2 stack-frames worth (2 * 20 words). We actually reserve slightly more than this. */ // Arm64 instructions are all 32-bit values. typedef uint32_t* arm64CodePointer; #define OVERFLOW_STACK_SIZE 50 // X26 always points at this area when executing ML code. // The offsets are built into the assembly code and some are built into // the code generator so this must not be changed without checking these. typedef struct _AssemblyArgs { public: byte* enterInterpreter; // These are filled in with the functions. byte* heapOverFlowCall; byte* stackOverFlowCall; byte* stackOverFlowCallEx; byte* trapHandlerEntry; stackItem* handlerRegister; // Current exception handler stackItem* stackLimit; // Lower limit of stack stackItem exceptionPacket; // Set if there is an exception PolyWord threadId; // My thread id. Saves having to call into RTS for it.
stackItem registers[25]; // Save/load area for registers X0-X24 inclusive double fpRegisters[8]; // Save/load area for floating point regs D0-D7 PolyWord* localMbottom; // Base of memory + 1 word PolyWord* localMpointer; // X27 Allocation ptr + 1 word stackItem* stackPtr; // X28 Current stack pointer arm64CodePointer linkRegister; // X30 - Link register (return address) arm64CodePointer entryPoint; // PC address to return to byte returnReason; // Reason for returning from ML - Set by assembly code. } AssemblyArgs; class Arm64TaskData: public TaskData, ByteCodeInterpreter { public: Arm64TaskData(); ~Arm64TaskData() {} unsigned allocReg; // The register to take the allocated space. POLYUNSIGNED allocWords; // The words to allocate. AssemblyArgs assemblyInterface; uint32_t saveRegisterMask; // Registers that need to be updated by a GC. virtual void GarbageCollect(ScanAddress *process); void ScanStackAddress(ScanAddress *process, stackItem& val, StackSpace *stack); virtual void EnterPolyCode(); // Start running ML virtual void SetException(poly_exn *exc) { assemblyInterface.exceptionPacket = (PolyWord)exc; } virtual void InterruptCode(); // AddTimeProfileCount is used in time profiling. virtual bool AddTimeProfileCount(SIGNALCONTEXT *context); virtual void InitStackFrame(TaskData *newTask, Handle proc); // Atomic exchange-and-add. Used in the process module to release a mutex and in the // interpreter. It needs to use the same instruction that compiled code uses. virtual POLYSIGNED AtomicExchAdd(PolyObject* mutexp, POLYSIGNED incr); // Set a mutex to zero. virtual void AtomicReset(PolyObject* mutexp); // Return the minimum space occupied by the stack. Used when setting a limit. // N.B. This is PolyWords not native words. virtual uintptr_t currentStackSpace(void) const { return (this->stack->top - (PolyWord*)assemblyInterface.stackPtr) + OVERFLOW_STACK_SIZE; } virtual void addProfileCount(POLYUNSIGNED words) { addSynchronousCount(interpreterPc, words); } virtual void CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length); //void HeapOverflowTrap(); void SetMemRegisters(); void SaveMemRegisters(); void HandleTrap(); // ByteCode overrides. The interpreter and native code states need to be in sync. // The interpreter is only used during the initial bootstrap. virtual void ClearExceptionPacket() { assemblyInterface.exceptionPacket = TAGGED(0); } virtual PolyWord GetExceptionPacket() { return assemblyInterface.exceptionPacket; } virtual stackItem* GetHandlerRegister() { return assemblyInterface.handlerRegister; } virtual void SetHandlerRegister(stackItem* hr) { assemblyInterface.handlerRegister = hr; } void Interpret(); void EndBootStrap() { mixedCode = true; } PLock interruptLock; virtual void HandleStackOverflow(uintptr_t space); }; class Arm64Dependent : public MachineDependent { public: Arm64Dependent() : mustInterpret(false) {} // Create a task data object. virtual TaskData* CreateTaskData(void) { return new Arm64TaskData(); } virtual Architectures MachineArchitecture(void); virtual void SetBootArchitecture(char arch, unsigned wordLength); // The ARM has separate instruction and data caches. virtual void FlushInstructionCache(void* p, POLYUNSIGNED bytes); // During the first bootstrap phase this is interpreted.
bool mustInterpret; }; static Arm64Dependent arm64Dependent; MachineDependent* machineDependent = &arm64Dependent; Architectures Arm64Dependent::MachineArchitecture(void) { // During the first phase of the bootstrap we // compile the interpreted version. if (mustInterpret) return MA_Interpreted; return MA_Arm64; } // Values for the returnReason byte enum RETURN_REASON { RETURN_HEAP_OVERFLOW = 1, RETURN_STACK_OVERFLOW = 2, RETURN_STACK_OVERFLOWEX = 3, RETURN_ENTER_INTERPRETER = 4 }; extern "C" { // These are declared in the assembly code segment. void Arm64AsmEnterCompiledCode(void*); int Arm64AsmCallExtraRETURN_ENTER_INTERPRETER(void); int Arm64AsmCallExtraRETURN_HEAP_OVERFLOW(void); int Arm64AsmCallExtraRETURN_STACK_OVERFLOW(void); int Arm64AsmCallExtraRETURN_STACK_OVERFLOWEX(void); // This is declared here and called from the assembly code. // It avoids having a call to an external in the assembly code // which sometimes gives problems with position-independent code. void Arm64TrapHandler(PolyWord threadId); }; Arm64TaskData::Arm64TaskData() : ByteCodeInterpreter(&assemblyInterface.stackPtr, &assemblyInterface.stackLimit), allocReg(0), allocWords(0), saveRegisterMask(0) { assemblyInterface.enterInterpreter = (byte*)Arm64AsmCallExtraRETURN_ENTER_INTERPRETER; assemblyInterface.heapOverFlowCall = (byte*)Arm64AsmCallExtraRETURN_HEAP_OVERFLOW; assemblyInterface.stackOverFlowCall = (byte*)Arm64AsmCallExtraRETURN_STACK_OVERFLOW; assemblyInterface.stackOverFlowCallEx = (byte*)Arm64AsmCallExtraRETURN_STACK_OVERFLOWEX; assemblyInterface.trapHandlerEntry = (byte*)Arm64TrapHandler; interpreterPc = 0; mixedCode = !arm64Dependent.mustInterpret; } void Arm64Dependent::SetBootArchitecture(char arch, unsigned wordLength) { if (arch == 'I') mustInterpret = true; else if (arch != 'A') Crash("Boot file has unexpected architecture code: %c", arch); } // The ARM has separate instruction and data caches so we must flush // the cache when creating or modifying code. void Arm64Dependent::FlushInstructionCache(void* p, POLYUNSIGNED bytes) { #ifdef _WIN32 ::FlushInstructionCache(GetCurrentProcess(), p, bytes); #elif defined (__GNUC__) __builtin___clear_cache((char*)p, (char*)p + bytes); #elif (defined (__clang__) && defined (__APPLE__)) sys_icache_invalidate(p, bytes); #else #error "No code to flush the instruction cache." #endif } void Arm64TaskData::GarbageCollect(ScanAddress *process) { TaskData::GarbageCollect(process); ByteCodeInterpreter::GarbageCollect(process); if (assemblyInterface.exceptionPacket.w().IsDataPtr()) { PolyObject* obj = assemblyInterface.exceptionPacket.w().AsObjPtr(); obj = process->ScanObjectAddress(obj); assemblyInterface.exceptionPacket = (PolyWord)obj; } if (stack != 0) { stackItem*stackPtr = assemblyInterface.stackPtr; // Now the values on the stack. for (stackItem* q = stackPtr; q < (stackItem*)stack->top; q++) ScanStackAddress(process, *q, stack); } } // Process a value within the stack. void Arm64TaskData::ScanStackAddress(ScanAddress *process, stackItem& stackItem, StackSpace *stack) { // We may have return addresses on the stack which could look like // tagged values. Check whether the value is in the code area before // checking whether it is untagged. #ifdef POLYML32IN64 // In 32-in-64 return addresses always have the top 32 bits non-zero. if (stackItem.argValue < ((uintptr_t)1 << 32)) { // It's either a tagged integer or an object pointer.
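// To make the distinction concrete (illustrative values, not from the
// original source): Poly/ML tags short integers by setting the bottom bit,
// so TAGGED(3) = 2*3+1 = 7 has the low bit set and is left alone by the
// scan, while a word such as 0x12A0 has the low bit clear and is treated
// as an object pointer to be scanned. Values at or above 2^32 here can
// only be code or stack addresses, handled in the else branch.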
if (stackItem.w().IsDataPtr()) { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } } else { // Could be a code address or a stack address. MemSpace* space = gMem.SpaceForAddress(stackItem.codeAddr - 1); if (space == 0 || space->spaceType != ST_CODE) return; PolyObject* obj = gMem.FindCodeObject(stackItem.codeAddr); ASSERT(obj != 0); // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } #else // The -1 here is because we may have a zero-sized cell in the last // word of a space. MemSpace* space = gMem.SpaceForAddress(stackItem.codeAddr - 1); if (space == 0) return; // In particular we may have one of the assembly code addresses. if (space->spaceType == ST_CODE) { PolyObject* obj = gMem.FindCodeObject(stackItem.codeAddr); // If it is actually an integer it might be outside a valid code object. if (obj == 0) { ASSERT(stackItem.w().IsTagged()); // It must be an integer } else // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } else if (space->spaceType == ST_LOCAL && stackItem.w().IsDataPtr()) // Local values must be word addresses. { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } #endif } // Copy a stack void Arm64TaskData::CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length) { #ifdef POLYML32IN64 old_length = old_length / 2; new_length = new_length / 2; #endif /* Moves a stack, updating all references within the stack */ stackItem*old_base = (stackItem*)old_stack; stackItem*new_base = (stackItem*)new_stack; stackItem*old_top = old_base + old_length; /* Calculate the offset of the new stack from the old. If the frame is being extended objects in the new frame will be further up the stack than in the old one. */ uintptr_t offset = new_base - old_base + new_length - old_length; stackItem *oldSp = assemblyInterface.stackPtr; assemblyInterface.stackPtr = oldSp + offset; assemblyInterface.handlerRegister = assemblyInterface.handlerRegister + offset; /* Skip the unused part of the stack. */ uintptr_t i = oldSp - old_base; ASSERT(i <= old_length); i = old_length - i; stackItem *old = oldSp; stackItem *newp = assemblyInterface.stackPtr; while (i--) { stackItem old_word = *old++; if (old_word.w().IsDataPtr() && old_word.stackAddr >= old_base && old_word.stackAddr <= old_top) old_word.stackAddr = old_word.stackAddr + offset; else if (old_word.w().IsDataPtr() && IsHeapAddress(old_word.stackAddr)) { stackItem* addr = (stackItem*)old_word.w().AsStackAddr(); if (addr >= old_base && addr <= old_top) { addr += offset; old_word = PolyWord::FromStackAddr((PolyWord*)addr); } } *newp++ = old_word; } ASSERT(old == ((stackItem*)old_stack) + old_length); ASSERT(newp == ((stackItem*)new_stack) + new_length); } void Arm64TaskData::EnterPolyCode() /* Called from "main" to enter the code. */ { assemblyInterface.stackLimit = (stackItem*)((PolyWord*)this->stack->stack() + OVERFLOW_STACK_SIZE); if (arm64Dependent.mustInterpret) { PolyWord closure = assemblyInterface.registers[8]; *(--assemblyInterface.stackPtr) = closure; /* Closure address */ interpreterPc = *(POLYCODEPTR*)closure.AsObjPtr(); Interpret(); ASSERT(0); // Should never return } SetMemRegisters(); // Jump into the ML code. 
This code sets up the registers and puts the // address of the assemblyInterface into X26 Arm64AsmEnterCompiledCode(&assemblyInterface); // This should never return ASSERT(0); } void Arm64TaskData::Interpret() { while (true) { switch (RunInterpreter(this)) { case ReturnCall: // After the call there will be an enter-int instruction so that when this // returns we will re-enter the interpreter. The number of arguments for // this call is after that. while ((uintptr_t)interpreterPc & 3) { ASSERT(interpreterPc[0] == INSTR_no_op); interpreterPc++; } ASSERT(interpreterPc[0] == 0xe9); numTailArguments = interpreterPc[12]; case ReturnTailCall: { ClearExceptionPacket(); // Pop the closure. PolyWord closureWord = *assemblyInterface.stackPtr++; PolyObject* closure = closureWord.AsObjPtr(); arm64CodePointer cp = *(arm64CodePointer*)closure; if (cp[0] == 0xAA1E03E9 && cp[1] == 0xF9400350 && cp[2] == 0xD63F0200) { // If the code we're going to is interpreted push back the closure and // continue. interpreterPc = (POLYCODEPTR)cp; assemblyInterface.stackPtr--; HandleStackOverflow(128); // Make sure we have space since we're bypassing the check. continue; } assemblyInterface.registers[8] = closureWord; // Put closure in the closure reg. // Pop the return address. We may need to align this to a word boundary. POLYCODEPTR originalReturn = (POLYCODEPTR)((assemblyInterface.stackPtr++)->codeAddr); while ((uintptr_t)originalReturn & 3) { ASSERT(originalReturn[0] == INSTR_no_op); originalReturn++; } // Get the arguments into the correct registers. // Load the register arguments. The first 8 arguments go into X0-X7. // These will have been the first arguments to be pushed so will be // furthest away on the stack. // Note: we don't currently pass any arguments in the FP regs. for (unsigned i = 0; i < numTailArguments && i < 8; i++) assemblyInterface.registers[i] = assemblyInterface.stackPtr[numTailArguments - i - 1]; // If there are any more arguments these need to be shifted down the stack. while (numTailArguments > 8) { numTailArguments--; assemblyInterface.stackPtr[numTailArguments] = assemblyInterface.stackPtr[numTailArguments - 8]; } // Remove the register arguments assemblyInterface.stackPtr += numTailArguments > 8 ? 8 : numTailArguments; assemblyInterface.linkRegister = (arm64CodePointer)originalReturn; // Set the return address to caller assemblyInterface.entryPoint = *(arm64CodePointer*)closure; // Entry point to callee interpreterPc = 0; // No longer in the interpreter (See SaveMemRegs) return; } case ReturnReturn: { ClearExceptionPacket(); // Returning from an interpreted function. Normally we'll be returning to // interpreted code. if ((uintptr_t)interpreterPc & 3) // ARM64 addresses will always be 4-byte aligned. continue; arm64CodePointer cp = (arm64CodePointer)interpreterPc; if (cp[0] == 0xAA1E03E9 && cp[1] == 0xF9400350 && cp[2] == 0xD63F0200) continue; // Pop the value we're returning. Set the entry point to the code we're returning to. assemblyInterface.registers[0] = *assemblyInterface.stackPtr++; assemblyInterface.entryPoint = cp; interpreterPc = 0; // No longer in the interpreter (See SaveMemRegs) return; } } } } // Called from the assembly code as a result of a trap i.e. a request for // a GC or to extend the stack. void Arm64TrapHandler(PolyWord threadId) { Arm64TaskData* taskData = (Arm64TaskData*)TaskData::FindTaskForId(threadId); taskData->HandleTrap(); } void Arm64TaskData::HandleTrap() { SaveMemRegisters(); // Update globals from the memory registers. 
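// For reference, the three-word enter-int sequence matched in Interpret()
// above decodes (per the ARMv8 base instruction set) as:
//   0xAA1E03E9  orr x9,xzr,x30  i.e. mov x9,x30 - save the return address
//   0xF9400350  ldr x16,[x26]   - load enterInterpreter, the first field of
//                                 the AssemblyArgs block pointed at by X26
//   0xD63F0200  blr x16         - call into the interpreter
// This is why the RETURN_ENTER_INTERPRETER case below finds the original
// value of X30 saved in registers[9].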
switch (this->assemblyInterface.returnReason) { case RETURN_HEAP_OVERFLOW: // The heap has overflowed. // The register mask is the word after the return. saveRegisterMask = *assemblyInterface.entryPoint++; ASSERT(0); // TODO //HeapOverflowTrap(); // Computes a value for allocWords only break; case RETURN_STACK_OVERFLOW: case RETURN_STACK_OVERFLOWEX: { // The register mask is the word after the return. saveRegisterMask = *assemblyInterface.entryPoint++; ASSERT(0); // TODO uintptr_t min_size = 0; // Size in PolyWords #if (0) if (assemblyInterface.returnReason == RETURN_STACK_OVERFLOW) { min_size = (this->stack->top - (PolyWord*)assemblyInterface.stackPtr) + OVERFLOW_STACK_SIZE * sizeof(uintptr_t) / sizeof(PolyWord); } else { // Stack limit overflow. If the required stack space is larger than // the fixed overflow size the code will calculate the limit in %EDI. stackItem* stackP = regDI().stackAddr; min_size = (this->stack->top - (PolyWord*)stackP) + OVERFLOW_STACK_SIZE * sizeof(uintptr_t) / sizeof(PolyWord); } #endif HandleStackOverflow(min_size); break; } case RETURN_ENTER_INTERPRETER: { interpreterPc = (POLYCODEPTR)assemblyInterface.linkRegister; byte reasonCode = *interpreterPc++; // Sort out arguments. assemblyInterface.exceptionPacket = TAGGED(0); if (reasonCode == 0xff) { // Exception handler. ASSERT(0); // Not used assemblyInterface.exceptionPacket = assemblyInterface.registers[0]; // Get the exception packet // We're already in the exception handler but we still have to // adjust the stack pointer and pop the current exception handler. assemblyInterface.stackPtr = assemblyInterface.handlerRegister; assemblyInterface.stackPtr++; assemblyInterface.handlerRegister = (assemblyInterface.stackPtr++)[0].stackAddr; } else if (reasonCode >= 128) { // Start of function. unsigned numArgs = reasonCode - 128; // We need the stack to contain: // The closure, the return address, the arguments. - // Push the register args. - ASSERT(numArgs == 1); // We only ever call functions with one argument. - *(--assemblyInterface.stackPtr) = assemblyInterface.registers[0]; + // The stack will currently contain the stack arguments. + // Add space for the register arguments + if (numArgs > 8) + assemblyInterface.stackPtr -= 8; + else assemblyInterface.stackPtr -= numArgs; + // Move up any stack arguments. + for (unsigned n = numArgs; n > 8; n--) + { + assemblyInterface.stackPtr[n - 8 - 1] = assemblyInterface.stackPtr[n - 1]; + } + // Store the register arguments + for (unsigned n = 0; n < numArgs && n < 8; n++) + assemblyInterface.stackPtr[numArgs - n - 1] = assemblyInterface.registers[n]; + + // Finally push the return address and closure pointer *(--assemblyInterface.stackPtr) = assemblyInterface.registers[9]; // Return address - value of X30 before enter-int *(--assemblyInterface.stackPtr) = assemblyInterface.registers[8]; // Closure } else { // Return from call. Push X0 *(--assemblyInterface.stackPtr) = assemblyInterface.registers[0]; } Interpret(); break; } default: Crash("Unknown return reason code %u", this->assemblyInterface.returnReason); } SetMemRegisters(); } void Arm64TaskData::HandleStackOverflow(uintptr_t space) { uintptr_t min_size = (this->stack->top - (PolyWord*)assemblyInterface.stackPtr) + OVERFLOW_STACK_SIZE + space; try { // The stack check has failed. This may either be because we really have // overflowed the stack or because the stack limit value has been adjusted // to result in a call here. 
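// InterruptCode() (below) implements the second case: it sets
// assemblyInterface.stackLimit to the top of the stack so that the next
// compiled stack check fails and traps into here even though there is
// still space; ProcessAsynchRequests() then runs. A hypothetical check
// sequence (illustrative only, not the actual generated code) might be:
//   ldr x9,[x26,#stackLimitOffset] ; cmp x28,x9 ; b.lo stackOverFlowCall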
CheckAndGrowStack(this, min_size); } catch (IOException&) { // We may get an exception while handling this if we run out of store } { PLocker l(&interruptLock); // Set the stack limit. This clears any interrupt and also sets the // correct value if we've grown the stack. assemblyInterface.stackLimit = (stackItem*)stack->bottom + OVERFLOW_STACK_SIZE; } try { processes->ProcessAsynchRequests(this); // Release and re-acquire use of the ML memory to allow another thread // to GC. processes->ThreadReleaseMLMemory(this); processes->ThreadUseMLMemory(this); } catch (IOException&) { } } void Arm64TaskData::InitStackFrame(TaskData* parentTask, Handle proc) /* Initialise stack frame. */ { StackSpace* space = this->stack; StackObject* stack = (StackObject*)space->stack(); uintptr_t stack_size = space->spaceSize() * sizeof(PolyWord) / sizeof(stackItem); assemblyInterface.stackPtr = (stackItem*)stack + stack_size; assemblyInterface.stackLimit = (stackItem*)space->bottom + OVERFLOW_STACK_SIZE; assemblyInterface.handlerRegister = assemblyInterface.stackPtr; // Store the argument and the closure. assemblyInterface.registers[8] = proc->Word(); // Closure assemblyInterface.registers[0] = TAGGED(0); // Argument assemblyInterface.linkRegister = 0; // We never return // Have to set the register mask in case we get a GC before the thread starts. saveRegisterMask = (1 << 8) | 1; // X8 and X0 } // This is called from a different thread so we have to be careful. void Arm64TaskData::InterruptCode() { PLocker l(&interruptLock); // Set the stack limit pointer to the top of the stack to cause // a trap when we next check for stack overflow. // We use a lock here to ensure that we always use the current value of the // stack. The thread we're interrupting could be growing the stack at this point. if (stack != 0) assemblyInterface.stackLimit = (stackItem*)(stack->top - 1); } // Called before entering ML code from the run-time system void Arm64TaskData::SetMemRegisters() { // Copy the current store limits into variables before we go into the assembly code. // If we haven't yet set the allocation area or we don't have enough we need // to create one (or a new one). if (allocPointer <= allocLimit + allocWords) { if (allocPointer < allocLimit) Crash("Bad length in heap overflow trap"); // Find some space to allocate in. Updates taskData->allocPointer and // returns a pointer to the newly allocated space (if allocWords != 0) PolyWord* space = processes->FindAllocationSpace(this, allocWords, true); if (space == 0) { // We will now raise an exception instead of returning. // Set allocWords to zero so we don't set the allocation register // since that could be holding the exception packet. allocWords = 0; } // Undo the allocation just now. allocPointer += allocWords; } if (this->allocWords != 0) { // If we have had a heap trap we actually do the allocation here. // We will have already garbage collected and recovered sufficient space. // This also happens if we have just trapped because of store profiling. allocPointer -= allocWords; // Now allocate // Set the allocation register to this area. N.B. This is an absolute address. #if (0) if (this->allocReg < 15) get_reg(this->allocReg)[0].codeAddr = (POLYCODEPTR)(this->allocPointer + 1); /* remember: it's off-by-one */ #endif allocWords = 0; } // If we have run out of store, either just above or while allocating in the RTS, // allocPointer and allocLimit will have been set to zero as part of the GC. 
We will // now be raising an exception which may free some store but we need to come back here // before we allocate anything. The compiled code uses unsigned arithmetic to check for // heap overflow but only after subtracting the space required. We need to make sure // that the values are still non-negative after subtracting any object size. if (allocPointer == 0) allocPointer += MAX_OBJECT_SIZE; if (allocLimit == 0) allocLimit += MAX_OBJECT_SIZE; assemblyInterface.localMbottom = allocLimit + 1; assemblyInterface.localMpointer = allocPointer + 1; // If we are profiling store allocation we set mem_hl so that a trap // will be generated. if (profileMode == kProfileStoreAllocation) assemblyInterface.localMbottom = assemblyInterface.localMpointer; assemblyInterface.threadId = threadObject; } // This is called whenever we have returned from ML to C. void Arm64TaskData::SaveMemRegisters() { // The normal return is to the link register address. assemblyInterface.entryPoint = assemblyInterface.linkRegister; if (interpreterPc == 0) // Not if we're already in the interpreter allocPointer = assemblyInterface.localMpointer - 1; allocWords = 0; assemblyInterface.exceptionPacket = TAGGED(0); saveRegisterMask = 0; } // As far as possible we want locking and unlocking an ML mutex to be fast so // we try to implement the code in the assembly code using appropriate // interlocked instructions. That does mean that if we need to lock and // unlock an ML mutex in this code we have to use the same, machine-dependent, // code to do it. These are defaults that are used where there is no // machine-specific code. #if defined(_MSC_VER) // This saves having to define it in the MASM assembly code. static POLYSIGNED Arm64AsmAtomicExchangeAndAdd(PolyObject* mutexp, POLYSIGNED addend) { # if (SIZEOF_POLYWORD == 8) return InterlockedExchangeAdd64((LONG64*)mutexp, addend); # else return InterlockedExchangeAdd((LONG*)mutexp, addend); # endif } #else extern "C" { // This is only defined in the GAS assembly code POLYSIGNED Arm64AsmAtomicExchangeAndAdd(PolyObject*, POLYSIGNED); } #endif // Do the exchange-and-add POLYSIGNED Arm64TaskData::AtomicExchAdd(PolyObject* mutexp, POLYSIGNED incr) { return Arm64AsmAtomicExchangeAndAdd(mutexp, incr - TAGGED(0).AsSigned() /* Remove the tag */); } // Release a mutex. Because the atomic increment and decrement // use the hardware atomic load-and-add we can simply set this to zero. void Arm64TaskData::AtomicReset(PolyObject* mutexp) { mutexp->Set(0, TAGGED(0)); // Set this to released. } bool Arm64TaskData::AddTimeProfileCount(SIGNALCONTEXT *context) { if (interpreterPc != 0) { // See if the PC we've got is an ML code address. MemSpace *space = gMem.SpaceForAddress(interpreterPc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { incrementCountAsynch(interpreterPc); return true; } } return false; } extern "C" { POLYEXTERNALSYMBOL POLYUNSIGNED PolyInterpretedEnterIntMode(); POLYEXTERNALSYMBOL POLYUNSIGNED PolyEndBootstrapMode(FirstArgument threadId, PolyWord function); } // Do we require EnterInt instructions and if so for which architecture? // 0 => None; 1 => X86_32; 2 => X86_64; 3 => X86_32_in_64; 4 => ARM_64. POLYUNSIGNED PolyInterpretedEnterIntMode() { return TAGGED(4).AsUnsigned(); } // End the first stage of bootstrap mode and run a new function. // The first stage is always interpreted. Once that is complete every function will have // at least an executable "enter-interpreter" stub so it can be called as machine code.
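// A worked example of the tag arithmetic in AtomicExchAdd above
// (illustrative, using the tagging scheme TAGGED(n) = 2n+1): to add 1 to
// a mutex the caller passes incr = TAGGED(1) = 3; subtracting
// TAGGED(0) = 1 leaves the raw addend 2, so a stored TAGGED(n) = 2n+1
// becomes 2(n+1)+1 = TAGGED(n+1) with no separate retagging step.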
POLYUNSIGNED PolyEndBootstrapMode(FirstArgument threadId, PolyWord function) { TaskData* taskData = TaskData::FindTaskForId(threadId); ASSERT(taskData != 0); taskData->PreRTSCall(); Handle pushedFunction = taskData->saveVec.push(function); arm64Dependent.mustInterpret = false; ((Arm64TaskData*)taskData)->EndBootStrap(); taskData->InitStackFrame(taskData, pushedFunction); taskData->EnterPolyCode(); // Should never return. ASSERT(0); return TAGGED(0).AsUnsigned(); } // No machine-specific calls in the interpreter. struct _entrypts machineSpecificEPT[] = { { "PolyInterpretedEnterIntMode", (polyRTSFunction)&PolyInterpretedEnterIntMode }, { "PolyEndBootstrapMode", (polyRTSFunction)&PolyEndBootstrapMode }, { NULL, NULL} // End of list. }; diff --git a/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64ASSEMBLY.sml b/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64ASSEMBLY.sml index de72364d..de3cc02f 100644 --- a/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64ASSEMBLY.sml +++ b/mlsource/MLCompiler/CodeTree/Arm64Code/ARM64ASSEMBLY.sml @@ -1,462 +1,508 @@ (* Copyright (c) 2021 David C. J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public Licence version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public Licence for more details. You should have received a copy of the GNU Lesser General Public Licence along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor ARM64ASSEMBLY ( structure Debug: DEBUG and Pretty: PRETTYSIG and CodeArray: CODEARRAYSIG ) : Arm64Assembly = struct open CodeArray Address exception InternalError = Misc.InternalError exception Fallback (* Define this here in case we want to raise it. *) infix 5 << <<+ <<- >> >>+ >>- ~>> ~>>+ ~>>- (* Shift operators *) infix 3 andb orb xorb andbL orbL xorbL andb8 orb8 xorb8 val op << = Word.<< and op >> = Word.>> and op ~>> = Word.~>> and op andb = Word.andb and op orb = Word.orb val wordToWord8 = Word8.fromLargeWord o Word.toLargeWord and word8ToWord = Word.fromLargeWord o Word8.toLargeWord datatype xReg = XReg of Word8.word | XZero | XSP datatype instr = SimpleInstr of word | LoadLiteral of xReg * int (* 31 in the register field can either mean the zero register or the hardware stack pointer. Which meaning depends on the instruction. *) fun xRegOrXZ(XReg w) = w | xRegOrXZ XZero = 0w31 | xRegOrXZ XSP = raise InternalError "XSP not valid here" and xRegOrXSP(XReg w) = w | xRegOrXSP XZero = raise InternalError "XZero not valid here" | xRegOrXSP XSP = 0w31 (* There are cases where it isn't clear. 
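For example (an illustrative note, encodings per the ARMv8 base ISA): in loads and stores the base-register field value 31 selects SP, so those operands go through xRegOrXSP, while in most data-processing operand positions 31 selects XZR, as in 0xAA1E03E9 (orr x9,xzr,x30), which goes through xRegOrXZ. Where register 31 would be ambiguous or invalid, xRegOnly below rejects both XZero and XSP.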
*) and xRegOnly (XReg w) = w | xRegOnly XZero = raise InternalError "XZero not valid here" | xRegOnly XSP = raise InternalError "XSP not valid here" val X0 = XReg 0w0 and X1 = XReg 0w1 and X2 = XReg 0w2 and X3 = XReg 0w3 and X4 = XReg 0w4 and X5 = XReg 0w5 and X6 = XReg 0w6 and X7 = XReg 0w7 and X8 = XReg 0w8 and X9 = XReg 0w9 and X10 = XReg 0w10 and X11 = XReg 0w11 and X12 = XReg 0w12 and X13 = XReg 0w13 and X14 = XReg 0w14 and X15 = XReg 0w15 and X16 = XReg 0w16 and X17 = XReg 0w17 and X18 = XReg 0w18 and X19 = XReg 0w19 and X20 = XReg 0w20 and X21 = XReg 0w21 and X22 = XReg 0w22 and X23 = XReg 0w23 and X24 = XReg 0w24 and X25 = XReg 0w25 and X26 = XReg 0w26 and X27 = XReg 0w27 and X28 = XReg 0w28 and X29 = XReg 0w29 and X30 = XReg 0w30 val X_MLHeapLimit = X25 (* ML Heap limit pointer *) and X_MLAssemblyInt = X26 (* ML assembly interface pointer. *) and X_MLHeapAllocPtr = X27 (* ML Heap allocation pointer. *) and X_MLStackPtr = X28 (* ML Stack pointer. *) and X_LinkReg = X30 (* Link reg - return address *) datatype code = Code of { instructions: instr list ref, (* Code in reverse order. *) constVec: machineWord list ref, (* Constant area constant values. *) functionName: string, (* Name of the function. *) printAssemblyCode:bool, (* Whether to print the code when we finish. *) printStream: string->unit (* The stream to use *) } fun codeCreate (name, parameters) = let val printStream = Pretty.getSimplePrinter(parameters, []) in Code { instructions = ref nil, constVec = ref [], functionName = name, printAssemblyCode = Debug.getParameter Debug.assemblyCodeTag parameters, printStream = printStream } end fun addConstToVec (valu, Code{constVec, ...}) = let (* Search the list to see if the constant is already there. *) fun findConst valu [] num = (* Add to the list *) ( constVec := ! constVec @ [valu]; num ) | findConst valu (h :: t) num = if wordEq (valu, h) then num else findConst valu t (num + 1) (* Not equal *) in findConst valu (! constVec) 0 end fun addInstr (instr, Code{instructions, ...}) = instructions := SimpleInstr instr :: ! instructions - val retCode = 0wxD65F03C0 - and nopCode = 0wxD503201F - - fun genRetCode code = addInstr(retCode, code) + val nopCode = 0wxD503201F (* Add a 12-bit constant, possibly shifted by 12 bits. *) fun genAddRegConstant({sReg, dReg, cValue, shifted}, code) = let val () = if cValue < 0 orelse cValue >= 0x1000 then raise InternalError "genAddRegConstant: Value > 12 bits" else () in addInstr(0wx91000000 orb (if shifted then 0wx400000 else 0w0) orb (Word.fromInt cValue << 0w10) orb (word8ToWord(xRegOrXSP sReg) << 0w5) orb word8ToWord(xRegOrXSP dReg), code) end (* Loads: There are two versions of this on the ARM. There is a version that takes a signed 9-bit byte offset and a version that takes an unsigned 12-bit word offset. *) (* Load an aligned value using an unsigned offset. *) fun loadRegAligned({dest, base, wordOffset}, code) = let val _ = (wordOffset >= 0 andalso wordOffset < 0x1000) orelse raise InternalError "loadRegAligned: value out of range" in addInstr(0wxF9400000 orb (Word.fromInt wordOffset << 0w10) orb (word8ToWord(xRegOrXSP base) << 0w5) orb word8ToWord(xRegOnly dest), code) end + (* and corresponding store.
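For example, a worked encoding, added for illustration: storeRegAligned{dest=X0, base=X_MLStackPtr, wordOffset=1} emits str x0,[x28,#8], which assembles as 0wxF9000000 orb (0w1 << 0w10) orb (0w28 << 0w5) orb 0w0 = 0wxF9000780.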
*) + and storeRegAligned({dest, base, wordOffset}, code) = + let + val _ = (wordOffset >= 0 andalso wordOffset < 0x1000) + orelse raise InternalError "storeRegAligned: value out of range" + in + addInstr(0wxF9000000 orb (Word.fromInt wordOffset << 0w10) orb + (word8ToWord(xRegOrXSP base) << 0w5) orb word8ToWord(xRegOnly dest), code) + end + + (* Push a register to the ML stack. This uses a pre-increment store to x28 *) fun genPushReg(xReg, code) = addInstr(0wxF81F8F80 orb word8ToWord(xRegOnly xReg), code) (* Pop a register from the ML stack. *) fun genPopReg(xReg, code) = addInstr(0wxF8408780 orb word8ToWord(xRegOnly xReg), code) fun genLoadConstant(xReg, valu, code as Code{instructions, ...}) = let val cNum = addConstToVec(valu, code) in instructions := LoadLiteral(xReg, cNum) :: ! instructions end (* Move an unsigned constant into the low 16-bits of a register. *) fun genMoveShortConstToReg(xReg, constnt, code) = if constnt < 0 orelse constnt >= 65536 then raise InternalError "genMoveShortConstToReg: constant out of range" else addInstr(0wxD2800000 orb (Word.fromInt constnt << 0w5) orb word8ToWord(xRegOnly xReg), code) - + + (* Jump to the address in the register and put the address of the + next instruction into X30. *) + fun genBranchAndLinkReg(dest, code) = + addInstr(0wxD63F0000 orb (word8ToWord(xRegOnly dest) << 0w5), code) + + (* Jump to the address in the register. *) + fun genBranchRegister(dest, code) = + addInstr(0wxD61F0000 orb (word8ToWord(xRegOnly dest) << 0w5), code) + + (* Jump to the address in the register and hint this is a return. *) + fun genReturnRegister(dest, code) = + addInstr(0wxD65F0000 orb (word8ToWord(xRegOnly dest) << 0w5), code) + (* Size of each code word. *) fun codeSize _ = 1 (* Number of 32-bit words *) fun foldCode startIc foldFn ops = let fun doFold(oper :: operList, ic) = doFold(operList, (* Get the size BEFORE any possible change. *) ic + Word.fromInt(codeSize oper) * 0w4 before foldFn(oper, ic)) | doFold(_, ic) = ic in doFold(ops, startIc) end (* Store a 32-bit value in the code *) fun set32(value, addrs, seg) = let fun putBytes(value, a, seg, i) = if i = 0w4 then () else ( byteVecSet(seg, a+i, wordToWord8(value andb 0wxff)); putBytes(value >> 0w8, a, seg, i+0w1) ) in putBytes(value, addrs, seg, 0w0) end fun genCode(ops, Code {constVec, ...}) = let (* First pass - set the labels. *) fun setLabelsAndSizes ops = Word.fromInt(List.length ops) val codeSize = setLabelsAndSizes ops (* Number of 32-bit instructions *) val wordsOfCode = (codeSize + 0w1) div 0w2 (* Round up to 64-bits *) val paddingWord = if Word.andb(codeSize, 0w1) = 0w1 then [SimpleInstr nopCode] else [] val segSize = wordsOfCode + Word.fromInt(List.length(! constVec)) + 0w4 (* 4 extra words *) val codeVec = byteVecMake segSize fun genCodeWords(SimpleInstr code, byteNo) = set32(code, byteNo, codeVec) | genCodeWords(LoadLiteral(xReg, cNum), byteNo) = let (* The offset is in 32-bit words. The first of the constants is at offset wordsOfCode+3 *) val offsetOfConstant = (wordsOfCode+0w3+Word.fromInt cNum)*0w2 - (byteNo >> 0w2) val _ = offsetOfConstant < 0wx100000 orelse raise InternalError "Offset to constant is too large" val code = 0wx58000000 orb (offsetOfConstant << 0w5) orb word8ToWord(xRegOnly xReg) in set32(code, byteNo, codeVec) end in foldCode 0w0 genCodeWords (ops @ paddingWord); (codeVec (* Return the completed code. *), wordsOfCode (* And the size in 64-bit words. 
*)) end (* Store a 64-bit value in the code *) fun set64(value, wordNo, seg) = let val addrs = wordNo * 0w8 fun putBytes(value, a, seg, i) = if i = 0w8 then () else ( byteVecSet(seg, a+i, Word8.fromInt(value mod 256)); putBytes(value div 256, a, seg, i+0w1) ) in putBytes(value, addrs, seg, 0w0) end (* Print the instructions in the code. *) fun printCode (codeVec, functionName, wordsOfCode, printStream) = let val numInstructions = wordsOfCode * 0w2 (* Words is number of 64-bit words *) fun printHex (v, n) = let val s = Word.fmt StringCvt.HEX v val pad = CharVector.tabulate(Int.max(0, n-size s), fn _ => #"0") in printStream pad; printStream s end (* Each instruction is 32 bits i.e. 4 bytes. *) fun printWordAt wordNo = let val byteNo = wordNo << 0w2 val () = printHex(byteNo, 6) (* Address *) val () = printStream "\t" val wordValue = word8ToWord (codeVecGet (codeVec, byteNo)) orb (word8ToWord (codeVecGet (codeVec, byteNo+0w1)) << 0w8) orb (word8ToWord (codeVecGet (codeVec, byteNo+0w2)) << 0w16) orb (word8ToWord (codeVecGet (codeVec, byteNo+0w3)) << 0w24) val () = printHex(wordValue, 8) (* Instr as hex *) val () = printStream "\t" in - if wordValue = 0wxD65F03C0 - then printStream "ret" + if (wordValue andb 0wxfffffc1f) = 0wxD61F0000 + then + let + val rN = (wordValue andb 0wx3e0) >> 0w5 + in + printStream "br\tx"; + printStream(Word.fmt StringCvt.DEC rN) + end + + else if (wordValue andb 0wxfffffc1f) = 0wxD63F0000 + then + let + val rN = (wordValue andb 0wx3e0) >> 0w5 + in + printStream "blr\tx"; + printStream(Word.fmt StringCvt.DEC rN) + end + + else if (wordValue andb 0wxfffffc1f) = 0wxD65F0000 + then + let + val rN = (wordValue andb 0wx3e0) >> 0w5 + in + printStream "ret\tx"; + printStream(Word.fmt StringCvt.DEC rN) + end else if wordValue = 0wxD503201F then printStream "nop" else if (wordValue andb 0wxffe00000) = 0wxD2800000 then let (* Move immediate, zeroing the rest of the register and with no shift. *) val rD = wordValue andb 0wx1f val imm16 = (wordValue andb 0wx1fffe0) >> 0w5 in printStream "mov\tx"; printStream(Word.fmt StringCvt.DEC rD); printStream ",#"; printStream(Word.fmt StringCvt.DEC imm16) end else if (wordValue andb 0wxffe00c00) = 0wxF8000C00 then let (* Store with pre-indexing *) val rT = wordValue andb 0wx1f and rN = (wordValue andb 0wx3e0) >> 0w5 and imm9 = (wordValue andb 0wx1ff000) >> 0w12 val imm9Text = if imm9 > 0wxff then "-" ^ Word.fmt StringCvt.DEC (0wx200 - imm9) else Word.fmt StringCvt.DEC imm9 in printStream "str\tx"; printStream(Word.fmt StringCvt.DEC rT); printStream ",[x"; printStream(Word.fmt StringCvt.DEC rN); printStream ",#"; printStream imm9Text; printStream "]!" end else if (wordValue andb 0wxffe00c00) = 0wxF8400400 then let (* Load with post-indexing *) val rT = wordValue andb 0wx1f and rN = (wordValue andb 0wx3e0) >> 0w5 and imm9 = (wordValue andb 0wx1ff000) >> 0w12 val imm9Text = if imm9 > 0wxff then "-" ^ Word.fmt StringCvt.DEC(0wx200 - imm9) else Word.fmt StringCvt.DEC imm9 in printStream "ldr\tx"; printStream(Word.fmt StringCvt.DEC rT); printStream ",[x"; printStream(Word.fmt StringCvt.DEC rN); printStream "],#"; printStream imm9Text end else if (wordValue andb 0wxff800000) = 0wx91000000 then let (* Add a 12-bit immediate with possible shift.
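For example, a worked encoding, added for illustration: add x28,x28,#16, which pops two ML stack words, is 0wx91000000 orb (0w16 << 0w10) orb (0w28 << 0w5) orb 0w28 = 0wx9100439C; when the shift bit 0wx400000 is set the effective immediate printed is imm12 << 0w12.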
*) val rD = wordValue andb 0wx1f and rN = (wordValue andb 0wx3e0) >> 0w5 and imm12 = (wordValue andb 0wx3ffc00) >> 0w10 and shiftBit = wordValue andb 0wx400000 val imm = if shiftBit <> 0w0 then imm12 << 0w12 else imm12 in printStream "add\tx"; printStream(Word.fmt StringCvt.DEC rD); printStream ",x"; printStream(Word.fmt StringCvt.DEC rN); printStream ",#"; printStream(Word.fmt StringCvt.DEC imm) end else if (wordValue andb 0wxff000000) = 0wx58000000 then let (* Load from a PC-relative address i.e. the constant area. *) val rT = wordValue andb 0wx1f (* The offset is in 32-bit words *) val byteOffset = (wordValue andb 0wx00ffffe0) >> (0w5-0w2) val constantValue = codeVecGetWord(codeVec, (byteOffset+byteNo) >> 0w3) in printStream "ldr\tx"; printStream(Word.fmt StringCvt.DEC rT); printStream ",0x"; printStream(Word.fmt StringCvt.HEX (byteOffset+byteNo)); printStream "\t// "; printStream(stringOfWord constantValue) end - else if (wordValue andb 0wxffc00000) = 0wxF9400000 + else if (wordValue andb 0wxff800000) = 0wxF9000000 then let - (* Load with an unsigned offset. The offset is in units of 64-bits. *) + (* Load/Store with an unsigned offset. The offset is in units of 64-bits. *) + val opc = if (wordValue andb 0wx400000) = 0w0 then "str" else "ldr" val rT = wordValue andb 0wx1f and rN = (wordValue andb 0wx3e0) >> 0w5 and imm12 = (wordValue andb 0wx3ffc00) >> 0w10 in - printStream "ldr\tx"; printStream(Word.fmt StringCvt.DEC rT); + printStream opc; printStream "\tx"; printStream(Word.fmt StringCvt.DEC rT); printStream ",[x"; printStream(Word.fmt StringCvt.DEC rN); printStream ",#"; printStream(Word.fmt StringCvt.DEC(imm12*0w8)); printStream "]" end else printStream "?" ; printStream "\n" end fun printAll i = if i = numInstructions then () else (printWordAt i; printAll(i+0w1)) in printStream functionName; printStream ":\n"; printAll 0w0 end (* Adds the constants onto the code, and copies the code into a new segment *) fun generateCode {code as Code{ instructions = ref instrs, printAssemblyCode, printStream, functionName, constVec, ...}, maxStack, resultClosure} = let local val codeList = List.rev instrs (* Add a stack check. We need to do this for native code. *) in val codeList = if maxStack < 128 then codeList else raise InternalError "TODO" (* SimpleCode[opcode_stackSize16, Word8.fromInt maxStack, Word8.fromInt(maxStack div 256)] :: codeList *) end val (byteVec, wordsOfCode) = genCode(codeList, code) (* +3 for profile count, function name and constants count *) val numOfConst = List.length(! constVec) val segSize = wordsOfCode + Word.fromInt numOfConst + 0w4 val firstConstant = wordsOfCode + 0w3 (* Add 3 for no of consts, fn name and profile count. *) (* Put in the number of constants. This must go in before we actually put in any constants. *) local val lastWord = segSize - 0w1 in val () = set64(numOfConst + 2, wordsOfCode, byteVec) (* Set the last word of the code to the (negative) byte offset of the start of the code area from the end of this word. *) val () = set64((numOfConst + 3) * ~8, lastWord, byteVec) end (* Now we've filled in all the size info we need to convert the segment into a proper code segment before it's safe to put in any ML values. *) val codeVec = byteVecToCodeVec(byteVec, resultClosure) local val name : string = functionName val nameWord : machineWord = toMachineWord name in val () = codeVecPutWord (codeVec, wordsOfCode+0w1, nameWord) end (* Profile ref. A byte ref used by the profiler in the RTS. 
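It is written at word wordsOfCode+2, giving the overall segment layout: the instructions; a word holding numOfConst+2; the function name; this profile ref; the constants themselves starting at firstConstant; and a final word holding the negative byte offset back to the start of the code, as filled in by the set64 calls above. *)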
*) local val v = RunCall.allocateByteMemory(0w1, Word.fromLargeWord(Word8.toLargeWord(Word8.orb(F_mutable, F_bytes)))) fun clear 0w0 = () | clear i = (assignByte(v, i-0w1, 0w0); clear (i-0w1)) val () = clear(wordSize) in val () = codeVecPutWord (codeVec, wordsOfCode+0w2, toMachineWord v) end (* and then copy the constants from the constant list. *) local fun setConstant(value, num) = ( codeVecPutWord (codeVec, firstConstant + num, value); num+0w1 ) in val _ = List.foldl setConstant 0w0 (!constVec) end in if printAssemblyCode then (* print out the code *) (printCode (codeVec, functionName, wordsOfCode, printStream); printStream"\n") else (); codeVecLock(codeVec, resultClosure) end (* copyCode *) structure Sharing = struct type code = code type closureRef = closureRef type instr = instr type xReg = xReg end end; diff --git a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64Assembly.sig b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64Assembly.sig index d21fe7c1..bcd4c056 100644 --- a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64Assembly.sig +++ b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64Assembly.sig @@ -1,80 +1,88 @@ (* Copyright (c) 2021 David C. J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public Licence version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public Licence for more details. You should have received a copy of the GNU Lesser General Public Licence along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) signature Arm64Assembly = sig type code type closureRef type instr type machineWord = Address.machineWord (* Create a code value for the function. *) val codeCreate: string * Universal.universal list -> code type xReg val X0: xReg and X1: xReg and X2: xReg and X3: xReg and X4: xReg and X5: xReg and X6: xReg and X7: xReg and X8: xReg and X9: xReg and X10: xReg and X11: xReg and X12: xReg and X13: xReg and X14: xReg and X15: xReg and X16: xReg and X17: xReg and X18: xReg and X19: xReg and X20: xReg and X21: xReg and X22: xReg and X23: xReg and X24: xReg and X25: xReg and X26: xReg and X27: xReg and X28: xReg and X29: xReg and X30: xReg (* XZero and XSP are both encoded as 31 but the interpretation depends on the instruction *) val XZero: xReg and XSP: xReg val X_MLHeapLimit: xReg (* ML Heap limit pointer *) and X_MLAssemblyInt: xReg (* ML assembly interface pointer. *) and X_MLHeapAllocPtr: xReg (* ML Heap allocation pointer. *) and X_MLStackPtr: xReg (* ML Stack pointer. *) and X_LinkReg: xReg (* Link reg - return address *) - val genRetCode: code -> unit + + (* Jump to the address in the register and put the address of the + next instruction into X30. *) + val genBranchAndLinkReg: xReg * code -> unit + (* Jump to the address in the register. *) + and genBranchRegister: xReg * code -> unit + (* Jump to the address in the register and hint this is a return. *) + and genReturnRegister: xReg * code -> unit (* Push a register to the ML stack *) val genPushReg: xReg * code -> unit (* Pop a register from the ML stack. *) val genPopReg: xReg * code -> unit (* Move a short constant to a register. Currently limited to unsigned 16-bits. 
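For example, a worked encoding, added for illustration: the tagged constant 1 is represented as 2*1+1 = 3, and moving it to X0 emits mov x0,#3, i.e. movz, encoded as 0wxD2800000 orb (0w3 << 0w5) orb 0w0 = 0wxD2800060.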
*) val genMoveShortConstToReg: xReg * int * code -> unit (* Move a long constant to a register. *) val genLoadConstant: xReg * machineWord * code -> unit (* Add a 12-bit constant, possibly shifted by 12 bits *) val genAddRegConstant: {sReg: xReg, dReg: xReg, cValue: int, shifted: bool} * code -> unit - (* Load an aligned word using a 12-bit offset. *) + (* Load/Store an aligned word using a 12-bit offset. *) val loadRegAligned: {dest: xReg, base: xReg, wordOffset: int} * code -> unit + and storeRegAligned: {dest: xReg, base: xReg, wordOffset: int} * code -> unit (* copyCode - create the vector of code and update the closure reference to point to it. *) val generateCode: {code: code, maxStack: int, resultClosure: closureRef} -> unit exception Fallback (* During development only. *) structure Sharing: sig type code = code type closureRef = closureRef type instr = instr type xReg = xReg end end; diff --git a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64GenCode.sml b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64GenCode.sml index 3f0e1ba6..9a378f54 100644 --- a/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64GenCode.sml +++ b/mlsource/MLCompiler/CodeTree/Arm64Code/Arm64GenCode.sml @@ -1,1283 +1,1332 @@ (* Copyright (c) 2021 David C. J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public Licence version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public Licence for more details. You should have received a copy of the GNU Lesser General Public Licence along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor Arm64GenCode ( structure FallBackCG: GENCODESIG and BackendTree: BackendIntermediateCodeSig and CodeArray: CODEARRAYSIG and Arm64Assembly: Arm64Assembly and Debug: DEBUG sharing FallBackCG.Sharing = BackendTree.Sharing = CodeArray.Sharing = Arm64Assembly.Sharing ) : GENCODESIG = struct open BackendTree CodeArray Arm64Assembly Address exception InternalError = Misc.InternalError (* tag a short constant *) fun tag c = 2 * c + 1 (* shift a short constant, but don't set tag bit *) fun semitag c = 2 * c (* Remove items from the stack. If the second argument is true the value on the top of the stack has to be moved. TODO: This works only for offsets up to 256 words. *) fun resetStack(0, _, _) = () | resetStack(nItems, true, code) = ( genPopReg(X0, code); resetStack(nItems, false, code); genPushReg(X0, code) ) | resetStack(nItems, false, code) = genAddRegConstant({sReg=X_MLStackPtr, dReg=X_MLStackPtr, cValue=Word.toInt wordSize * nItems, shifted=false}, code) (* Load a constant and push it to the stack. *) fun pushConst(w, code) = if isShort w andalso toShort w < 0w32768 (* So tagged value will fit. *) then let val cVal = tag(Word.toInt(toShort w)) in genMoveShortConstToReg(X0, cVal, code); genPushReg(X0, code) end else (genLoadConstant(X0, w, code); genPushReg(X0, code)) (* Load a local value. TODO: the offset is limited to 12-bits. *) fun genLocal(offset, code) = (loadRegAligned({dest=X0, base=X_MLStackPtr, wordOffset=offset}, code); genPushReg(X0, code)) (* Load a value at an offset from the address on the top of the stack. TODO: the offset is limited to 12-bits. 
*) fun genIndirect(offset, code) = (genPopReg(X0, code); loadRegAligned({dest=X0, base=X0, wordOffset=offset}, code); genPushReg(X0, code)) - - fun genOpcode _ = raise Fallback - - fun genContainer _ = raise Fallback - fun createLabel _ = raise Fallback - fun setLabel _ = raise Fallback - fun genSetStackVal _ = raise Fallback - fun putBranchInstruction _ = raise Fallback - fun genRaiseEx _ = raise Fallback - fun genPushHandler _ = raise Fallback - fun genLdexc _ = raise Fallback - fun genCase _ = raise Fallback - fun genTuple _ = raise Fallback - fun genMoveToContainer _ = raise Fallback - fun genEqualWordConst _ = raise Fallback - fun genAllocMutableClosure _ = raise Fallback - fun genMoveToMutClosure _ = raise Fallback - fun genLock _ = raise Fallback - fun genClosure _ = raise Fallback - fun genCallClosure _ = raise Fallback - fun genTailCall _ = raise Fallback - fun genIsTagged _ = raise Fallback - fun genDoubleToFloat _ = raise Fallback - fun genRealToInt _ = raise Fallback - fun genFloatToInt _ = raise Fallback + + fun toDo() = raise Fallback + + fun genOpcode _ = toDo() + + fun genContainer _ = toDo() + fun createLabel _ = toDo() + fun setLabel _ = toDo() + fun genSetStackVal _ = toDo() + fun putBranchInstruction _ = toDo() + fun genRaiseEx _ = toDo() + fun genPushHandler _ = toDo() + fun genLdexc _ = toDo() + fun genCase _ = toDo() + fun genTuple _ = toDo() + fun genMoveToContainer _ = toDo() + fun genEqualWordConst _ = toDo() + fun genAllocMutableClosure _ = toDo() + fun genMoveToMutClosure _ = toDo() + fun genLock _ = toDo() + fun genClosure _ = toDo() + fun genIsTagged _ = toDo() + fun genDoubleToFloat _ = toDo() + fun genRealToInt _ = toDo() + fun genFloatToInt _ = toDo() val opcode_notBoolean = 0 val opcode_cellLength = 0 and opcode_cellFlags = 0 and opcode_clearMutable = 0 and opcode_atomicExchAdd = 0 and opcode_atomicReset = 0 and opcode_longWToTagged = 0 and opcode_signedToLongW = 0 and opcode_unsignedToLongW = 0 and opcode_realAbs = 0 and opcode_realNeg = 0 and opcode_fixedIntToReal = 0 and opcode_fixedIntToFloat = 0 and opcode_floatToReal = 0 and opcode_floatAbs = 0 and opcode_floatNeg = 0 val opcode_equalWord = 0 and opcode_lessSigned = 0 and opcode_lessUnsigned = 0 and opcode_lessEqSigned = 0 and opcode_lessEqUnsigned = 0 and opcode_greaterSigned = 0 and opcode_greaterUnsigned = 0 and opcode_greaterEqSigned = 0 and opcode_greaterEqUnsigned = 0 val opcode_fixedAdd = 0 val opcode_fixedSub = 0 val opcode_fixedMult = 0 val opcode_fixedQuot = 0 val opcode_fixedRem = 0 val opcode_wordAdd = 0 val opcode_wordSub = 0 val opcode_wordMult = 0 val opcode_wordDiv = 0 val opcode_wordMod = 0 val opcode_wordAnd = 0 val opcode_wordOr = 0 val opcode_wordXor = 0 val opcode_wordShiftLeft = 0 val opcode_wordShiftRLog = 0 val opcode_wordShiftRArith = 0 val opcode_allocByteMem = 0 val opcode_lgWordEqual = 0 val opcode_lgWordLess = 0 val opcode_lgWordLessEq = 0 val opcode_lgWordGreater = 0 val opcode_lgWordGreaterEq = 0 val opcode_lgWordAdd = 0 val opcode_lgWordSub = 0 val opcode_lgWordMult = 0 val opcode_lgWordDiv = 0 val opcode_lgWordMod = 0 val opcode_lgWordAnd = 0 val opcode_lgWordOr = 0 val opcode_lgWordXor = 0 val opcode_lgWordShiftLeft = 0 val opcode_lgWordShiftRLog = 0 val opcode_lgWordShiftRArith = 0 val opcode_realEqual = 0 val opcode_realLess = 0 val opcode_realLessEq = 0 val opcode_realGreater = 0 val opcode_realGreaterEq = 0 val opcode_realUnordered = 0 val opcode_realAdd = 0 val opcode_realSub = 0 val opcode_realMult = 0 val opcode_realDiv = 0 val opcode_floatEqual = 0 val 
opcode_floatLess = 0 val opcode_floatLessEq = 0 val opcode_floatGreater = 0 val opcode_floatGreaterEq = 0 val opcode_floatUnordered = 0 val opcode_floatAdd = 0 val opcode_floatSub = 0 val opcode_floatMult = 0 val opcode_floatDiv = 0 val opcode_getThreadId = 0 val opcode_allocWordMemory = 0 val opcode_alloc_ref = 0 val opcode_loadMLWord = 0 val opcode_loadMLByte = 0 val opcode_loadC8 = 0 val opcode_loadC16 = 0 val opcode_loadC32 = 0 val opcode_loadC64 = 0 val opcode_loadCFloat = 0 val opcode_loadCDouble = 0 val opcode_loadUntagged = 0 val opcode_storeMLWord = 0 val opcode_storeMLByte = 0 val opcode_storeC8 = 0 val opcode_storeC16 = 0 val opcode_storeC32 = 0 val opcode_storeC64 = 0 val opcode_storeCFloat = 0 val opcode_storeCDouble = 0 val opcode_storeUntagged = 0 val opcode_blockMoveWord = 0 val opcode_blockMoveByte = 0 val opcode_blockEqualByte = 0 val opcode_blockCompareByte = 0 val opcode_deleteHandler = 0 val opcode_allocCSpace = 0 val opcode_freeCSpace = 0 val opcode_arbAdd = 0 val opcode_arbSubtract = 0 val opcode_arbMultiply = 0 val Jump = 0 val JumpBack = 0 val JumpFalse = 0 val JumpTrue = 0 val SetHandler = 0 val word0 = toMachineWord 0; val DummyValue : machineWord = word0; (* used as result of "raise e" etc. *) type caseForm = { cases : (backendIC * word) list, test : backendIC, caseType: caseType, default : backendIC } (* Where the result, if any, should go *) datatype whereto = NoResult (* discard result *) | ToStack (* Need a result but it can stay on the pseudo-stack *); (* Are we at the end of the function. *) datatype tail = EndOfProc | NotEnd (* Code generate a function or global declaration *) fun codegen (pt, cvec, resultClosure, numOfArgs, localCount, parameters) = let datatype decEntry = StackAddr of int | Empty val decVec = Array.array (localCount, Empty) (* Count of number of items on the stack. *) val realstackptr = ref 1 (* The closure ptr is already there *) (* Maximum size of the stack. *) val maxStack = ref 1 (* Push a value onto the stack. *) fun incsp () = ( realstackptr := !realstackptr + 1; if !realstackptr > !maxStack then maxStack := !realstackptr else () ) (* An entry has been removed from the stack. *) fun decsp () = realstackptr := !realstackptr - 1; fun pushLocalStackValue addr = ( genLocal(!realstackptr + addr, cvec); incsp() ) (* generates code from the tree *) fun gencde (pt : backendIC, whereto : whereto, tailKind : tail, loopAddr) : unit = let (* Save the stack pointer value here. We may want to reset the stack. *) val oldsp = !realstackptr; (* Operations on ML memory always have the base as an ML address. Word operations are always word aligned. The higher level will have extracted any constant offset and scaled it if necessary. That's helpful for the X86 but not for the interpreter. We have to turn them back into indexes. *) fun genMLAddress({base, index, offset}, scale) = ( gencde (base, ToStack, NotEnd, loopAddr); offset mod scale = 0 orelse raise InternalError "genMLAddress"; case (index, offset div scale) of (NONE, soffset) => (pushConst (toMachineWord soffset, cvec); incsp()) | (SOME indexVal, 0) => gencde (indexVal, ToStack, NotEnd, loopAddr) | (SOME indexVal, soffset) => ( gencde (indexVal, ToStack, NotEnd, loopAddr); pushConst (toMachineWord soffset, cvec); genOpcode(opcode_wordAdd, cvec) ) ) (* Load the address, index value and offset for non-byte operations. Because the offset has already been scaled by the size of the operand we have to load the index and offset separately. 
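For example, for a C access at byte offset 16 the base, the index (or a pushed 0 if there is none) and the constant 16 all end up on the stack unchanged, whereas the ML case in genMLAddress above would instead push 16 div scale as a word index.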
            *)
            fun genCAddress{base, index, offset} =
            (
                gencde (base, ToStack, NotEnd, loopAddr);
                case index of
                    NONE => (pushConst (toMachineWord 0, cvec); incsp())
                |   SOME indexVal => gencde (indexVal, ToStack, NotEnd, loopAddr);
                pushConst (toMachineWord offset, cvec);
                incsp()
            )

            val () =
                case pt of
                    BICEval evl => genEval (evl, tailKind)

                |   BICExtract ext =>
                    (* This may simply be used to discard a value which isn't used on this branch. *)
                    if whereto = NoResult then ()
                    else
                    (
                        case ext of
                            BICLoadArgument locn =>
                                (* The register arguments appear in order on the stack,
                                   followed by the stack arguments in reverse order. *)
                                if locn < 8
                                then pushLocalStackValue (locn+1)
                                else pushLocalStackValue (numOfArgs-locn+8)

                        |   BICLoadLocal locn =>
                            (
                                case Array.sub (decVec, locn) of
                                    StackAddr n => pushLocalStackValue (~ n)
                                |   _ => (* Should be on the stack, not a function. *)
                                        raise InternalError "locaddr: bad stack address"
                            )

                        |   BICLoadClosure locn =>
                            (
                                pushLocalStackValue ~1; (* The closure itself. *)
                                genIndirect(locn+1 (* The first word is the code *), cvec)
                            )

                        |   BICLoadRecursive =>
                                pushLocalStackValue ~1 (* The closure itself - first value on the stack. *)
                    )

                |   BICField {base, offset} =>
                        (gencde (base, ToStack, NotEnd, loopAddr); genIndirect (offset, cvec))

                |   BICLoadContainer {base, offset} =>
                        (gencde (base, ToStack, NotEnd, loopAddr); genIndirect (offset, cvec))

                |   BICLambda lam => genProc (lam, false, fn () => ())

                |   BICConstnt(w, _) => (pushConst (w, cvec); incsp ())

                |   BICCond (testPart, thenPart, elsePart) =>
                        genCond (testPart, thenPart, elsePart, whereto, tailKind, loopAddr)

                |   BICNewenv(decls, exp) =>
                    let
                        (* Processes a list of entries. *)

                        (* Mutually recursive declarations. May be either lambdas or constants.
                           Recurse down the list pushing the addresses of the closure vectors,
                           then unwind the recursion and fill them in. *)
                        fun genMutualDecs [] = ()
                        |   genMutualDecs ({lambda, addr, ...} :: otherDecs) =
                                genProc (lambda, true,
                                    fn() =>
                                    (
                                        Array.update (decVec, addr, StackAddr (! realstackptr));
                                        genMutualDecs (otherDecs)
                                    ))

                        fun codeDecls(BICRecDecs dl) = genMutualDecs dl

                        |   codeDecls(BICDecContainer{size, addr}) =
                            (
                                (* If this is a container we have to process it here otherwise
                                   it will be removed in the stack adjustment code. *)
                                genContainer(size, cvec); (* Push the address of this container. *)
                                realstackptr := !realstackptr + size + 1; (* Pushes N words plus the address. *)
                                Array.update (decVec, addr, StackAddr(!realstackptr))
                            )

                        |   codeDecls(BICDeclar{value, addr, ...}) =
                            (
                                gencde (value, ToStack, NotEnd, loopAddr);
                                Array.update (decVec, addr, StackAddr(!realstackptr))
                            )

                        |   codeDecls(BICNullBinding exp) = gencde (exp, NoResult, NotEnd, loopAddr)
                    in
                        List.app codeDecls decls;
                        gencde (exp, whereto, tailKind, loopAddr)
                    end

                |   BICBeginLoop {loop=body, arguments} =>
                    (* Execute the body which will contain at least one Loop instruction.
                       There will also be path(s) which don't contain Loops and these
                       will drop through. *)
                    let
                        val args = List.map #1 arguments

                        (* Evaluate each of the arguments, pushing the result onto the stack. *)
                        fun genLoopArg ({addr, value, ...}) =
                        (
                            gencde (value, ToStack, NotEnd, loopAddr);
                            Array.update (decVec, addr, StackAddr (!realstackptr));
                            !realstackptr (* Return the position on the stack. *)
                        )

                        val argIndexList = map genLoopArg args;
                        val startSp = ! realstackptr; (* Remember the current top of stack. *)
                        val startLoop = createLabel ()
                        val () = setLabel(startLoop, cvec) (* Start of loop *)
                    in
                        (* Process the body, passing the jump-back address down for the
                           Loop instruction(s).
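                           As a sketch (the ML source here is hypothetical), a loop
                           equivalent to
                               fun count n = if n = 0 then 0 else count(n - 1)
                           pushes n once and records its slot in argIndexList; each Loop
                           then evaluates n - 1 on top of the stack, copies it over the
                           old slot with genSetStackVal and branches back to startLoop,
                           so no call and no new stack frame is involved.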
*) gencde (body, whereto, tailKind, SOME(startLoop, startSp, argIndexList)) (* Leave the arguments on the stack. They can be cleared later if needed. *) end | BICLoop argList => (* Jump back to the enclosing BeginLoop. *) let val (startLoop, startSp, argIndexList) = case loopAddr of SOME l => l | NONE => raise InternalError "No BeginLoop for Loop instr" (* Evaluate the arguments. First push them to the stack because evaluating an argument may depend on the current value of others. Only when we've evaluated all of them can we overwrite the original argument positions. *) fun loadArgs ([], []) = !realstackptr - startSp (* The offset of all the args. *) | loadArgs (arg:: argList, _ :: argIndexList) = let (* Evaluate all the arguments. *) val () = gencde (arg, ToStack, NotEnd, NONE); val argOffset = loadArgs(argList, argIndexList); in genSetStackVal(argOffset, cvec); (* Copy the arg over. *) decsp(); (* The argument has now been popped. *) argOffset end | loadArgs _ = raise InternalError "loadArgs: Mismatched arguments"; val _: int = loadArgs(List.map #1 argList, argIndexList) in if !realstackptr <> startSp then resetStack (!realstackptr - startSp, false, cvec) (* Remove any local variables. *) else (); (* Jump back to the start of the loop. *) putBranchInstruction(JumpBack, startLoop, cvec) end | BICRaise exp => ( gencde (exp, ToStack, NotEnd, loopAddr); genRaiseEx cvec ) | BICHandle {exp, handler, exPacketAddr} => let (* Save old handler *) val () = genPushHandler cvec val () = incsp () val handlerLabel = createLabel() val () = putBranchInstruction (SetHandler, handlerLabel, cvec) val () = incsp() (* Code generate the body; "NotEnd" because we have to come back to remove the handler; "ToStack" because delHandler needs a result to carry down. *) val () = gencde (exp, ToStack, NotEnd, loopAddr) (* Now get out of the handler and restore the old one. *) val () = genOpcode(opcode_deleteHandler, cvec) val skipHandler = createLabel() val () = putBranchInstruction (Jump, skipHandler, cvec) val () = realstackptr := oldsp val () = setLabel (handlerLabel, cvec) (* Push the exception packet and set the address. *) val () = genLdexc cvec val () = incsp () val () = Array.update (decVec, exPacketAddr, StackAddr(!realstackptr)) val () = gencde (handler, ToStack, NotEnd, loopAddr) (* Have to remove the exception packet. *) val () = resetStack(1, true, cvec) val () = decsp() (* Finally fix-up the jump around the handler *) val () = setLabel (skipHandler, cvec) in () end | BICCase ({cases, test, default, firstIndex, ...}) => let val () = gencde (test, ToStack, NotEnd, loopAddr) (* Label to jump to at the end of each case. *) val exitJump = createLabel() val () = if firstIndex = 0w0 then () else ( (* Subtract lower limit. Don't check for overflow. Instead allow large value to wrap around and check in "case" instruction. *) pushConst(toMachineWord firstIndex, cvec); genOpcode(opcode_wordSub, cvec) ) (* Generate the case instruction followed by the table of jumps. *) val nCases = List.length cases val caseLabels = genCase (nCases, cvec) val () = decsp () (* The default case, if any, follows the case statement. *) (* If we have a jump to the default set it to jump here. *) local fun fixDefault(NONE, defCase) = setLabel(defCase, cvec) | fixDefault(SOME _, _) = () in val () = ListPair.appEq fixDefault (cases, caseLabels) end val () = gencde (default, whereto, tailKind, loopAddr); fun genCases(SOME body, label) = ( (* First exit from the previous case or the default if this is the first. 
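                       Overall layout, illustratively, for something like
                           case x of 3 => a | 4 => b | _ => d
                       with firstIndex = 0w3:
                           <x>; pushConst 3; opcode_wordSub
                           case instruction with labels L0, L1
                           <d>                  the default drops straight out of the table
                           Jump exit; L0: <a>
                           Jump exit; L1: <b>
                           exit: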
                       *)
                        putBranchInstruction(Jump, exitJump, cvec);
                        (* Remove the result - the last case will leave it. *)
                        case whereto of ToStack => decsp () | NoResult => ();
                        (* Fix up the jump to come here. *)
                        setLabel(label, cvec);
                        gencde (body, whereto, tailKind, loopAddr)
                    )
                    |   genCases(NONE, _) = ()

                    val () = ListPair.appEq genCases (cases, caseLabels)

                    (* Finally set the exit jump to come here. *)
                    val () = setLabel (exitJump, cvec)
                in
                    ()
                end

                |   BICTuple recList =>
                    let
                        val size = List.length recList
                    in
                        (* Move the fields into the vector. *)
                        List.app(fn v => gencde (v, ToStack, NotEnd, loopAddr)) recList;
                        genTuple (size, cvec);
                        realstackptr := !realstackptr - (size - 1)
                    end

                |   BICSetContainer{container, tuple, filter} =>
                    (* Copy the contents of a tuple into a container. If the tuple is a
                       Tuple instruction we can avoid generating the tuple and then
                       unpacking it and simply copy the fields that make up the tuple
                       directly into the container. *)
                    (
                        case tuple of
                            BICTuple cl =>
                                (* Simply set the container from the values. *)
                            let
                                (* Load the address of the container. *)
                                val _ = gencde (container, ToStack, NotEnd, loopAddr);

                                fun setValues([], _, _) = ()
                                |   setValues(v::tl, sourceOffset, destOffset) =
                                    if sourceOffset < BoolVector.length filter
                                        andalso BoolVector.sub(filter, sourceOffset)
                                    then
                                    (
                                        gencde (v, ToStack, NotEnd, loopAddr);
                                        (* Move the entry into the container. This instruction
                                           pops the value to be moved but not the destination. *)
                                        genMoveToContainer(destOffset, cvec);
                                        decsp();
                                        setValues(tl, sourceOffset+1, destOffset+1)
                                    )
                                    else setValues(tl, sourceOffset+1, destOffset)
                            in
                                setValues(cl, 0, 0)
                                (* The container address is still on the stack. *)
                            end

                        |   _ =>
                            let (* General case. *)
                                (* First the target tuple, then the container. *)
                                val () = gencde (tuple, ToStack, NotEnd, loopAddr)
                                val () = gencde (container, ToStack, NotEnd, loopAddr)
                                val last = BoolVector.foldli(fn (i, true, _) => i | (_, false, n) => n) ~1 filter

                                fun copy (sourceOffset, destOffset) =
                                    if BoolVector.sub(filter, sourceOffset)
                                    then
                                    (
                                        (* Duplicate the tuple address. *)
                                        genLocal(1, cvec);
                                        genIndirect(sourceOffset, cvec);
                                        genMoveToContainer(destOffset, cvec);
                                        if sourceOffset = last then () else copy (sourceOffset+1, destOffset+1)
                                    )
                                    else copy(sourceOffset+1, destOffset)
                            in
                                copy (0, 0)
                                (* The container and tuple addresses are still on the stack. *)
                            end
                    )

                |   BICTagTest { test, tag, ... } =>
                    (
                        gencde (test, ToStack, NotEnd, loopAddr);
                        genEqualWordConst(tag, cvec)
                    )

                |   BICNullary {oper=BuiltIns.GetCurrentThreadId} =>
                    (
                        genOpcode(opcode_getThreadId, cvec);
                        incsp()
                    )

                |   BICNullary {oper=BuiltIns.CheckRTSException} =>
                    ( (* Do nothing. This is done in the RTS call. *) )

                |   BICNullary {oper=BuiltIns.CPUPause} =>
                    ( (* Do nothing. It's really only a hint.
*) ) | BICUnary { oper, arg1 } => let open BuiltIns val () = gencde (arg1, ToStack, NotEnd, loopAddr) in case oper of NotBoolean => genOpcode(opcode_notBoolean, cvec) | IsTaggedValue => genIsTagged cvec | MemoryCellLength => genOpcode(opcode_cellLength, cvec) | MemoryCellFlags => genOpcode(opcode_cellFlags, cvec) | ClearMutableFlag => genOpcode(opcode_clearMutable, cvec) | AtomicReset => genOpcode(opcode_atomicReset, cvec) | LongWordToTagged => genOpcode(opcode_longWToTagged, cvec) | SignedToLongWord => genOpcode(opcode_signedToLongW, cvec) | UnsignedToLongWord => genOpcode(opcode_unsignedToLongW, cvec) | RealAbs PrecDouble => genOpcode(opcode_realAbs, cvec) | RealNeg PrecDouble => genOpcode(opcode_realNeg, cvec) | RealFixedInt PrecDouble => genOpcode(opcode_fixedIntToReal, cvec) | RealAbs PrecSingle => genOpcode(opcode_floatAbs, cvec) | RealNeg PrecSingle => genOpcode(opcode_floatNeg, cvec) | RealFixedInt PrecSingle => genOpcode(opcode_fixedIntToFloat, cvec) | FloatToDouble => genOpcode(opcode_floatToReal, cvec) | DoubleToFloat rnding => genDoubleToFloat(rnding, cvec) | RealToInt (PrecDouble, rnding) => genRealToInt(rnding, cvec) | RealToInt (PrecSingle, rnding) => genFloatToInt(rnding, cvec) | TouchAddress => resetStack(1, false, cvec) (* Discard this *) | AllocCStack => genOpcode(opcode_allocCSpace, cvec) end | BICBinary { oper=BuiltIns.WordComparison{test=BuiltIns.TestEqual, ...}, arg1, arg2=BICConstnt(w, _) } => let val () = gencde (arg1, ToStack, NotEnd, loopAddr) in genEqualWordConst(toShort w, cvec) end | BICBinary { oper=BuiltIns.WordComparison{test=BuiltIns.TestEqual, ...}, arg1=BICConstnt(w, _), arg2 } => let val () = gencde (arg2, ToStack, NotEnd, loopAddr) in genEqualWordConst(toShort w, cvec) end | BICBinary { oper, arg1, arg2 } => let open BuiltIns val () = gencde (arg1, ToStack, NotEnd, loopAddr) val () = gencde (arg2, ToStack, NotEnd, loopAddr) in case oper of WordComparison{test=TestEqual, ...} => genOpcode(opcode_equalWord, cvec) | WordComparison{test=TestLess, isSigned=true} => genOpcode(opcode_lessSigned, cvec) | WordComparison{test=TestLessEqual, isSigned=true} => genOpcode(opcode_lessEqSigned, cvec) | WordComparison{test=TestGreater, isSigned=true} => genOpcode(opcode_greaterSigned, cvec) | WordComparison{test=TestGreaterEqual, isSigned=true} => genOpcode(opcode_greaterEqSigned, cvec) | WordComparison{test=TestLess, isSigned=false} => genOpcode(opcode_lessUnsigned, cvec) | WordComparison{test=TestLessEqual, isSigned=false} => genOpcode(opcode_lessEqUnsigned, cvec) | WordComparison{test=TestGreater, isSigned=false} => genOpcode(opcode_greaterUnsigned, cvec) | WordComparison{test=TestGreaterEqual, isSigned=false} => genOpcode(opcode_greaterEqUnsigned, cvec) | WordComparison{test=TestUnordered, ...} => raise InternalError "WordComparison: TestUnordered" | PointerEq => genOpcode(opcode_equalWord, cvec) | FixedPrecisionArith ArithAdd => genOpcode(opcode_fixedAdd, cvec) | FixedPrecisionArith ArithSub => genOpcode(opcode_fixedSub, cvec) | FixedPrecisionArith ArithMult => genOpcode(opcode_fixedMult, cvec) | FixedPrecisionArith ArithQuot => genOpcode(opcode_fixedQuot, cvec) | FixedPrecisionArith ArithRem => genOpcode(opcode_fixedRem, cvec) | FixedPrecisionArith ArithDiv => raise InternalError "TODO: FixedPrecisionArith ArithDiv" | FixedPrecisionArith ArithMod => raise InternalError "TODO: FixedPrecisionArith ArithMod" | WordArith ArithAdd => genOpcode(opcode_wordAdd, cvec) | WordArith ArithSub => genOpcode(opcode_wordSub, cvec) | WordArith ArithMult => 
genOpcode(opcode_wordMult, cvec) | WordArith ArithDiv => genOpcode(opcode_wordDiv, cvec) | WordArith ArithMod => genOpcode(opcode_wordMod, cvec) | WordArith _ => raise InternalError "WordArith - unimplemented instruction" | WordLogical LogicalAnd => genOpcode(opcode_wordAnd, cvec) | WordLogical LogicalOr => genOpcode(opcode_wordOr, cvec) | WordLogical LogicalXor => genOpcode(opcode_wordXor, cvec) | WordShift ShiftLeft => genOpcode(opcode_wordShiftLeft, cvec) | WordShift ShiftRightLogical => genOpcode(opcode_wordShiftRLog, cvec) | WordShift ShiftRightArithmetic => genOpcode(opcode_wordShiftRArith, cvec) | AllocateByteMemory => genOpcode(opcode_allocByteMem, cvec) | LargeWordComparison TestEqual => genOpcode(opcode_lgWordEqual, cvec) | LargeWordComparison TestLess => genOpcode(opcode_lgWordLess, cvec) | LargeWordComparison TestLessEqual => genOpcode(opcode_lgWordLessEq, cvec) | LargeWordComparison TestGreater => genOpcode(opcode_lgWordGreater, cvec) | LargeWordComparison TestGreaterEqual => genOpcode(opcode_lgWordGreaterEq, cvec) | LargeWordComparison TestUnordered => raise InternalError "LargeWordComparison: TestUnordered" | LargeWordArith ArithAdd => genOpcode(opcode_lgWordAdd, cvec) | LargeWordArith ArithSub => genOpcode(opcode_lgWordSub, cvec) | LargeWordArith ArithMult => genOpcode(opcode_lgWordMult, cvec) | LargeWordArith ArithDiv => genOpcode(opcode_lgWordDiv, cvec) | LargeWordArith ArithMod => genOpcode(opcode_lgWordMod, cvec) | LargeWordArith _ => raise InternalError "LargeWordArith - unimplemented instruction" | LargeWordLogical LogicalAnd => genOpcode(opcode_lgWordAnd, cvec) | LargeWordLogical LogicalOr => genOpcode(opcode_lgWordOr, cvec) | LargeWordLogical LogicalXor => genOpcode(opcode_lgWordXor, cvec) | LargeWordShift ShiftLeft => genOpcode(opcode_lgWordShiftLeft, cvec) | LargeWordShift ShiftRightLogical => genOpcode(opcode_lgWordShiftRLog, cvec) | LargeWordShift ShiftRightArithmetic => genOpcode(opcode_lgWordShiftRArith, cvec) | RealComparison (TestEqual, PrecDouble) => genOpcode(opcode_realEqual, cvec) | RealComparison (TestLess, PrecDouble) => genOpcode(opcode_realLess, cvec) | RealComparison (TestLessEqual, PrecDouble) => genOpcode(opcode_realLessEq, cvec) | RealComparison (TestGreater, PrecDouble) => genOpcode(opcode_realGreater, cvec) | RealComparison (TestGreaterEqual, PrecDouble) => genOpcode(opcode_realGreaterEq, cvec) | RealComparison (TestUnordered, PrecDouble) => genOpcode(opcode_realUnordered, cvec) | RealComparison (TestEqual, PrecSingle) => genOpcode(opcode_floatEqual, cvec) | RealComparison (TestLess, PrecSingle) => genOpcode(opcode_floatLess, cvec) | RealComparison (TestLessEqual, PrecSingle) => genOpcode(opcode_floatLessEq, cvec) | RealComparison (TestGreater, PrecSingle) => genOpcode(opcode_floatGreater, cvec) | RealComparison (TestGreaterEqual, PrecSingle) => genOpcode(opcode_floatGreaterEq, cvec) | RealComparison (TestUnordered, PrecSingle) => genOpcode(opcode_floatUnordered, cvec) | RealArith (ArithAdd, PrecDouble) => genOpcode(opcode_realAdd, cvec) | RealArith (ArithSub, PrecDouble) => genOpcode(opcode_realSub, cvec) | RealArith (ArithMult, PrecDouble) => genOpcode(opcode_realMult, cvec) | RealArith (ArithDiv, PrecDouble) => genOpcode(opcode_realDiv, cvec) | RealArith (ArithAdd, PrecSingle) => genOpcode(opcode_floatAdd, cvec) | RealArith (ArithSub, PrecSingle) => genOpcode(opcode_floatSub, cvec) | RealArith (ArithMult, PrecSingle) => genOpcode(opcode_floatMult, cvec) | RealArith (ArithDiv, PrecSingle) => genOpcode(opcode_floatDiv, cvec) | RealArith _ => 
raise InternalError "RealArith - unimplemented instruction" | FreeCStack => genOpcode(opcode_freeCSpace, cvec) | AtomicExchangeAdd => genOpcode(opcode_atomicExchAdd, cvec) ; decsp() (* Removes one item from the stack. *) end | BICAllocateWordMemory {numWords as BICConstnt(length, _), flags as BICConstnt(flagByte, _), initial } => if isShort length andalso toShort length = 0w1 andalso isShort flagByte andalso toShort flagByte = 0wx40 then (* This is a very common case. *) ( gencde (initial, ToStack, NotEnd, loopAddr); genOpcode(opcode_alloc_ref, cvec) ) else let val () = gencde (numWords, ToStack, NotEnd, loopAddr) val () = gencde (flags, ToStack, NotEnd, loopAddr) val () = gencde (initial, ToStack, NotEnd, loopAddr) in genOpcode(opcode_allocWordMemory, cvec); decsp(); decsp() end | BICAllocateWordMemory { numWords, flags, initial } => let val () = gencde (numWords, ToStack, NotEnd, loopAddr) val () = gencde (flags, ToStack, NotEnd, loopAddr) val () = gencde (initial, ToStack, NotEnd, loopAddr) in genOpcode(opcode_allocWordMemory, cvec); decsp(); decsp() end | BICLoadOperation { kind=LoadStoreMLWord _, address={base, index=NONE, offset}} => ( (* If the index is a constant, frequently zero, we can use indirection. The offset is a byte count so has to be divided by the word size but it should always be an exact multiple. *) gencde (base, ToStack, NotEnd, loopAddr); offset mod Word.toInt wordSize = 0 orelse raise InternalError "gencde: BICLoadOperation - not word multiple"; genIndirect (offset div Word.toInt wordSize, cvec) ) | BICLoadOperation { kind=LoadStoreMLWord _, address} => ( genMLAddress(address, Word.toInt wordSize); genOpcode(opcode_loadMLWord, cvec); decsp() ) | BICLoadOperation { kind=LoadStoreMLByte _, address} => ( genMLAddress(address, 1); genOpcode(opcode_loadMLByte, cvec); decsp() ) | BICLoadOperation { kind=LoadStoreC8, address} => ( genCAddress address; genOpcode(opcode_loadC8, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC16, address} => ( genCAddress address; genOpcode(opcode_loadC16, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC32, address} => ( genCAddress address; genOpcode(opcode_loadC32, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC64, address} => ( genCAddress address; genOpcode(opcode_loadC64, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreCFloat, address} => ( genCAddress address; genOpcode(opcode_loadCFloat, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreCDouble, address} => ( genCAddress address; genOpcode(opcode_loadCDouble, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreUntaggedUnsigned, address} => ( genMLAddress(address, Word.toInt wordSize); genOpcode(opcode_loadUntagged, cvec); decsp() ) | BICStoreOperation { kind=LoadStoreMLWord _, address, value } => ( genMLAddress(address, Word.toInt wordSize); gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeMLWord, cvec); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreMLByte _, address, value } => ( genMLAddress(address, 1); gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeMLByte, cvec); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC8, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC8, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC16, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC16, cvec); decsp(); decsp(); decsp() ) | 
BICStoreOperation { kind=LoadStoreC32, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC32, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC64, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC64, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreCFloat, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeCFloat, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreCDouble, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeCDouble, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreUntaggedUnsigned, address, value} => ( genMLAddress(address, Word.toInt wordSize); gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeUntagged, cvec); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpMove{isByteMove=true}, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, 1); genMLAddress(destRight, 1); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockMoveByte, cvec); decsp(); decsp(); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpMove{isByteMove=false}, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, Word.toInt wordSize); genMLAddress(destRight, Word.toInt wordSize); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockMoveWord, cvec); decsp(); decsp(); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpEqualByte, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, 1); genMLAddress(destRight, 1); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockEqualByte, cvec); decsp(); decsp(); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpCompareByte, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, 1); genMLAddress(destRight, 1); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockCompareByte, cvec); decsp(); decsp(); decsp(); decsp() ) | BICArbitrary { oper, arg1, arg2, ... } => let open BuiltIns val () = gencde (arg1, ToStack, NotEnd, loopAddr) val () = gencde (arg2, ToStack, NotEnd, loopAddr) in case oper of ArithAdd => genOpcode(opcode_arbAdd, cvec) | ArithSub => genOpcode(opcode_arbSubtract, cvec) | ArithMult => genOpcode(opcode_arbMultiply, cvec) | _ => raise InternalError "Unknown arbitrary precision operation"; decsp() (* Removes one item from the stack. *) end in (* body of gencde *) (* This ensures that there is precisely one item on the stack if whereto = ToStack and no items if whereto = NoResult. *) case whereto of ToStack => let val newsp = oldsp + 1; val adjustment = !realstackptr - newsp val () = if adjustment = 0 then () else if adjustment < ~1 then raise InternalError ("gencde: bad adjustment " ^ Int.toString adjustment) (* Hack for declarations that should push values, but don't *) else if adjustment = ~1 then pushConst (DummyValue, cvec) else resetStack (adjustment, true, cvec) in realstackptr := newsp end | NoResult => let val adjustment = !realstackptr - oldsp val () = if adjustment = 0 then () else if adjustment < 0 then raise InternalError ("gencde: bad adjustment " ^ Int.toString adjustment) else resetStack (adjustment, false, cvec) in realstackptr := oldsp end end (* gencde *) (* doNext is only used for mutually recursive functions where a function may not be able to fill in its closure if it does not have all the remaining declarations. 
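       For example (hypothetical source), given
           fun f x = g x and g x = f x
       genProc allocates a mutable closure for f, calls doNext() which does
       the same for g, and only as the recursion unwinds are the free
       variables copied in with genMoveToMutClosure and each closure locked
       with genLock.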
    *)
    (* TODO: This always creates the closure on the heap even when makeClosure is false. *)
    and genProc ({ closure=[], localCount, body, argTypes, name, ...}: bicLambdaForm,
                 mutualDecs, doNext: unit -> unit) : unit =
    let
        (* Create a one word item for the closure. This is returned for recursive
           references and filled in with the address of the code when we've finished. *)
        val closure = makeConstantClosure()
        val newCode : code = codeCreate(name, parameters);

        (* Code-gen function. No non-local references. *)
        val () = codegen (body, newCode, closure, List.length argTypes, localCount, parameters);
        val () = pushConst(closureAsAddress closure, cvec);
        val () = incsp();
    in
        if mutualDecs then doNext () else ()
    end

    |   genProc ({ localCount, body, name, argTypes, closure, ...}, mutualDecs, doNext) =
        let (* Full closure required. *)
            val resClosure = makeConstantClosure()
            val newCode = codeCreate (name, parameters)
            (* Code-gen function. *)
            val () = codegen (body, newCode, resClosure, List.length argTypes, localCount, parameters)
            val closureVars = List.length closure (* Size excluding the code address *)
        in
            if mutualDecs
            then
            let (* Have to make the closure now and fill it in later. *)
                val () = pushConst(toMachineWord resClosure, cvec)
                val () = genAllocMutableClosure(closureVars, cvec)
                val () = incsp ()
                val entryAddr : int = !realstackptr
                val () = doNext () (* Any mutually recursive functions. *)

                (* Push the address of the vector - if we have processed other closures
                   the vector will no longer be on the top of the stack. *)
                val () = pushLocalStackValue (~ entryAddr)

                (* Load items for the closure. *)
                fun loadItems ([], _) = ()
                |   loadItems (v :: vs, addr : int) =
                    let
                        (* Generate an item and move it into the closure. *)
                        val () = gencde (BICExtract v, ToStack, NotEnd, NONE)
                        (* The closure "address" excludes the code address. *)
                        val () = genMoveToMutClosure(addr, cvec)
                        val () = decsp ()
                    in
                        loadItems (vs, addr + 1)
                    end

                val () = loadItems (closure, 0)
                val () = genLock cvec (* Lock it. *)

                (* Remove the extra reference. *)
                val () = resetStack (1, false, cvec)
            in
                realstackptr := !realstackptr - 1
            end
            else
            let (* Put it on the stack. *)
                val () = pushConst (toMachineWord resClosure, cvec)
                val () = incsp ()
                val () = List.app (fn pt => gencde (BICExtract pt, ToStack, NotEnd, NONE)) closure
                val () = genClosure (closureVars, cvec)
            in
                realstackptr := !realstackptr - closureVars
            end
        end

    and genCond (testCode, thenCode, elseCode, whereto, tailKind, loopAddr) =
    let
        (* andalso and orelse are turned into conditionals with constants.
           Convert this into a series of tests. *)
        fun genTest(BICConstnt(w, _), jumpOn, targetLabel) =
            let
                val cVal = case toShort w of 0w0 => false | 0w1 => true | _ => raise InternalError "genTest"
            in
                if cVal = jumpOn
                then putBranchInstruction (Jump, targetLabel, cvec)
                else ()
            end

        |   genTest(BICUnary { oper=BuiltIns.NotBoolean, arg1 }, jumpOn, targetLabel) =
                genTest(arg1, not jumpOn, targetLabel)

        |   genTest(BICCond (testPart, thenPart, elsePart), jumpOn, targetLabel) =
            let
                val toElse = createLabel()
                and exitJump = createLabel()
            in
                genTest(testPart, false, toElse);
                genTest(thenPart, jumpOn, targetLabel);
                putBranchInstruction (Jump, exitJump, cvec);
                setLabel (toElse, cvec);
                genTest(elsePart, jumpOn, targetLabel);
                setLabel (exitJump, cvec)
            end

        |   genTest(testCode, jumpOn, targetLabel) =
            (
                gencde (testCode, ToStack, NotEnd, loopAddr);
                putBranchInstruction(if jumpOn then JumpTrue else JumpFalse, targetLabel, cvec);
                decsp() (* conditional branch pops a value.
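                   For instance, `a andalso b` arrives here as a conditional with
                   a constant false arm, essentially BICCond(a, b, false). genTest
                   flattens it into a pair of conditional branches, so no boolean
                   value is ever pushed; a constant arm generates either a single
                   Jump or no code at all.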
                   *)
            )

        val toElse = createLabel()
        and exitJump = createLabel()

        val () = genTest(testCode, false, toElse)
        val () = gencde (thenCode, whereto, tailKind, loopAddr)

        (* Get rid of the result from the stack. If there is a result
           then the ``else-part'' will push it. *)
        val () = case whereto of ToStack => decsp () | NoResult => ()

        val () = putBranchInstruction (Jump, exitJump, cvec)

        (* start of "else part" *)
        val () = setLabel (toElse, cvec)
        val () = gencde (elseCode, whereto, tailKind, loopAddr)
        val () = setLabel (exitJump, cvec)
    in
        ()
    end (* genCond *)

    and genEval (eval, tailKind : tail) : unit =
    let
        val argList : backendIC list = List.map #1 (#argList eval)
        val argsToPass : int = List.length argList;

        (* Load arguments *)
        fun loadArgs [] = ()
        |   loadArgs (v :: vs) =
        let (* Push each expression onto the stack. *)
            val () = gencde(v, ToStack, NotEnd, NONE)
        in
            loadArgs vs
        end;

-        (* Called after the args and the closure to call have been pushed
-           onto the stack. *)
-        fun callClosure () : unit =
-        case tailKind of
-            NotEnd => (* Normal call. *) genCallClosure cvec
-
-        |   EndOfProc => (* Tail recursive call. *)
-            let
-                (* Get the return address onto the top of the stack. *)
-                val () = pushLocalStackValue 0
-
-                (* Slide the return address, closure and args over the
-                   old closure, return address and args, and reset the
-                   stack. Then jump to the closure. *)
-                val () =
-                    genTailCall(argsToPass + 2, !realstackptr - 1 + (numOfArgs - argsToPass), cvec);
-                (* It's "-1" not "-2", because we didn't bump the realstackptr
-                   when we pushed the return address. SPF 3/1/97 *)
-            in
-                ()
-            end
-
        (* Have to guarantee that the expression that yields the function
           is evaluated before the arguments. *)
        (* Returns true if evaluating it later is safe. *)
        fun safeToLeave (BICConstnt _) = true
        |   safeToLeave (BICLambda _) = true
        |   safeToLeave (BICExtract _) = true
        |   safeToLeave (BICField {base, ...}) = safeToLeave base
        |   safeToLeave (BICLoadContainer {base, ...}) = safeToLeave base
        |   safeToLeave _ = false

        val () =
            if (case argList of [] => true | _ => safeToLeave (#function eval))
            then
            let (* Can load the args first. *)
                val () = loadArgs argList
            in
                gencde (#function eval, ToStack, NotEnd, NONE)
            end
            else
            let
                (* The expression for the function is too complicated to risk leaving.
                   It might have a side-effect and we must ensure that any side-effects
                   it has are done before the arguments are loaded. *)
                val () = gencde(#function eval, ToStack, NotEnd, NONE);
                val () = loadArgs(argList);
                (* Load the function again. *)
                val () = genLocal(argsToPass, cvec);
            in
                incsp ()
            end

-        val () = callClosure () (* Call the function. *)
-
    in (* body of genEval *)
-        realstackptr := !realstackptr - argsToPass (* Args popped by caller. *)
+        case tailKind of
+            NotEnd => (* Normal call. *)
+            let
+                val () = genPopReg(X8, cvec) (* Pop the closure pointer. *)
+                (* We need to put the first 8 arguments into registers and
+                   leave the rest on the stack. *)
+                fun loadArg(n, reg) =
+                    if argsToPass > n
+                    then loadRegAligned({dest=reg, base=X_MLStackPtr, wordOffset=argsToPass-n-1}, cvec)
+                    else ()
+                val () = loadArg(0, X0)
+                val () = loadArg(1, X1)
+                val () = loadArg(2, X2)
+                val () = loadArg(3, X3)
+                val () = loadArg(4, X4)
+                val () = loadArg(5, X5)
+                val () = loadArg(6, X6)
+                val () = loadArg(7, X7)
+            in
+                loadRegAligned({dest=X9, base=X8, wordOffset=0}, cvec); (* Entry point *)
+                genBranchAndLinkReg(X9, cvec);
+                (* We have popped the closure pointer. The called function has popped
+                   the stack arguments and we have pushed the result value.
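+                   For concreteness (word offsets illustrative), the sequence
+                   emitted for a two-argument application is:
+                       genPopReg(X8, cvec)                      pop the closure pointer
+                       loadRegAligned({dest=X0, base=X_MLStackPtr, wordOffset=1}, cvec)
+                       loadRegAligned({dest=X1, base=X_MLStackPtr, wordOffset=0}, cvec)
+                       loadRegAligned({dest=X9, base=X8, wordOffset=0}, cvec)
+                       genBranchAndLinkReg(X9, cvec)
+                       genPushReg(X0, cvec)                     push the result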
+                   The register arguments are still on the stack. *)
+                genPushReg (X0, cvec);
+                realstackptr := !realstackptr - Int.max(argsToPass-8, 0) (* Stack args popped by the called function. *)
+            end
+
+        |   EndOfProc => (* Tail recursive call. *)
+            let
+                val () = genPopReg(X8, cvec) (* Pop the closure pointer. *)
+                val () = decsp()
+                (* Get the return address into X30. *)
+                val () = loadRegAligned({dest=X30, base=X_MLStackPtr, wordOffset= !realstackptr}, cvec)
+
+                (* Load the register arguments *)
+                fun loadArg(n, reg) =
+                    if argsToPass > n
+                    then loadRegAligned({dest=reg, base=X_MLStackPtr, wordOffset=argsToPass-n-1}, cvec)
+                    else ()
+                val () = loadArg(0, X0)
+                val () = loadArg(1, X1)
+                val () = loadArg(2, X2)
+                val () = loadArg(3, X3)
+                val () = loadArg(4, X4)
+                val () = loadArg(5, X5)
+                val () = loadArg(6, X6)
+                val () = loadArg(7, X7)
+                (* We need to move the stack arguments into the original argument area. *)
+
+                (* This is the total number of words that this function is responsible for.
+                   It includes the stack arguments that the caller expects to be removed. *)
+                val itemsOnStack = !realstackptr + 1 + numOfArgs
+
+                (* Stack arguments are moved using X9. *)
+                fun moveStackArg n =
+                    if n < 8
+                    then ()
+                    else
+                    let
+                        val () = loadArg(n, X9)
+                        val destOffset = itemsOnStack - (n-8) - 1
+                        val () = storeRegAligned({dest=X9, base=X_MLStackPtr, wordOffset=destOffset}, cvec)
+                    in
+                        moveStackArg(n-1)
+                    end
+
+                val () = moveStackArg (argsToPass-1)
+            in
+                resetStack(itemsOnStack - Int.max(argsToPass-8, 0), false, cvec);
+                loadRegAligned({dest=X9, base=X8, wordOffset=0}, cvec); (* Entry point *)
+                genBranchRegister(X9, cvec)
+                (* Since we're not returning we can ignore the stack pointer value. *)
+            end
    end

    (* Push the arguments passed in registers. *)
    val () = if numOfArgs >= 8 then genPushReg (X7, cvec) else ()
    val () = if numOfArgs >= 7 then genPushReg (X6, cvec) else ()
    val () = if numOfArgs >= 6 then genPushReg (X5, cvec) else ()
    val () = if numOfArgs >= 5 then genPushReg (X4, cvec) else ()
    val () = if numOfArgs >= 4 then genPushReg (X3, cvec) else ()
    val () = if numOfArgs >= 3 then genPushReg (X2, cvec) else ()
    val () = if numOfArgs >= 2 then genPushReg (X1, cvec) else ()
    val () = if numOfArgs >= 1 then genPushReg (X0, cvec) else ()
    val () = genPushReg (X30, cvec)
    val () = genPushReg (X8, cvec) (* Push closure pointer *)

    (* Generate the function. *)
    (* Assume we always want a result. There is otherwise a problem if the called
       routine returns a result of type void (i.e. no result) but the caller wants
       a result (e.g. the identity function). *)
    val () = gencde (pt, ToStack, EndOfProc, NONE)

    val () = genPopReg(X0, cvec) (* Value to return => pop into X0 *)
    val () = resetStack(1, false, cvec) (* Skip over the pushed closure *)
    val () = genPopReg(X30, cvec) (* Return address => pop into X30 *)
    val () = resetStack(numOfArgs, false, cvec) (* Remove the arguments *)
-    val () = genRetCode cvec (* Jump to X30 *)
+    val () = genReturnRegister(X30, cvec) (* Jump to X30 *)

in (* body of codegen *)
    (* Having code-generated the body of the function, it is copied into a
       new data segment. *)
    generateCode{code = cvec, maxStack = !maxStack, resultClosure=resultClosure}
end (* codegen *)

fun gencodeLambda(lambda as { name, body, argTypes, localCount, ...}:bicLambdaForm, parameters, closure) =
(let
    val debugSwitchLevel = Debug.getParameter Debug.compilerDebugTag parameters
    val _ = debugSwitchLevel <> 0 orelse raise Fallback
    (* make the code buffer for the new function.
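       Note that everything here runs inside the `handle Fallback` below:
       if the debug switch is unset, or any construct in the body still
       raises Fallback, this code buffer is simply abandoned and the whole
       lambda is regenerated by FallBackCG.gencodeLambda instead.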
    *)
    val newCode : code = codeCreate (name, parameters)
    (* This function must have no non-local references. *)
in
    codegen (body, newCode, closure, List.length argTypes, localCount, parameters)
end) handle Fallback => FallBackCG.gencodeLambda(lambda, parameters, closure)

structure Foreign = FallBackCG.Foreign

structure Sharing =
struct
    open BackendTree.Sharing
    type closureRef = closureRef
end

end;
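(* Frame layout - an illustrative summary of the prologue and epilogue
   above. On entry each function re-pushes its register arguments so that
   the interpreter-style code generator can treat every argument uniformly
   as a stack slot:

       ...                stack arguments (if more than eight), pushed by the caller
       arg 7 ... arg 0    register arguments, X7 pushed first, X0 last
       saved X30          return address
       closure (X8)       <- X_MLStackPtr after the prologue

   The epilogue pops the result into X0, steps over the closure word,
   restores X30, removes the arguments with resetStack and branches to X30
   with genReturnRegister. *)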