diff --git a/libpolyml/interpret.cpp b/libpolyml/interpret.cpp index f56b1531..78deb84a 100644 --- a/libpolyml/interpret.cpp +++ b/libpolyml/interpret.cpp @@ -1,2331 +1,2338 @@ /* Title: An interpreter for a compact instruction set. Author: Dave Matthews, Cambridge University Computer Laboratory Copyright (c) 2000-7 Cambridge University Technical Services Limited Further development Copyright David C.J. Matthews 2015-18. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #ifdef HAVE_STDIO_H #include <stdio.h> #endif #ifdef HAVE_ASSERT_H #include <assert.h> #define ASSERT(x) assert(x) #else #define ASSERT(x) 0 #endif #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_FLOAT_H #include <float.h> #endif #ifdef HAVE_MATH_H #include <math.h> #endif #include <cmath> // Currently just for isnan. #include "globals.h" #include "int_opcodes.h" #include "machine_dep.h" #include "sys.h" #include "profiling.h" #include "arb.h" #include "reals.h" #include "processes.h" #include "run_time.h" #include "gc.h" #include "diagnostics.h" #include "polystring.h" #include "save_vec.h" #include "memmgr.h" #include "scanaddrs.h" +#include "rtsentry.h" #if (SIZEOF_VOIDP == 8) #define IS64BITS 1 #endif #define arg1 (pc[0] + pc[1]*256) #define arg2 (pc[2] + pc[3]*256) const PolyWord True = TAGGED(1); const PolyWord False = TAGGED(0); const PolyWord Zero = TAGGED(0); #define CHECKED_REGS 2 #define UNCHECKED_REGS 0 #define EXTRA_STACK 0 // Don't need any extra - signals aren't handled on the Poly stack. /* the amount of ML stack space to reserve for registers, C exception handling etc. The compiler requires us to reserve 2 stack-frames worth (2 * 20 words) plus whatever we require for the register save area. We actually reserve slightly more than this. SPF 3/3/97 */ #define OVERFLOW_STACK_SIZE \ (50 + \ CHECKED_REGS + \ UNCHECKED_REGS + \ EXTRA_STACK) // This duplicates some code in reals.cpp but is now updated. #define DOUBLESIZE (sizeof(double)/sizeof(POLYUNSIGNED)) union realdb { double dble; POLYUNSIGNED puns[DOUBLESIZE]; }; #define LGWORDSIZE (sizeof(uintptr_t) / sizeof(PolyWord)) // We're using float for Real32 so it needs to be 32-bits. // Assume that's true for the moment. #if (SIZEOF_FLOAT != 4) #error "Float is not 32-bits. Please report this" #endif union flt { float fl; int32_t i; }; class IntTaskData: public TaskData { public: IntTaskData(): interrupt_requested(false), overflowPacket(0), dividePacket(0) {} virtual void GarbageCollect(ScanAddress *process); void ScanStackAddress(ScanAddress *process, PolyWord &val, StackSpace *stack); virtual Handle EnterPolyCode(); // Start running ML // Switch to Poly and return with the io function to call. int SwitchToPoly(); virtual void SetException(poly_exn *exc); virtual void InterruptCode(); // AddTimeProfileCount is used in time profiling.
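// (It checks whether the saved taskPc lies within ML code - a code or permanent space - and, if so, attributes one sample to it via add_count; see the implementation near the end of this file.)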
virtual bool AddTimeProfileCount(SIGNALCONTEXT *context); virtual void InitStackFrame(TaskData *newTask, Handle proc, Handle arg); // These aren't implemented in the interpreted version. virtual Handle EnterCallbackFunction(Handle func, Handle args) { ASSERT(0); return 0; } // Increment or decrement the first word of the object pointed to by the // mutex argument and return the new value. virtual Handle AtomicIncrement(Handle mutexp); // Set a mutex to one. virtual void AtomicReset(Handle mutexp); // Return the minimum space occupied by the stack. Used when setting a limit. virtual uintptr_t currentStackSpace(void) const { return (this->stack->top - this->taskSp) + OVERFLOW_STACK_SIZE; } virtual void addProfileCount(POLYUNSIGNED words) { add_count(this, taskPc, words); } virtual void CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length); bool interrupt_requested; // Allocate memory on the heap. Returns with the address of the cell. Does not set the // length word or any of the data. PolyObject *allocateMemory(POLYUNSIGNED words, POLYCODEPTR &pc, PolyWord *&sp) { words++; // Add the size of the length word. // N.B. The allocation area may be empty so that both of these are zero. if (this->allocPointer >= this->allocLimit + words) { this->allocPointer -= words; return (PolyObject *)(this->allocPointer+1); } // Insufficient space. SaveInterpreterState(pc, sp); // Find some space to allocate in. Returns a pointer to the newly allocated space. // N.B. This may return zero if the heap is exhausted and it has set this // up for an exception. Generally it allocates by decrementing allocPointer // but if the required memory is large it may allocate in a separate area. PolyWord *space = processes->FindAllocationSpace(this, words, true); LoadInterpreterState(pc, sp); if (space == 0) return 0; return (PolyObject *)(space+1); } // Put a real result in a "box" PolyObject *boxDouble(double d, POLYCODEPTR &pc, PolyWord *&sp) { PolyObject *mem = this->allocateMemory(DOUBLESIZE, pc, sp); if (mem == 0) return 0; mem->SetLengthWord(DOUBLESIZE, F_BYTE_OBJ); union realdb uniondb; uniondb.dble = d; // Copy the words. Depending on the word length this may copy one or more words. for (unsigned i = 0; i < DOUBLESIZE; i++) mem->Set(i, PolyWord::FromUnsigned(uniondb.puns[i])); return mem; } // Extract a double value from a box. double unboxDouble(PolyWord p) { union realdb uniondb; for (unsigned i = 0; i < DOUBLESIZE; i++) uniondb.puns[i] = p.AsObjPtr()->Get(i).AsUnsigned(); return uniondb.dble; } // Largely copied from reals.cpp #if (SIZEOF_FLOAT < SIZEOF_POLYWORD) // Typically for 64-bit mode. Use a tagged representation. // The code-generator on the X86/64 assumes the float is in the // high order word. #define FLT_SHIFT ((SIZEOF_POLYWORD-SIZEOF_FLOAT)*8) float unboxFloat(PolyWord p) { union flt argx; argx.i = p.AsSigned() >> FLT_SHIFT; return argx.fl; } PolyObject *boxFloat(float f, POLYCODEPTR &pc, PolyWord *&sp) { union flt argx; argx.fl = f; PolyWord p = PolyWord::FromSigned(((POLYSIGNED)argx.i << FLT_SHIFT) + 1); return p.AsObjPtr(); // Temporarily cast it to this even though it isn't really } #else // Typically for 32-bit mode. Use a boxed representation. PolyObject *boxFloat(float f, POLYCODEPTR &pc, PolyWord *&sp) { PolyObject *mem = this->allocateMemory(1, pc, sp); if (mem == 0) return 0; mem->SetLengthWord(1, F_BYTE_OBJ); union flt argx; argx.fl = f; mem->Set(0, PolyWord::FromSigned(argx.i)); return mem; } // Extract a double value from a box. 
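// (This is the 32-bit variant: the float is held untagged in a single-word byte cell, whereas the 64-bit variant above shifts it into the high-order part of a tagged word.)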
float unboxFloat(PolyWord p) { union flt argx; argx.i = (int32_t)p.AsObjPtr()->Get(0).AsSigned(); return argx.fl; } #endif // Update the copies in the task object void SaveInterpreterState(POLYCODEPTR pc, PolyWord *sp) { taskPc = pc; taskSp = sp; } // Update the local state void LoadInterpreterState(POLYCODEPTR &pc, PolyWord *&sp) { pc = taskPc; sp = taskSp; } POLYCODEPTR taskPc; /* Program counter. */ PolyWord *taskSp; /* Stack pointer. */ PolyWord *hr; PolyWord exception_arg; bool raiseException; PolyWord *sl; /* Stack limit register. */ PolyObject *overflowPacket, *dividePacket; }; // This lock is used to synchronise all atomic operations. // It is not needed in the X86 version because that can use a global // memory lock. static PLock mutexLock; // Special value for return address. #define SPECIAL_PC_END_THREAD TAGGED(1) class Interpreter : public MachineDependent { public: Interpreter() {} // Create a task data object. virtual TaskData *CreateTaskData(void) { return new IntTaskData(); } virtual Architectures MachineArchitecture(void) { return MA_Interpreted; } }; void IntTaskData::InitStackFrame(TaskData *parentTask, Handle proc, Handle arg) /* Initialise stack frame. */ { StackSpace *space = this->stack; StackObject *stack = (StackObject *)space->stack(); PolyObject *closure = DEREFWORDHANDLE(proc); uintptr_t stack_size = space->spaceSize(); this->taskPc = closure->Get(0).AsCodePtr(); this->exception_arg = TAGGED(0); /* Used for exception argument. */ this->taskSp = (PolyWord*)stack + stack_size; this->raiseException = false; /* Set up exception handler */ /* No previous handler so point it at itself. */ this->taskSp--; *(this->taskSp) = PolyWord::FromStackAddr(this->taskSp); *(--this->taskSp) = SPECIAL_PC_END_THREAD; /* Default return address. */ this->hr = this->taskSp; /* If this function takes an argument store it on the stack. */ if (arg != 0) *(--this->taskSp) = DEREFWORD(arg); *(--this->taskSp) = SPECIAL_PC_END_THREAD; /* Return address. */ *(--this->taskSp) = closure; /* Closure address */ // Make packets for exceptions. overflowPacket = makeExceptionPacket(parentTask, EXC_overflow); dividePacket = makeExceptionPacket(parentTask, EXC_divide); } extern "C" { typedef POLYUNSIGNED(*callFastRts0)(); typedef POLYUNSIGNED(*callFastRts1)(intptr_t); typedef POLYUNSIGNED(*callFastRts2)(intptr_t, intptr_t); typedef POLYUNSIGNED(*callFastRts3)(intptr_t, intptr_t, intptr_t); typedef POLYUNSIGNED(*callFastRts4)(intptr_t, intptr_t, intptr_t, intptr_t); typedef POLYUNSIGNED(*callFastRts5)(intptr_t, intptr_t, intptr_t, intptr_t, intptr_t); typedef POLYUNSIGNED(*callFullRts0)(PolyObject *); typedef POLYUNSIGNED(*callFullRts1)(PolyObject *, intptr_t); typedef POLYUNSIGNED(*callFullRts2)(PolyObject *, intptr_t, intptr_t); typedef POLYUNSIGNED(*callFullRts3)(PolyObject *, intptr_t, intptr_t, intptr_t); typedef double (*callRTSRtoR) (double); typedef double (*callRTSRRtoR) (double, double); typedef double (*callRTSGtoR) (intptr_t); typedef double (*callRTSRGtoR) (double, intptr_t); typedef float(*callRTSFtoF) (float); typedef float(*callRTSFFtoF) (float, float); typedef float(*callRTSGtoF) (intptr_t); typedef float(*callRTSFGtoF) (float, intptr_t); } void IntTaskData::InterruptCode() /* Stop the Poly code at a suitable place. */ /* We may get an asynchronous interrupt at any time. */ { IntTaskData *itd = (IntTaskData *)this; itd->interrupt_requested = true; } void IntTaskData::SetException(poly_exn *exc) /* Set up the stack of a process to raise an exception. 
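The packet pushed here is picked up by the RAISE_EXCEPTION code in SwitchToPoly, which unwinds the stack to the current handler pointed at by hr.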
*/ { this->raiseException = true; *(--this->taskSp) = (PolyWord)exc; /* push exception data */ } int IntTaskData::SwitchToPoly() /* (Re)-enter the Poly code from C. */ { // These are temporary values used where one instruction jumps to // common code. POLYUNSIGNED tailCount; PolyWord *tailPtr; POLYUNSIGNED returnCount; POLYUNSIGNED storeWords; POLYUNSIGNED stackCheck; // Local values. These are copies of member variables but are used so frequently that // it is important that access should be fast. POLYCODEPTR pc; PolyWord *sp; double dv; LoadInterpreterState(pc, sp); sl = (PolyWord*)this->stack->stack() + OVERFLOW_STACK_SIZE; // We may have taken an interrupt which has set an exception. if (this->raiseException) goto RAISE_EXCEPTION; for(;;){ /* Each instruction */ // char buff[1000]; // sprintf(buff, "addr = %p sp=%p instr=%02x *sp=%p\n", pc, sp, *pc, (*sp).AsStackAddr()); // OutputDebugStringA(buff); switch(*pc++) { case INSTR_enter_int: pc++; /* Skip the argument. */ break; case INSTR_jump8false: { PolyWord u = *sp++; /* Pop argument */ if (u == True) { pc += 1; break; } /* else - false - take the jump */ } case INSTR_jump8: pc += *pc + 1; break; case INSTR_jump16false: { PolyWord u = *sp++; /* Pop argument */ if (u == True) { pc += 2; break; } /* else - false - take the jump */ } case INSTR_jump16: pc += arg1 + 2; break; case INSTR_jump32False: { PolyWord u = *sp++; /* Pop argument */ if (u == True) { pc += 4; break; } /* else - false - take the jump */ } case INSTR_jump32: { // This is a 32-bit signed quantity on both 64-bits and 32-bits. POLYSIGNED offset = pc[3] & 0x80 ? -1 : 0; offset = (offset << 8) | pc[3]; offset = (offset << 8) | pc[2]; offset = (offset << 8) | pc[1]; offset = (offset << 8) | pc[0]; pc += offset + 4; break; } case INSTR_push_handler: /* Save the old handler value. */ *(--sp) = PolyWord::FromStackAddr(this->hr); /* Push old handler */ break; case INSTR_setHandler8: /* Set up a handler */ *(--sp) = PolyWord::FromCodePtr(pc + *pc + 1); /* Address of handler */ this->hr = sp; pc += 1; break; case INSTR_setHandler16: /* Set up a handler */ *(--sp) = PolyWord::FromCodePtr(pc + arg1 + 2); /* Address of handler */ this->hr = sp; pc += 2; break; case INSTR_setHandler32: /* Set up a handler */ { POLYUNSIGNED offset = pc[0] + (pc[1] << 8) + (pc[2] << 16) + (pc[3] << 24); *(--sp) = PolyWord::FromCodePtr(pc + offset + 4); /* Address of handler */ this->hr = sp; pc += 4; break; } case INSTR_deleteHandler: /* Delete handler retaining the result. */ { PolyWord u = *sp++; sp = this->hr; sp++; // Remove handler entry point this->hr = (*sp).AsStackAddr(); // Restore old handler *sp = u; // Put back the result break; } case INSTR_case16: { // arg1 is the largest value that is in the range POLYSIGNED u = UNTAGGED(*sp++); /* Get the value */ if (u > arg1 || u < 0) pc += (arg1+2)*2; /* Out of range */ else { pc += 2; pc += /* Index */pc[u*2]+pc[u*2 + 1]*256; } break; } case INSTR_case32: { // arg1 is the number of cases i.e. one more than the largest value // This is followed by that number of 32-bit offsets. // If the value is out of range the default case is immediately after the table. 
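// For example, with arg1 = 3 the two argument bytes are followed by three little-endian 32-bit offsets for the values 0, 1 and 2: a popped value of 1 adds the offset held in bytes 4..7 of the table to pc, while any value >= 3 or < 0 skips the whole table (2 + 3*4 bytes) and falls into the default case.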
POLYSIGNED u = UNTAGGED(*sp++); /* Get the value */ if (u >= arg1 || u < 0) pc += 2 + arg1 * 4; /* Out of range */ else { pc += 2; pc += /* Index */pc[u*4] + (pc[u*4+1] << 8) + (pc[u*4+2] << 16) + (pc[u*4+3] << 24); } break; } case INSTR_tail_3_b: tailCount = 3; tailPtr = sp + tailCount; sp = tailPtr + *pc; goto TAIL_CALL; case INSTR_tail_3_2: tailCount = 3; tailPtr = sp + tailCount; sp = tailPtr + 2; goto TAIL_CALL; case INSTR_tail_3_3: tailCount = 3; tailPtr = sp + tailCount; sp = tailPtr + 3; goto TAIL_CALL; case INSTR_tail_4_b: tailCount = 4; tailPtr = sp + tailCount; sp = tailPtr + *pc; goto TAIL_CALL; case INSTR_tail_b_b: tailCount = *pc; tailPtr = sp + tailCount; sp = tailPtr + pc[1]; goto TAIL_CALL; case INSTR_tail: /* Tail recursive call. */ /* Move items up the stack. */ /* There may be an overlap if the function we are calling has more args than this one. */ tailCount = arg1; tailPtr = sp + tailCount; sp = tailPtr + arg2; TAIL_CALL: /* For general case. */ if (tailCount < 2) Crash("Invalid argument\n"); for (; tailCount > 0; tailCount--) *(--sp) = *(--tailPtr); pc = (*sp++).AsCodePtr(); /* Pop the original return address. */ /* And drop through. */ case INSTR_call_closure: /* Closure call. */ { POLYCODEPTR newPc = (*sp).AsObjPtr()->Get(0).AsCodePtr(); sp--; *sp = sp[1]; /* Move closure up. */ sp[1] = PolyWord::FromCodePtr(pc); /* Save return address. */ pc = newPc; /* Get entry point. */ this->taskPc = pc; // Update in case we're profiling break; } case INSTR_return_w: returnCount = arg1; /* Get no. of args to remove. */ RETURN: /* Common code for return. */ { PolyWord result = *sp++; /* Result */ sp++; /* Remove the link/closure */ pc = (*sp++).AsCodePtr(); /* Return address */ sp += returnCount; /* Add on number of args. */ if (pc == SPECIAL_PC_END_THREAD.AsCodePtr()) exitThread(this); // This thread is exiting. *(--sp) = result; /* Result */ this->taskPc = pc; // Update in case we're profiling } break; case INSTR_return_b: returnCount = *pc; goto RETURN; case INSTR_return_0: returnCount = 0; goto RETURN; case INSTR_return_1: returnCount = 1; goto RETURN; case INSTR_return_2: returnCount = 2; goto RETURN; case INSTR_return_3: returnCount = 3; goto RETURN; case INSTR_stackSize8: stackCheck = *pc++; goto STACKCHECK; case INSTR_stackSize16: { stackCheck = arg1; pc += 2; STACKCHECK: // Check there is space on the stack if (sp - stackCheck < sl) { uintptr_t min_size = (this->stack->top - (PolyWord*)sp) + OVERFLOW_STACK_SIZE + stackCheck; SaveInterpreterState(pc, sp); CheckAndGrowStack(this, min_size); LoadInterpreterState(pc, sp); sl = (PolyWord*)this->stack->stack() + OVERFLOW_STACK_SIZE; } // Also check for interrupts if (this->interrupt_requested) { // Check for interrupts this->interrupt_requested = false; SaveInterpreterState(pc, sp); return -1; } break; } case INSTR_pad: /* No-op */ break; case INSTR_raise_ex: { RAISE_EXCEPTION: this->raiseException = false; PolyException *exn = (PolyException*)((*sp).AsObjPtr()); this->exception_arg = exn; /* Get exception data */ sp = this->hr; if (*sp == SPECIAL_PC_END_THREAD) exitThread(this); // Default handler for thread. pc = (*sp++).AsCodePtr(); this->hr = (*sp++).AsStackAddr(); break; } case INSTR_get_store_w: // Get_store is now only used for mutually recursive closures. It allocates mutable store // initialised to zero. 
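// (The zero words are placeholders, typically overwritten later - e.g. by the move_to_vec_* instructions - once the mutually recursive closures have been built.)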
{ storeWords = arg1; pc += 2; GET_STORE: PolyObject *p = this->allocateMemory(storeWords, pc, sp); if (p == 0) goto RAISE_EXCEPTION; p->SetLengthWord(storeWords, F_MUTABLE_BIT); for(; storeWords > 0; ) p->Set(--storeWords, TAGGED(0)); /* Must initialise store! */ *(--sp) = (PolyWord)p; break; } case INSTR_get_store_2: storeWords = 2; goto GET_STORE; case INSTR_get_store_3: storeWords = 3; goto GET_STORE; case INSTR_get_store_4: storeWords = 4; goto GET_STORE; case INSTR_get_store_b: storeWords = *pc; pc++; goto GET_STORE; case INSTR_tuple_w: { storeWords = arg1; pc += 2; TUPLE: /* Common code for tupling. */ PolyObject *p = this->allocateMemory(storeWords, pc, sp); if (p == 0) goto RAISE_EXCEPTION; // Exception p->SetLengthWord(storeWords, 0); for(; storeWords > 0; ) p->Set(--storeWords, *sp++); *(--sp) = (PolyWord)p; break; } case INSTR_tuple_2: storeWords = 2; goto TUPLE; case INSTR_tuple_3: storeWords = 3; goto TUPLE; case INSTR_tuple_4: storeWords = 4; goto TUPLE; case INSTR_tuple_b: storeWords = *pc; pc++; goto TUPLE; case INSTR_local_w: { PolyWord u = sp[arg1]; *(--sp) = u; pc += 2; break; } case INSTR_indirect_w: *sp = (*sp).AsObjPtr()->Get(arg1); pc += 2; break; case INSTR_move_to_vec_w: { PolyWord u = *sp++; (*sp).AsObjPtr()->Set(arg1, u); pc += 2; break; } case INSTR_set_stack_val_w: { PolyWord u = *sp++; sp[arg1-1] = u; pc += 2; break; } case INSTR_reset_w: sp += arg1; pc += 2; break; case INSTR_reset_r_w: { PolyWord u = *sp; sp += arg1; *sp = u; pc += 2; break; } case INSTR_constAddr8: *(--sp) = *(PolyWord*)(pc + pc[0] + 1); pc += 1; break; case INSTR_constAddr16: *(--sp) = *(PolyWord*)(pc + arg1 + 2); pc += 2; break; case INSTR_constAddr32: { POLYUNSIGNED offset = pc[0] + (pc[1] << 8) + (pc[2] << 16) + (pc[3] << 24); *(--sp) = *(PolyWord*)(pc + offset + 4); pc += 4; break; } case INSTR_const_int_w: *(--sp) = TAGGED(arg1); pc += 2; break; case INSTR_jump_back8: pc -= *pc + 1; if (this->interrupt_requested) { // Check for interrupt in case we're in a loop this->interrupt_requested = false; SaveInterpreterState(pc, sp); return -1; } break; case INSTR_jump_back16: pc -= arg1 + 1; if (this->interrupt_requested) { // Check for interrupt in case we're in a loop this->interrupt_requested = false; SaveInterpreterState(pc, sp); return -1; } break; case INSTR_lock: { PolyObject *obj = (*sp).AsObjPtr(); obj->SetLengthWord(obj->LengthWord() & ~_OBJ_MUTABLE_BIT); break; } case INSTR_ldexc: *(--sp) = this->exception_arg; break; case INSTR_local_b: { PolyWord u = sp[*pc]; *(--sp) = u; pc += 1; break; } case INSTR_indirect_b: *sp = (*sp).AsObjPtr()->Get(*pc); pc += 1; break; case INSTR_move_to_vec_b: { PolyWord u = *sp++; (*sp).AsObjPtr()->Set(*pc, u); pc += 1; break; } case INSTR_set_stack_val_b: { PolyWord u = *sp++; sp[*pc-1] = u; pc += 1; break; } case INSTR_reset_b: sp += *pc; pc += 1; break; case INSTR_reset_r_b: { PolyWord u = *sp; sp += *pc; *sp = u; pc += 1; break; } case INSTR_const_int_b: *(--sp) = TAGGED(*pc); pc += 1; break; case INSTR_local_0: { PolyWord u = sp[0]; *(--sp) = u; break; } case INSTR_local_1: { PolyWord u = sp[1]; *(--sp) = u; break; } case INSTR_local_2: { PolyWord u = sp[2]; *(--sp) = u; break; } case INSTR_local_3: { PolyWord u = sp[3]; *(--sp) = u; break; } case INSTR_local_4: { PolyWord u = sp[4]; *(--sp) = u; break; } case INSTR_local_5: { PolyWord u = sp[5]; *(--sp) = u; break; } case INSTR_local_6: { PolyWord u = sp[6]; *(--sp) = u; break; } case INSTR_local_7: { PolyWord u = sp[7]; *(--sp) = u; break; } case INSTR_local_8: { PolyWord u = sp[8]; *(--sp) = u; 
break; } case INSTR_local_9: { PolyWord u = sp[9]; *(--sp) = u; break; } case INSTR_local_10: { PolyWord u = sp[10]; *(--sp) = u; break; } case INSTR_local_11: { PolyWord u = sp[11]; *(--sp) = u; break; } case INSTR_indirect_0: *sp = (*sp).AsObjPtr()->Get(0); break; case INSTR_indirect_1: *sp = (*sp).AsObjPtr()->Get(1); break; case INSTR_indirect_2: *sp = (*sp).AsObjPtr()->Get(2); break; case INSTR_indirect_3: *sp = (*sp).AsObjPtr()->Get(3); break; case INSTR_indirect_4: *sp = (*sp).AsObjPtr()->Get(4); break; case INSTR_indirect_5: *sp = (*sp).AsObjPtr()->Get(5); break; case INSTR_const_0: *(--sp) = Zero; break; case INSTR_const_1: *(--sp) = TAGGED(1); break; case INSTR_const_2: *(--sp) = TAGGED(2); break; case INSTR_const_3: *(--sp) = TAGGED(3); break; case INSTR_const_4: *(--sp) = TAGGED(4); break; case INSTR_const_10: *(--sp) = TAGGED(10); break; case INSTR_reset_r_1: { PolyWord u = *sp; sp += 1; *sp = u; break; } case INSTR_reset_r_2: { PolyWord u = *sp; sp += 2; *sp = u; break; } case INSTR_reset_r_3: { PolyWord u = *sp; sp += 3; *sp = u; break; } case INSTR_reset_1: sp += 1; break; case INSTR_reset_2: sp += 2; break; case INSTR_stack_container: { POLYUNSIGNED words = arg1; pc += 2; while (words-- > 0) *(--sp) = Zero; sp--; *sp = PolyWord::FromStackAddr(sp + 1); break; } case INSTR_tuple_container: /* Create a tuple from a container. */ { storeWords = arg1; PolyObject *t = this->allocateMemory(storeWords, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(storeWords, 0); for(; storeWords > 0; ) { storeWords--; t->Set(storeWords, (*sp).AsObjPtr()->Get(storeWords)); } *sp = t; pc += 2; break; } case INSTR_callFastRTS0: { callFastRts0 doCall = *(callFastRts0*)(*sp++).AsObjPtr(); POLYUNSIGNED result = doCall(); *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRTS1: { callFastRts1 doCall = *(callFastRts1*)(*sp++).AsObjPtr(); intptr_t rtsArg1 = (*sp++).AsSigned(); POLYUNSIGNED result = doCall(rtsArg1); *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRTS2: { callFastRts2 doCall = *(callFastRts2*)(*sp++).AsObjPtr(); intptr_t rtsArg2 = (*sp++).AsSigned(); // Pop off the args, last arg first. intptr_t rtsArg1 = (*sp++).AsSigned(); POLYUNSIGNED result = doCall(rtsArg1, rtsArg2); *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRTS3: { callFastRts3 doCall = *(callFastRts3*)(*sp++).AsObjPtr(); intptr_t rtsArg3 = (*sp++).AsSigned(); // Pop off the args, last arg first. intptr_t rtsArg2 = (*sp++).AsSigned(); intptr_t rtsArg1 = (*sp++).AsSigned(); POLYUNSIGNED result = doCall(rtsArg1, rtsArg2, rtsArg3); *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRTS4: { callFastRts4 doCall = *(callFastRts4*)(*sp++).AsObjPtr(); intptr_t rtsArg4 = (*sp++).AsSigned(); // Pop off the args, last arg first. intptr_t rtsArg3 = (*sp++).AsSigned(); intptr_t rtsArg2 = (*sp++).AsSigned(); intptr_t rtsArg1 = (*sp++).AsSigned(); POLYUNSIGNED result = doCall(rtsArg1, rtsArg2, rtsArg3, rtsArg4); *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRTS5: { callFastRts5 doCall = *(callFastRts5*)(*sp++).AsObjPtr(); intptr_t rtsArg5 = (*sp++).AsSigned(); // Pop off the args, last arg first. 
intptr_t rtsArg4 = (*sp++).AsSigned(); intptr_t rtsArg3 = (*sp++).AsSigned(); intptr_t rtsArg2 = (*sp++).AsSigned(); intptr_t rtsArg1 = (*sp++).AsSigned(); POLYUNSIGNED result = doCall(rtsArg1, rtsArg2, rtsArg3, rtsArg4, rtsArg5); *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFullRTS0: { callFullRts0 doCall = *(callFullRts0*)(*sp++).AsObjPtr(); this->raiseException = false; SaveInterpreterState(pc, sp); POLYUNSIGNED result = doCall(this->threadObject); LoadInterpreterState(pc, sp); // If this raised an exception if (this->raiseException) goto RAISE_EXCEPTION; *(--sp)= PolyWord::FromUnsigned(result); break; } case INSTR_callFullRTS1: { callFullRts1 doCall = *(callFullRts1*)(*sp++).AsObjPtr(); intptr_t rtsArg1 = (*sp++).AsSigned(); this->raiseException = false; SaveInterpreterState(pc, sp); POLYUNSIGNED result = doCall(this->threadObject, rtsArg1); LoadInterpreterState(pc, sp); // If this raised an exception if (this->raiseException) goto RAISE_EXCEPTION; *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFullRTS2: { callFullRts2 doCall = *(callFullRts2*)(*sp++).AsObjPtr(); intptr_t rtsArg2 = (*sp++).AsSigned(); // Pop off the args, last arg first. intptr_t rtsArg1 = (*sp++).AsSigned(); this->raiseException = false; SaveInterpreterState(pc, sp); POLYUNSIGNED result = doCall(this->threadObject, rtsArg1, rtsArg2); LoadInterpreterState(pc, sp); // If this raised an exception if (this->raiseException) goto RAISE_EXCEPTION; *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFullRTS3: { callFullRts3 doCall = *(callFullRts3*)(*sp++).AsObjPtr(); intptr_t rtsArg3 = (*sp++).AsSigned(); // Pop off the args, last arg first. intptr_t rtsArg2 = (*sp++).AsSigned(); intptr_t rtsArg1 = (*sp++).AsSigned(); this->raiseException = false; SaveInterpreterState(pc, sp); POLYUNSIGNED result = doCall(this->threadObject, rtsArg1, rtsArg2, rtsArg3); LoadInterpreterState(pc, sp); // If this raised an exception if (this->raiseException) goto RAISE_EXCEPTION; *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRtoR: { // Floating point call. The call itself does not allocate but we // need to put the result into a "box". callRTSRtoR doCall = *(callRTSRtoR*)(*sp++).AsObjPtr(); PolyWord rtsArg1 = *sp++; double argument = unboxDouble(rtsArg1); // Allocate memory for the result. double result = doCall(argument); PolyObject *t = boxDouble(result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = (PolyWord)t; break; } case INSTR_callFastRRtoR: { // Floating point call. PolyWord rtsCall = (*sp++).AsObjPtr()->Get(0); // Value holds address. PolyWord rtsArg2 = *sp++; PolyWord rtsArg1 = *sp++; callRTSRRtoR doCall = (callRTSRRtoR)rtsCall.AsCodePtr(); double argument1 = unboxDouble(rtsArg1); double argument2 = unboxDouble(rtsArg2); // Allocate memory for the result. double result = doCall(argument1, argument2); PolyObject *t = boxDouble(result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = t; break; } case INSTR_callFastGtoR: { // Call that takes a POLYUNSIGNED argument and returns a double. callRTSGtoR doCall = *(callRTSGtoR*)(*sp++).AsObjPtr(); intptr_t rtsArg1 = (*sp++).AsSigned(); // Allocate memory for the result. double result = doCall(rtsArg1); PolyObject *t = boxDouble(result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = (PolyWord)t; break; } case INSTR_callFastRGtoR: { // Call that takes a POLYUNSIGNED argument and returns a double. PolyWord rtsCall = (*sp++).AsObjPtr()->Get(0); // Value holds address. 
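// (The first word of the entry-point cell is the address of the C function, just as in the callFastRTS* cases, which dereference the cell directly.)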
intptr_t rtsArg2 = (*sp++).AsSigned(); PolyWord rtsArg1 = *sp++; callRTSRGtoR doCall = (callRTSRGtoR)rtsCall.AsCodePtr(); double argument1 = unboxDouble(rtsArg1); // Allocate memory for the result. double result = doCall(argument1, rtsArg2); PolyObject *t = boxDouble(result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = t; break; } case INSTR_callFastFtoF: { // Floating point call. The call itself does not allocate but we // need to put the result into a "box". PolyWord rtsCall = (*sp++).AsObjPtr()->Get(0); // Value holds address. PolyWord rtsArg1 = *sp++; callRTSFtoF doCall = (callRTSFtoF)rtsCall.AsCodePtr(); float argument = unboxFloat(rtsArg1); // Allocate memory for the result. float result = doCall(argument); PolyObject *t = boxFloat(result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = t; break; } case INSTR_callFastFFtoF: { // Floating point call. PolyWord rtsCall = (*sp++).AsObjPtr()->Get(0); // Value holds address. PolyWord rtsArg2 = *sp++; PolyWord rtsArg1 = *sp++; callRTSFFtoF doCall = (callRTSFFtoF)rtsCall.AsCodePtr(); float argument1 = unboxFloat(rtsArg1); float argument2 = unboxFloat(rtsArg2); // Allocate memory for the result. float result = doCall(argument1, argument2); PolyObject *t = boxFloat(result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = t; break; } case INSTR_callFastGtoF: { // Call that takes a POLYUNSIGNED argument and returns a double. PolyWord rtsCall = (*sp++).AsObjPtr()->Get(0); // Value holds address. intptr_t rtsArg1 = (*sp++).AsSigned(); callRTSGtoF doCall = (callRTSGtoF)rtsCall.AsCodePtr(); // Allocate memory for the result. float result = doCall(rtsArg1); PolyObject *t = boxFloat(result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = t; break; } case INSTR_callFastFGtoF: { // Call that takes a POLYUNSIGNED argument and returns a double. PolyWord rtsCall = (*sp++).AsObjPtr()->Get(0); // Value holds address. intptr_t rtsArg2 = (*sp++).AsSigned(); PolyWord rtsArg1 = *sp++; callRTSFGtoF doCall = (callRTSFGtoF)rtsCall.AsCodePtr(); float argument1 = unboxFloat(rtsArg1); // Allocate memory for the result. float result = doCall(argument1, rtsArg2); PolyObject *t = boxFloat(result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = t; break; } case INSTR_notBoolean: *sp = ((*sp) == True) ? False : True; break; case INSTR_isTagged: *sp = (*sp).IsTagged() ? True : False; break; case INSTR_cellLength: /* Return the length word. */ *sp = TAGGED((*sp).AsObjPtr()->Length()); break; case INSTR_cellFlags: { PolyObject *p = (*sp).AsObjPtr(); POLYUNSIGNED f = (p->LengthWord()) >> OBJ_PRIVATE_FLAGS_SHIFT; *sp = TAGGED(f); break; } case INSTR_clearMutable: { PolyObject *obj = (*sp).AsObjPtr(); POLYUNSIGNED lengthW = obj->LengthWord(); /* Clear the mutable bit. */ obj->SetLengthWord(lengthW & ~_OBJ_MUTABLE_BIT); *sp = Zero; break; } case INSTR_stringLength: // Now replaced by loadUntagged *sp = TAGGED(((PolyStringObject*)(*sp).AsObjPtr())->length); break; case INSTR_atomicIncr: { PLocker l(&mutexLock); PolyObject *p = (*sp).AsObjPtr(); PolyWord newValue = TAGGED(UNTAGGED(p->Get(0))+1); p->Set(0, newValue); *sp = newValue; break; } case INSTR_atomicDecr: { PLocker l(&mutexLock); PolyObject *p = (*sp).AsObjPtr(); PolyWord newValue = TAGGED(UNTAGGED(p->Get(0))-1); p->Set(0, newValue); *sp = newValue; break; } case INSTR_atomicReset: { // This is needed in the interpreted version otherwise there // is a chance that we could set the value to zero while another // thread is between getting the old value and setting it to the new value. 
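// Holding mutexLock makes the store atomic with respect to the atomicIncr/atomicDecr cases above; the same locking is used by ProcessAtomicReset further down.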
PLocker l(&mutexLock); PolyObject *p = (*sp).AsObjPtr(); p->Set(0, TAGGED(1)); // Set this to released. *sp = TAGGED(0); // Push the unit result break; } case INSTR_longWToTagged: { // Extract the first word and return it as a tagged value. This loses the top-bit POLYUNSIGNED wx = (*sp).AsObjPtr()->Get(0).AsUnsigned(); *sp = TAGGED(wx); break; } case INSTR_signedToLongW: { // Shift the tagged value to remove the tag and put it into the first word. // The original sign bit is copied in the shift. intptr_t wx = (*sp).UnTagged(); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(intptr_t*)t = wx; *sp = (PolyWord)t; break; } case INSTR_unsignedToLongW: { // As with the above except the value is treated as an unsigned // value and the top bit is zero. uintptr_t wx = (*sp).UnTaggedUnsigned(); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wx; *sp = (PolyWord)t; break; } case INSTR_realAbs: { PolyObject *t = this->boxDouble(fabs(unboxDouble(*sp)), pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case INSTR_realNeg: { PolyObject *t = this->boxDouble(-(unboxDouble(*sp)), pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case INSTR_floatAbs: { PolyObject *t = this->boxFloat(fabs(unboxFloat(*sp)), pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = t; break; } case INSTR_floatNeg: { PolyObject *t = this->boxFloat(-(unboxFloat(*sp)), pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = t; break; } case INSTR_fixedIntToReal: { POLYSIGNED u = UNTAGGED(*sp); PolyObject *t = this->boxDouble((double)u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case INSTR_fixedIntToFloat: { POLYSIGNED u = UNTAGGED(*sp); PolyObject *t = this->boxFloat((float)u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = t; break; } case INSTR_floatToReal: { float u = unboxFloat(*sp); PolyObject *t = this->boxDouble((double)u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = t; break; } case INSTR_equalWord: { PolyWord u = *sp++; *sp = u == (*sp) ? True : False; break; } case INSTR_lessSigned: { PolyWord u = *sp++; *sp = ((*sp).AsSigned() < u.AsSigned()) ? True : False; break; } case INSTR_lessUnsigned: { PolyWord u = *sp++; *sp = ((*sp).AsUnsigned() < u.AsUnsigned()) ? True : False; break; } case INSTR_lessEqSigned: { PolyWord u = *sp++; *sp = ((*sp).AsSigned() <= u.AsSigned()) ? True : False; break; } case INSTR_lessEqUnsigned: { PolyWord u = *sp++; *sp = ((*sp).AsUnsigned() <= u.AsUnsigned()) ? True : False; break; } case INSTR_greaterSigned: { PolyWord u = *sp++; *sp = ((*sp).AsSigned() > u.AsSigned()) ? True : False; break; } case INSTR_greaterUnsigned: { PolyWord u = *sp++; *sp = ((*sp).AsUnsigned() > u.AsUnsigned()) ? True : False; break; } case INSTR_greaterEqSigned: { PolyWord u = *sp++; *sp = ((*sp).AsSigned() >= u.AsSigned()) ? True : False; break; } case INSTR_greaterEqUnsigned: { PolyWord u = *sp++; *sp = ((*sp).AsUnsigned() >= u.AsUnsigned()) ? 
True : False; break; } case INSTR_fixedAdd: { PolyWord x = *sp++; PolyWord y = (*sp); POLYSIGNED t = UNTAGGED(x) + UNTAGGED(y); if (t <= MAXTAGGED && t >= -MAXTAGGED-1) *sp = TAGGED(t); else { *(--sp) = (PolyWord)overflowPacket; goto RAISE_EXCEPTION; } break; } case INSTR_fixedSub: { PolyWord x = *sp++; PolyWord y = (*sp); POLYSIGNED t = UNTAGGED(y) - UNTAGGED(x); if (t <= MAXTAGGED && t >= -MAXTAGGED-1) *sp = TAGGED(t); else { *(--sp) = (PolyWord)overflowPacket; goto RAISE_EXCEPTION; } break; } case INSTR_fixedMult: { POLYSIGNED x = UNTAGGED(*sp++); POLYSIGNED y = (*sp).AsSigned() - 1; // Just remove the tag POLYSIGNED t = x * y; if (x != 0 && t / x != y) { *(--sp) = (PolyWord)overflowPacket; goto RAISE_EXCEPTION; } *sp = PolyWord::FromSigned(t+1); // Add back the tag break; } case INSTR_fixedQuot: { // Zero and overflow are checked for in ML. POLYSIGNED u = UNTAGGED(*sp++); PolyWord y = (*sp); *sp = TAGGED(UNTAGGED(y) / u); break; } case INSTR_fixedRem: { // Zero and overflow are checked for in ML. POLYSIGNED u = UNTAGGED(*sp++); PolyWord y = (*sp); *sp = TAGGED(UNTAGGED(y) % u); break; } case INSTR_wordAdd: { PolyWord u = *sp++; // Because we're not concerned with overflow we can just add the values and subtract the tag. *sp = PolyWord::FromUnsigned((*sp).AsUnsigned() + u.AsUnsigned() - TAGGED(0).AsUnsigned()); break; } case INSTR_wordSub: { PolyWord u = *sp++; *sp = PolyWord::FromUnsigned((*sp).AsUnsigned() - u.AsUnsigned() + TAGGED(0).AsUnsigned()); break; } case INSTR_wordMult: { PolyWord u = *sp++; *sp = TAGGED(UNTAGGED_UNSIGNED(*sp) * UNTAGGED_UNSIGNED(u)); break; } case INSTR_wordDiv: { POLYUNSIGNED u = UNTAGGED_UNSIGNED(*sp++); // Detection of zero is done in ML *sp = TAGGED(UNTAGGED_UNSIGNED(*sp) / u); break; } case INSTR_wordMod: { POLYUNSIGNED u = UNTAGGED_UNSIGNED(*sp++); *sp = TAGGED(UNTAGGED_UNSIGNED(*sp) % u); break; } case INSTR_wordAnd: { PolyWord u = *sp++; // Since both of these should be tagged the tag bit will be preserved. *sp = PolyWord::FromUnsigned((*sp).AsUnsigned() & u.AsUnsigned()); break; } case INSTR_wordOr: { PolyWord u = *sp++; // Since both of these should be tagged the tag bit will be preserved. *sp = PolyWord::FromUnsigned((*sp).AsUnsigned() | u.AsUnsigned()); break; } case INSTR_wordXor: { PolyWord u = *sp++; // This will remove the tag bit so it has to be reinstated. *sp = PolyWord::FromUnsigned(((*sp).AsUnsigned() ^ u.AsUnsigned()) | TAGGED(0).AsUnsigned()); break; } case INSTR_wordShiftLeft: { // ML requires shifts greater than a word to return zero. // That's dealt with at the higher level. PolyWord u = *sp++; *sp = TAGGED(UNTAGGED_UNSIGNED(*sp) << UNTAGGED_UNSIGNED(u)); break; } case INSTR_wordShiftRLog: { PolyWord u = *sp++; *sp = TAGGED(UNTAGGED_UNSIGNED(*sp) >> UNTAGGED_UNSIGNED(u)); break; } case INSTR_wordShiftRArith: { PolyWord u = *sp++; // Strictly speaking, C does not require that this uses // arithmetic shifting so we really ought to set the // high-order bits explicitly. *sp = TAGGED(UNTAGGED(*sp) >> UNTAGGED(u)); break; } case INSTR_allocByteMem: { // Allocate byte segment. This does not need to be initialised. POLYUNSIGNED flags = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp); PolyObject *t = this->allocateMemory(length, pc, sp); if (t == 0) goto RAISE_EXCEPTION; // Exception t->SetLengthWord(length, (byte)flags); *sp = (PolyWord)t; break; } case INSTR_lgWordEqual: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); *sp = wx == wy ? 
True : False; break; } case INSTR_lgWordLess: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); *sp = (wy < wx) ? True : False; break; } case INSTR_lgWordLessEq: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); *sp = (wy <= wx) ? True : False; break; } case INSTR_lgWordGreater: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); *sp = (wy > wx) ? True : False; break; } case INSTR_lgWordGreaterEq: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); *sp = (wy >= wx) ? True : False; break; } case INSTR_lgWordAdd: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy+wx; *sp = (PolyWord)t; break; } case INSTR_lgWordSub: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy-wx; *sp = (PolyWord)t; break; } case INSTR_lgWordMult: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy*wx; *sp = (PolyWord)t; break; } case INSTR_lgWordDiv: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy/wx; *sp = (PolyWord)t; break; } case INSTR_lgWordMod: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy%wx; *sp = (PolyWord)t; break; } case INSTR_lgWordAnd: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy&wx; *sp = (PolyWord)t; break; } case INSTR_lgWordOr: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy|wx; *sp = (PolyWord)t; break; } case INSTR_lgWordXor: { uintptr_t wx = *(uintptr_t*)((*sp++).AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy^wx; *sp = (PolyWord)t; break; } case INSTR_lgWordShiftLeft: { // The shift amount is a tagged word not a boxed large word POLYUNSIGNED wx = UNTAGGED_UNSIGNED(*sp++); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy << wx; *sp = (PolyWord)t; break; } case INSTR_lgWordShiftRLog: 
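// The logical and arithmetic right shifts differ only in whether the boxed operand is read as uintptr_t or intptr_t; as with wordShiftRArith above, this relies on the C implementation shifting signed values arithmetically.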
{ // The shift amount is a tagged word not a boxed large word POLYUNSIGNED wx = UNTAGGED_UNSIGNED(*sp++); uintptr_t wy = *(uintptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy >> wx; *sp = (PolyWord)t; break; } case INSTR_lgWordShiftRArith: { // The shift amount is a tagged word not a boxed large word POLYUNSIGNED wx = UNTAGGED_UNSIGNED(*sp++); intptr_t wy = *(intptr_t*)((*sp).AsObjPtr()); PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(intptr_t*)t = wy >> wx; *sp = (PolyWord)t; break; } case INSTR_realEqual: { double u = unboxDouble(*sp++); *sp = u == unboxDouble(*sp) ? True: False; break; } case INSTR_realLess: { double u = unboxDouble(*sp++); *sp = unboxDouble(*sp) < u ? True: False; break; } case INSTR_realLessEq: { double u = unboxDouble(*sp++); *sp = unboxDouble(*sp) <= u ? True: False; break; } case INSTR_realGreater: { double u = unboxDouble(*sp++); *sp = unboxDouble(*sp) > u ? True: False; break; } case INSTR_realGreaterEq: { double u = unboxDouble(*sp++); *sp = unboxDouble(*sp) >= u ? True: False; break; } case INSTR_realUnordered: { double u = unboxDouble(*sp++); double v = unboxDouble(*sp); *sp = (std::isnan(u) || std::isnan(v)) ? True : False; break; } case INSTR_realAdd: { double u = unboxDouble(*sp++); double v = unboxDouble(*sp); PolyObject *t = this->boxDouble(v+u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case INSTR_realSub: { double u = unboxDouble(*sp++); double v = unboxDouble(*sp); PolyObject *t = this->boxDouble(v-u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case INSTR_realMult: { double u = unboxDouble(*sp++); double v = unboxDouble(*sp); PolyObject *t = this->boxDouble(v*u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case INSTR_realDiv: { double u = unboxDouble(*sp++); double v = unboxDouble(*sp); PolyObject *t = this->boxDouble(v/u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case INSTR_floatEqual: { float u = unboxFloat(*sp++); *sp = u == unboxFloat(*sp) ? True : False; break; } case INSTR_floatLess: { float u = unboxFloat(*sp++); *sp = unboxFloat(*sp) < u ? True : False; break; } case INSTR_floatLessEq: { float u = unboxFloat(*sp++); *sp = unboxFloat(*sp) <= u ? True : False; break; } case INSTR_floatGreater: { float u = unboxFloat(*sp++); *sp = unboxFloat(*sp) > u ? True : False; break; } case INSTR_floatGreaterEq: { float u = unboxFloat(*sp++); *sp = unboxFloat(*sp) >= u ? True : False; break; } case INSTR_floatUnordered: { float u = unboxFloat(*sp++); float v = unboxFloat(*sp); *sp = (std::isnan(u) || std::isnan(v)) ? 
True : False; break; } case INSTR_floatAdd: { float u = unboxFloat(*sp++); float v = unboxFloat(*sp); PolyObject *t = this->boxFloat(v + u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = t; break; } case INSTR_floatSub: { float u = unboxFloat(*sp++); float v = unboxFloat(*sp); PolyObject *t = this->boxFloat(v - u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = t; break; } case INSTR_floatMult: { float u = unboxFloat(*sp++); float v = unboxFloat(*sp); PolyObject *t = this->boxFloat(v*u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = t; break; } case INSTR_floatDiv: { float u = unboxFloat(*sp++); float v = unboxFloat(*sp); PolyObject *t = this->boxFloat(v / u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = t; break; } case INSTR_realToFloat: { // Convert a double to a float. It's complicated because it depends on the rounding mode. int rMode = *pc++; int current = getrounding(); // If the rounding is 4 it means "use current rounding". // Don't call unboxDouble until we're set the rounding. GCC seems to convert it // before the actual float cast. if (rMode < 4) setrounding(rMode); double d = unboxDouble(*sp); float v = (float)d; // Convert with the appropriate rounding. setrounding(current); PolyObject *t = this->boxFloat(v, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = t; break; } case INSTR_realToInt: dv = unboxDouble(*sp); goto realtoint; case INSTR_floatToInt: dv = (double)unboxFloat(*sp); realtoint: { // Convert a double or a float to a tagged integer. int rMode = *pc++; // We mustn't try converting a value that will overflow the conversion // but we need to be careful that we don't raise overflow incorrectly due // to rounding. if (dv > (double)(MAXTAGGED + MAXTAGGED / 2) || dv < -(double)(MAXTAGGED + MAXTAGGED / 2)) { *(--sp) = overflowPacket; goto RAISE_EXCEPTION; } POLYSIGNED p; switch (rMode) { case POLY_ROUND_TONEAREST: p = (POLYSIGNED)round(dv); break; case POLY_ROUND_DOWNWARD: p = (POLYSIGNED)floor(dv); break; case POLY_ROUND_UPWARD: p = (POLYSIGNED)ceil(dv); break; case POLY_ROUND_TOZERO: default: // Truncation is the default for C. p = (POLYSIGNED)dv; } // Check that the value can be tagged. if (p > MAXTAGGED || p < -MAXTAGGED - 1) { *(--sp) = overflowPacket; goto RAISE_EXCEPTION; } *sp = TAGGED(p); break; } case INSTR_getThreadId: *(--sp) = (PolyWord)this->threadObject; break; case INSTR_allocWordMemory: { // Allocate word segment. This must be initialised. // We mustn't pop the initialiser until after any potential GC. POLYUNSIGNED length = UNTAGGED_UNSIGNED(sp[2]); PolyObject *t = this->allocateMemory(length, pc, sp); if (t == 0) goto RAISE_EXCEPTION; PolyWord initialiser = *sp++; POLYUNSIGNED flags = UNTAGGED_UNSIGNED(*sp++); t->SetLengthWord(length, (byte)flags); *sp = (PolyWord)t; // Have to initialise the data. for (; length > 0; ) t->Set(--length, initialiser); break; } case INSTR_alloc_ref: { // Allocate a single word mutable cell. This is more common than allocWordMemory on its own. PolyObject *t = this->allocateMemory(1, pc, sp); if (t == 0) goto RAISE_EXCEPTION; PolyWord initialiser = (*sp); t->SetLengthWord(1, F_MUTABLE_BIT); t->Set(0, initialiser); *sp = (PolyWord)t; break; } case INSTR_loadMLWord: { // The values on the stack are base, index and offset. POLYUNSIGNED offset = UNTAGGED(*sp++); POLYUNSIGNED index = UNTAGGED(*sp++); PolyObject *p = (PolyObject*)((*sp).AsCodePtr() + offset); *sp = p->Get(index); break; } case INSTR_loadMLByte: { // The values on the stack are base and index. 
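// Unlike loadMLWord there is no separate offset operand: the byte at base[index] is fetched and must be tagged before being pushed.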
POLYUNSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = (*sp).AsCodePtr(); *sp = TAGGED(p[index]); // Have to tag the result break; } case INSTR_loadC8: { // This is similar to loadMLByte except that the base address is a boxed large-word. // Also the index is SIGNED. POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())); *sp = TAGGED(p[index]); // Have to tag the result break; } case INSTR_loadC16: { // This and the other loads are similar to loadMLWord with separate // index and offset values. POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())) + offset; POLYUNSIGNED r = ((uint16_t*)p)[index]; *sp = TAGGED(r); break; } case INSTR_loadC32: { POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())) + offset; uintptr_t r = ((uint32_t*)p)[index]; #ifdef IS64BITS // This is tagged in 64-bit mode *sp = TAGGED(r); #else // But boxed in 32-bit mode. PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = r; *sp = (PolyWord)t; #endif break; } #if (defined(IS64BITS)) case INSTR_loadC64: { POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())) + offset; uintptr_t r = ((uint64_t*)p)[index]; // This must be boxed. PolyObject *t = this->allocateMemory(LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = r; *sp = (PolyWord)t; break; } #endif case INSTR_loadCFloat: { POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())) + offset; double r = ((float*)p)[index]; // This must be boxed. PolyObject *t = this->boxDouble(r, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case INSTR_loadCDouble: { POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())) + offset; double r = ((double*)p)[index]; // This must be boxed. PolyObject *t = this->boxDouble(r, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case INSTR_loadUntagged: { // The values on the stack are base, index and offset. POLYUNSIGNED offset = UNTAGGED(*sp++); POLYUNSIGNED index = UNTAGGED(*sp++); PolyObject *p = (PolyObject*)((*sp).AsCodePtr() + offset); *sp = TAGGED(p->Get(index).AsUnsigned()); break; } case INSTR_storeMLWord: { PolyWord toStore = *sp++; POLYUNSIGNED offset = UNTAGGED(*sp++); POLYUNSIGNED index = UNTAGGED(*sp++); PolyObject *p = (PolyObject*)((*sp).AsCodePtr() + offset); p->Set(index, toStore); *sp = Zero; break; } case INSTR_storeMLByte: { POLYUNSIGNED toStore = UNTAGGED(*sp++); POLYUNSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = (*sp).AsCodePtr(); p[index] = (byte)toStore; *sp = Zero; break; } case INSTR_storeC8: { // Similar to storeMLByte except that the base address is a boxed large-word. POLYUNSIGNED toStore = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())); p[index] = (byte)toStore; *sp = Zero; break; } case INSTR_storeC16: { uint16_t toStore = (uint16_t)UNTAGGED(*sp++); POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())) + offset; ((uint16_t*)p)[index] = toStore; *sp = Zero; break; } case INSTR_storeC32: { #ifdef IS64BITS // This is a tagged value in 64-bit mode. 
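// (This mirrors loadC32 above: a 32-bit value fits in a tagged word on 64-bit targets but has to be passed as a boxed large word on 32-bit ones.)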
uint32_t toStore = (uint32_t)UNTAGGED(*sp++); #else // but a boxed value in 32-bit mode. uint32_t toStore = (uint32_t)(*(uintptr_t*)((*sp++).AsObjPtr())); #endif POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())) + offset; ((uint32_t*)p)[index] = toStore; *sp = Zero; break; } #if (defined(IS64BITS)) case INSTR_storeC64: { // This is a boxed value. uint64_t toStore = *(uintptr_t*)((*sp++).AsObjPtr()); POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())) + offset; ((uint64_t*)p)[index] = toStore; *sp = Zero; break; } #endif case INSTR_storeCFloat: { // This is a boxed value. float toStore = (float)unboxDouble(*sp++); POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())) + offset; ((float*)p)[index] = toStore; *sp = Zero; break; } case INSTR_storeCDouble: { // This is a boxed value. double toStore = unboxDouble(*sp++); POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte **)((*sp).AsObjPtr())) + offset; ((double*)p)[index] = toStore; *sp = Zero; break; } case INSTR_storeUntagged: { PolyWord toStore = PolyWord::FromUnsigned(UNTAGGED_UNSIGNED(*sp++)); POLYUNSIGNED offset = UNTAGGED(*sp++); POLYUNSIGNED index = UNTAGGED(*sp++); PolyObject *p = (PolyObject*)((*sp).AsCodePtr() + offset); p->Set(index, toStore); *sp = Zero; break; } case INSTR_blockMoveWord: { // The offsets are byte counts but the the indexes are in words. POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED destOffset = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED destIndex = UNTAGGED_UNSIGNED(*sp++); PolyObject *dest = (PolyObject*)((*sp++).AsCodePtr() + destOffset); POLYUNSIGNED srcOffset = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED srcIndex = UNTAGGED_UNSIGNED(*sp++); PolyObject *src = (PolyObject*)((*sp).AsCodePtr() + srcOffset); for (POLYUNSIGNED u = 0; u < length; u++) dest->Set(destIndex+u, src->Get(srcIndex+u)); *sp = Zero; break; } case INSTR_blockMoveByte: { POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED destOffset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR dest = (*sp++).AsCodePtr(); POLYUNSIGNED srcOffset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR src = (*sp).AsCodePtr(); memcpy(dest+destOffset, src+srcOffset, length); *sp = Zero; break; } case INSTR_blockEqualByte: { POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED arg2Offset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR arg2Ptr = (*sp++).AsCodePtr(); POLYUNSIGNED arg1Offset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR arg1Ptr = (*sp).AsCodePtr(); *sp = memcmp(arg1Ptr+arg1Offset, arg2Ptr+arg2Offset, length) == 0 ? True : False; break; } case INSTR_blockCompareByte: { POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED arg2Offset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR arg2Ptr = (*sp++).AsCodePtr(); POLYUNSIGNED arg1Offset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR arg1Ptr = (*sp).AsCodePtr(); int result = memcmp(arg1Ptr+arg1Offset, arg2Ptr+arg2Offset, length); *sp = result == 0 ? TAGGED(0) : result < 0 ? 
TAGGED(-1) : TAGGED(1); break; } default: Crash("Unknown instruction %x\n", pc[-1]); } /* switch */ } /* for */ return 0; } /* MD_switch_to_poly */ void IntTaskData::GarbageCollect(ScanAddress *process) { TaskData::GarbageCollect(process); overflowPacket = process->ScanObjectAddress(overflowPacket); dividePacket = process->ScanObjectAddress(dividePacket); if (stack != 0) { StackSpace *stackSpace = stack; PolyWord *stackPtr = this->taskSp; // The exception arg if any ScanStackAddress(process, this->exception_arg, stackSpace); // Now the values on the stack. for (PolyWord *q = stackPtr; q < stackSpace->top; q++) ScanStackAddress(process, *q, stackSpace); } } // Process a value within the stack. void IntTaskData::ScanStackAddress(ScanAddress *process, PolyWord &val, StackSpace *stack) { if (! val.IsDataPtr()) return; MemSpace *space = gMem.LocalSpaceForAddress(val.AsStackAddr()-1); if (space != 0) val = process->ScanObjectAddress(val.AsObjPtr()); } // Copy a stack void IntTaskData::CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length) { /* Moves a stack, updating all references within the stack */ PolyWord *old_base = (PolyWord *)old_stack; PolyWord *new_base = (PolyWord*)new_stack; PolyWord *old_top = old_base + old_length; /* Calculate the offset of the new stack from the old. If the frame is being extended objects in the new frame will be further up the stack than in the old one. */ uintptr_t offset = new_base - old_base + new_length - old_length; PolyWord *oldSp = this->taskSp; this->taskSp = oldSp + offset; this->hr = this->hr + offset; /* Skip the unused part of the stack. */ uintptr_t i = oldSp - old_base; ASSERT(i <= old_length); i = old_length - i; PolyWord *old = oldSp; PolyWord *newp = this->taskSp; while (i--) { // ASSERT(old >= old_base && old < old_base+old_length); // ASSERT(newp >= new_base && newp < new_base+new_length); PolyWord old_word = *old++; if (old_word.IsTagged() || old_word.AsStackAddr() < old_base || old_word.AsStackAddr() >= old_top) *newp++ = old_word; else *newp++ = PolyWord::FromStackAddr(old_word.AsStackAddr() + offset); } ASSERT(old == ((PolyWord*)old_stack) + old_length); ASSERT(newp == ((PolyWord*)new_stack) + new_length); } Handle IntTaskData::EnterPolyCode() /* Called from "main" to enter the code. */ { Handle hOriginal = this->saveVec.mark(); // Set this up for the IO calls. while (1) { this->saveVec.reset(hOriginal); // Remove old RTS arguments and results. // Run the ML code and return with the function to call. this->inML = true; int ioFunction = SwitchToPoly(); this->inML = false; try { switch (ioFunction) { case -1: // We've been interrupted. This usually involves simulating a // stack overflow so we could come here because of a genuine // stack overflow. // Previously this code was executed on every RTS call but there // were problems on Mac OS X at least with contention on schedLock. // Process any asynchronous events i.e. interrupts or kill processes->ProcessAsynchRequests(this); // Release and re-acquire use of the ML memory to allow another thread // to GC. processes->ThreadReleaseMLMemory(this); processes->ThreadUseMLMemory(this); break; case -2: // A callback has returned. ASSERT(0); // Callbacks aren't implemented default: Crash("Unknown io operation %d\n", ioFunction); } } catch (IOException &) { } } } // As far as possible we want locking and unlocking an ML mutex to be fast so // we try to implement the code in the assembly code using appropriate // interlocked instructions. 
That does mean that if we need to lock and // unlock an ML mutex in this code we have to use the same, machine-dependent, // code to do it. These are defaults that are used where there is no // machine-specific code. static Handle ProcessAtomicIncrement(TaskData *taskData, Handle mutexp) { PLocker l(&mutexLock); PolyObject *p = DEREFHANDLE(mutexp); // A thread can only call this once so the values will be short PolyWord newValue = TAGGED(UNTAGGED(p->Get(0))+1); p->Set(0, newValue); return taskData->saveVec.push(newValue); } // Release a mutex. We need to lock the mutex to ensure we don't // reset it in the time between one of atomic operations reading // and writing the mutex. static Handle ProcessAtomicReset(TaskData *taskData, Handle mutexp) { PLocker l(&mutexLock); DEREFHANDLE(mutexp)->Set(0, TAGGED(1)); // Set this to released. return taskData->saveVec.push(TAGGED(0)); // Push the unit result } Handle IntTaskData::AtomicIncrement(Handle mutexp) { return ProcessAtomicIncrement(this, mutexp); } void IntTaskData::AtomicReset(Handle mutexp) { (void)ProcessAtomicReset(this, mutexp); } bool IntTaskData::AddTimeProfileCount(SIGNALCONTEXT *context) { if (taskPc != 0) { // See if the PC we've got is an ML code address. MemSpace *space = gMem.SpaceForAddress(taskPc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { add_count(this, taskPc, 1); return true; } } return false; } static Interpreter interpreterObject; MachineDependent *machineDependent = &interpreterObject; + +// No machine-specific calls in the interpreter. +struct _entrypts machineSpecificEPT[] = +{ + { NULL, NULL} // End of list. +}; diff --git a/libpolyml/machine_dep.h b/libpolyml/machine_dep.h index 1d8bc7ae..838de908 100644 --- a/libpolyml/machine_dep.h +++ b/libpolyml/machine_dep.h @@ -1,61 +1,63 @@ /* Title: machine_dep.h - exports signature for machine_dep.c Copyright (c) 2000 Cambridge University Technical Services Limited This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _MACHINE_DEP_H #define _MACHINE_DEP_H class ScanAddress; class TaskData; class SaveVecEntry; typedef SaveVecEntry *Handle; class StackSpace; // Machine architecture values. typedef enum { MA_Interpreted = 0, MA_I386, MA_X86_64, MA_X86_64_32 } Architectures; // Machine-dependent module. class MachineDependent { public: virtual ~MachineDependent() {} // Keep the compiler happy // Create the machine-specific task data object. virtual TaskData *CreateTaskData(void) = 0; virtual unsigned InitialStackSize(void) { return 128; } // Initial size of a stack // Must be > 40 (i.e. 
2*min_stack_check) + base area in each stack frame /* ScanConstantsWithinCode - update addresses within a code segment.*/ virtual void ScanConstantsWithinCode(PolyObject *addr, PolyObject *oldAddr, POLYUNSIGNED length, ScanAddress *process) {} void ScanConstantsWithinCode(PolyObject *addr, ScanAddress *process) { ScanConstantsWithinCode(addr, addr, addr->Length(), process); } // Common case virtual Architectures MachineArchitecture(void) = 0; }; extern MachineDependent *machineDependent; +extern struct _entrypts machineSpecificEPT[]; + #endif /* _MACHINE_DEP_H */ diff --git a/libpolyml/rtsentry.cpp b/libpolyml/rtsentry.cpp index 2d403638..45b449ad 100644 --- a/libpolyml/rtsentry.cpp +++ b/libpolyml/rtsentry.cpp @@ -1,191 +1,193 @@ /* Title: rtsentry.cpp - Entry points to the run-time system Copyright (c) 2016, 2017 David C. J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #ifdef HAVE_ERRNO_H #include #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_ASSERT_H #include #define ASSERT(x) assert(x) #else #define ASSERT(x) #endif #include "globals.h" #include "rtsentry.h" #include "save_vec.h" #include "processes.h" #include "run_time.h" #include "polystring.h" #include "arb.h" #include "basicio.h" #include "polyffi.h" #include "xwindows.h" #include "os_specific.h" #include "timing.h" #include "sighandler.h" #include "sharedata.h" #include "run_time.h" #include "reals.h" #include "profiling.h" #include "processes.h" #include "process_env.h" #include "poly_specific.h" #include "objsize.h" #include "network.h" +#include "machine_dep.h" #include "exporter.h" #include "statistics.h" #include "savestate.h" extern struct _entrypts rtsCallEPT[]; static entrypts entryPointTable[] = { rtsCallEPT, arbitraryPrecisionEPT, basicIOEPT, polyFFIEPT, xwindowsEPT, osSpecificEPT, timingEPT, sigHandlerEPT, shareDataEPT, runTimeEPT, realsEPT, profilingEPT, processesEPT, processEnvEPT, polySpecificEPT, objSizeEPT, networkingEPT, exporterEPT, statisticsEPT, savestateEPT, + machineSpecificEPT, NULL }; extern "C" { #ifdef _MSC_VER __declspec(dllexport) #endif POLYUNSIGNED PolyCreateEntryPointObject(FirstArgument threadId, PolyWord arg); }; // Create an entry point containing the address of the entry and the // string name. Having the string in there allows us to export the entry. Handle creatEntryPointObject(TaskData *taskData, Handle entryH, bool isFuncPtr) { TempCString entryName(Poly_string_to_C_alloc(entryH->Word())); if ((const char *)entryName == 0) raise_syscall(taskData, "Insufficient memory", ENOMEM); // Create space for the address followed by the name as a C string. uintptr_t space = 1 + (strlen(entryName) + 1 + (isFuncPtr ? 0 : 1) + sizeof(polyRTSFunction*) - 1) / sizeof(PolyWord); // Allocate a byte, weak, mutable, no-overwrite cell. 
It's not clear if // it actually needs to be mutable but if it is it needs to be no-overwrite. Handle refH = alloc_and_save(taskData, space, F_BYTE_OBJ|F_WEAK_BIT|F_MUTABLE_BIT|F_NO_OVERWRITE); PolyObject *p = refH->WordP(); *(polyRTSFunction*)p = 0; // Clear it char *entryPtr = (char*)(p->AsBytePtr() + sizeof(polyRTSFunction*)); if (! isFuncPtr) *entryPtr++ = 1; // Put in a type entry strcpy(entryPtr, entryName); return refH; } // Return the string entry point. const char *getEntryPointName(PolyObject *p, bool *isFuncPtr) { if (p->Length() <= sizeof(polyRTSFunction*)/sizeof(PolyWord)) return 0; // Doesn't contain an entry point const char *entryPtr = (const char*)(p->AsBytePtr() + sizeof(polyRTSFunction*)); *isFuncPtr = *entryPtr != 1; // If the type is 1 it is a data entry point if (*entryPtr < ' ') entryPtr++; // Skip the type byte return entryPtr; } // Sets the address of the entry point in an entry point object. bool setEntryPoint(PolyObject *p) { if (p->Length() == 0) return false; *(polyRTSFunction*)p = 0; // Clear it by default if (p->Length() == 1) return false; const char *entryName = (const char*)(p->AsBytePtr()+sizeof(polyRTSFunction*)); if (*entryName < ' ') entryName++; // Skip the type byte // Search the entry point table list. for (entrypts *ept=entryPointTable; *ept != NULL; ept++) { entrypts entryPtTable = *ept; if (entryPtTable != 0) { for (struct _entrypts *ep = entryPtTable; ep->entry != NULL; ep++) { if (strcmp(entryName, ep->name) == 0) { polyRTSFunction entry = ep->entry; *(polyRTSFunction*)p = entry; return true; } } } } return false; } // External call POLYUNSIGNED PolyCreateEntryPointObject(FirstArgument threadId, PolyWord arg) { TaskData *taskData = TaskData::FindTaskForId(threadId); ASSERT(taskData != 0); taskData->PreRTSCall(); Handle reset = taskData->saveVec.mark(); Handle pushedArg = taskData->saveVec.push(arg); Handle result = 0; try { result = creatEntryPointObject(taskData, pushedArg, true /* Always functions */); if (!setEntryPoint(result->WordP())) raise_fail(taskData, "entry point not found"); } catch (...) { } // If an ML exception is raised taskData->saveVec.reset(reset); // Ensure the save vec is reset taskData->PostRTSCall(); if (result == 0) return TAGGED(0).AsUnsigned(); else return result->Word().AsUnsigned(); } struct _entrypts rtsCallEPT[] = { { "PolyCreateEntryPointObject", (polyRTSFunction)&PolyCreateEntryPointObject}, { NULL, NULL} // End of list. }; diff --git a/libpolyml/x86_dep.cpp b/libpolyml/x86_dep.cpp index ab038aec..4a76bae0 100644 --- a/libpolyml/x86_dep.cpp +++ b/libpolyml/x86_dep.cpp @@ -1,1467 +1,1502 @@ /* Title: Machine dependent code for i386 and X64 under Windows and Unix Copyright (c) 2000-7 Cambridge University Technical Services Limited Further work copyright David C. J. Matthews 2011-19 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #ifdef HAVE_STDLIB_H #include #endif #include #ifdef HAVE_SIGNAL_H #include #endif #ifdef HAVE_ASSERT_H #include #define ASSERT(x) assert(x) #else #define ASSERT(x) #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_ERRNO_H #include #endif #if (defined(_WIN32)) #include #include #endif #include "globals.h" #include "run_time.h" #include "diagnostics.h" #include "processes.h" #include "profiling.h" #include "machine_dep.h" #include "scanaddrs.h" #include "memmgr.h" #include "rtsentry.h" #include "sys.h" // Temporary /********************************************************************** * * Register usage: * * %Reax: First argument to function. Result of function call. * %Rebx: Second argument to function. * %Recx: General register * %Redx: Closure pointer in call. * %Rebp: Points to memory used for extra registers * %Resi: General register. * %Redi: General register. * %Resp: Stack pointer. * The following apply only on the X64 * %R8: Third argument to function * %R9: Fourth argument to function * %R10: Fifth argument to function * %R11: General register * %R12: General register * %R13: General register * %R14: General register * %R15: Memory allocation pointer * **********************************************************************/ #ifdef HOSTARCHITECTURE_X86_64 struct fpSaveArea { double fpregister[7]; // Save area for xmm0-6 }; #else // Structure of floating point save area. // This is dictated by the hardware. typedef byte fpregister[10]; struct fpSaveArea { unsigned short cw; unsigned short _unused0; unsigned short sw; unsigned short _unused1; unsigned short tw; unsigned short _unused2; unsigned fip; unsigned short fcs0; unsigned short _unused3; unsigned foo; unsigned short fcs1; unsigned short _unused4; fpregister registers[8]; }; #endif /* the amount of ML stack space to reserve for registers, C exception handling etc. The compiler requires us to reserve 2 stack-frames worth (2 * 20 words). We actually reserve slightly more than this. */ #if (!defined(_WIN32) && !defined(HAVE_SIGALTSTACK)) // If we can't handle signals on a separate stack make sure there's space // on the Poly stack. #define OVERFLOW_STACK_SIZE (50+1024) #else #define OVERFLOW_STACK_SIZE 50 #endif union stackItem { /* #ifndef POLYML32IN64 stackItem(PolyWord v) { words[0] = v.AsUnsigned(); }; stackItem() { words[0] = TAGGED(0).AsUnsigned(); } POLYUNSIGNED words[1]; #else // In 32-in-64 we need to clear the second PolyWord. This assumes little-endian. stackItem(PolyWord v) { words[0] = v.AsUnsigned(); words[1] = 0; }; stackItem() { words[0] = TAGGED(0).AsUnsigned(); words[1] = 0; } POLYUNSIGNED words[2]; #endif */ stackItem(PolyWord v) { argValue = v.AsUnsigned(); } stackItem() { argValue = TAGGED(0).AsUnsigned(); } // These return the low order word. PolyWord w()const { return PolyWord::FromUnsigned((POLYUNSIGNED)argValue); } operator PolyWord () { return PolyWord::FromUnsigned((POLYUNSIGNED)argValue); } POLYCODEPTR codeAddr; // Return addresses stackItem *stackAddr; // Stack addresses uintptr_t argValue; // Treat an address as an int }; class X86TaskData; // This is passed as the argument vector to X86AsmSwitchToPoly. 
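// (Editorial sketch, not in the sources: because the assembly code addresses the fields
// of AssemblyArgs below by fixed byte offsets, any change to its layout has to be
// mirrored in the assembly sources.  A hypothetical compile-time guard might read
//     static_assert(offsetof(AssemblyArgs, stackPtr) == ASM_STACKPTR_OFFSET,
//                   "AssemblyArgs layout out of step with the assembly code");
// where ASM_STACKPTR_OFFSET stands for whatever offset the assembly currently assumes.)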
// The offsets are built into the assembly code and the code-generator. // localMpointer and stackPtr are updated before control returns to C. typedef struct _AssemblyArgs { public: PolyWord *localMpointer; // Allocation ptr + 1 word stackItem *handlerRegister; // Current exception handler PolyWord *localMbottom; // Base of memory + 1 word stackItem *stackLimit; // Lower limit of stack stackItem exceptionPacket; // Set if there is an exception byte unusedRequestCode; // No longer used. byte unusedFlag; // No longer used byte returnReason; // Reason for returning from ML. byte unusedRestore; // No longer used. uintptr_t saveCStack; // Saved C stack frame. PolyWord threadId; // My thread id. Saves having to call into RTS for it. stackItem *stackPtr; // Current stack pointer byte *noLongerUsed; // Now removed byte *heapOverFlowCall; // These are filled in with the functions. byte *stackOverFlowCall; byte *stackOverFlowCallEx; // Saved registers, where applicable. stackItem p_rax; stackItem p_rbx; stackItem p_rcx; stackItem p_rdx; stackItem p_rsi; stackItem p_rdi; #ifdef HOSTARCHITECTURE_X86_64 stackItem p_r8; stackItem p_r9; stackItem p_r10; stackItem p_r11; stackItem p_r12; stackItem p_r13; stackItem p_r14; #endif struct fpSaveArea p_fp; } AssemblyArgs; // These next few are temporarily added for the interpreter // This duplicates some code in reals.cpp but is now updated. #define DOUBLESIZE (sizeof(double)/sizeof(POLYUNSIGNED)) union realdb { double dble; POLYUNSIGNED puns[DOUBLESIZE]; }; #define LGWORDSIZE (sizeof(uintptr_t) / sizeof(PolyWord)) class X86TaskData: public TaskData { public: X86TaskData(); unsigned allocReg; // The register to take the allocated space. POLYUNSIGNED allocWords; // The words to allocate. Handle callBackResult; AssemblyArgs assemblyInterface; int saveRegisterMask; // Registers that need to be updated by a GC. virtual void GarbageCollect(ScanAddress *process); void ScanStackAddress(ScanAddress *process, stackItem &val, StackSpace *stack); virtual Handle EnterPolyCode(); // Start running ML virtual void InterruptCode(); virtual bool AddTimeProfileCount(SIGNALCONTEXT *context); virtual void InitStackFrame(TaskData *parentTask, Handle proc, Handle arg); virtual void SetException(poly_exn *exc); // Release a mutex in exactly the same way as compiler code virtual Handle AtomicIncrement(Handle mutexp); virtual void AtomicReset(Handle mutexp); // Return the minimum space occupied by the stack. Used when setting a limit. // N.B. This is PolyWords not native words. virtual uintptr_t currentStackSpace(void) const { return (this->stack->top - (PolyWord*)assemblyInterface.stackPtr) + OVERFLOW_STACK_SIZE*sizeof(uintptr_t)/sizeof(PolyWord); } // Increment the profile count for an allocation. Also now used for mutex contention. virtual void addProfileCount(POLYUNSIGNED words) { add_count(this, assemblyInterface.stackPtr[0].codeAddr, words); } // PreRTSCall: After calling from ML to the RTS we need to save the current heap pointer virtual void PreRTSCall(void) { TaskData::PreRTSCall(); SaveMemRegisters(); } // PostRTSCall: Before returning we need to restore the heap pointer. // If there has been a GC in the RTS call we need to create a new heap area. 
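// (For a concrete use of this pairing see PolyCreateEntryPointObject in rtsentry.cpp
// above: an RTS entry brackets its body with PreRTSCall()/PostRTSCall(), so the heap
// pointer is saved on the way in and re-established - possibly in a fresh allocation
// area after a GC - on the way out.)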
virtual void PostRTSCall(void) { SetMemRegisters(); TaskData::PostRTSCall(); } virtual void CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length); virtual Handle EnterCallbackFunction(Handle func, Handle args); int SwitchToPoly(); void HeapOverflowTrap(byte *pcPtr); void SetMemRegisters(); void SaveMemRegisters(); void SetRegisterMask(); void MakeTrampoline(byte **pointer, byte*entryPt); PLock interruptLock; stackItem *get_reg(int n); stackItem *®SP() { return assemblyInterface.stackPtr; } stackItem ®AX() { return assemblyInterface.p_rax; } stackItem ®BX() { return assemblyInterface.p_rbx; } stackItem ®CX() { return assemblyInterface.p_rcx; } stackItem ®DX() { return assemblyInterface.p_rdx; } stackItem ®SI() { return assemblyInterface.p_rsi; } stackItem ®DI() { return assemblyInterface.p_rdi; } #ifdef HOSTARCHITECTURE_X86_64 stackItem ®8() { return assemblyInterface.p_r8; } stackItem ®9() { return assemblyInterface.p_r9; } stackItem ®10() { return assemblyInterface.p_r10; } stackItem ®11() { return assemblyInterface.p_r11; } stackItem ®12() { return assemblyInterface.p_r12; } stackItem ®13() { return assemblyInterface.p_r13; } stackItem ®14() { return assemblyInterface.p_r14; } #endif #if (defined(_WIN32)) DWORD savedErrno; #else int savedErrno; #endif }; class X86Dependent: public MachineDependent { public: X86Dependent() {} // Create a task data object. virtual TaskData *CreateTaskData(void) { return new X86TaskData(); } // Initial size of stack in PolyWords virtual unsigned InitialStackSize(void) { return (128+OVERFLOW_STACK_SIZE) * sizeof(uintptr_t) / sizeof(PolyWord); } virtual void ScanConstantsWithinCode(PolyObject *addr, PolyObject *oldAddr, POLYUNSIGNED length, ScanAddress *process); virtual Architectures MachineArchitecture(void) #ifndef HOSTARCHITECTURE_X86_64 { return MA_I386; } #elif defined(POLYML32IN64) { return MA_X86_64_32; } #else { return MA_X86_64; } #endif }; // Values for the returnReason byte enum RETURN_REASON { RETURN_IO_CALL_NOW_UNUSED = 0, RETURN_HEAP_OVERFLOW = 1, RETURN_STACK_OVERFLOW = 2, RETURN_STACK_OVERFLOWEX = 3, RETURN_CALLBACK_RETURN = 6, RETURN_CALLBACK_EXCEPTION = 7, RETURN_KILL_SELF = 9 }; extern "C" { // These are declared in the assembly code segment. void X86AsmSwitchToPoly(void *); extern int X86AsmKillSelf(void); extern int X86AsmCallbackReturn(void); extern int X86AsmCallbackException(void); extern int X86AsmPopArgAndClosure(void); extern int X86AsmRaiseException(void); extern int X86AsmCallExtraRETURN_HEAP_OVERFLOW(void); extern int X86AsmCallExtraRETURN_STACK_OVERFLOW(void); extern int X86AsmCallExtraRETURN_STACK_OVERFLOWEX(void); POLYUNSIGNED X86AsmAtomicIncrement(PolyObject*); POLYUNSIGNED X86AsmAtomicDecrement(PolyObject*); }; // Pointers to assembly code or trampolines to assembly code. static byte *popArgAndClosure, *killSelf, *raiseException, *callbackException, *callbackReturn; X86TaskData::X86TaskData(): allocReg(0), allocWords(0), saveRegisterMask(0) { assemblyInterface.heapOverFlowCall = (byte*)X86AsmCallExtraRETURN_HEAP_OVERFLOW; assemblyInterface.stackOverFlowCall = (byte*)X86AsmCallExtraRETURN_STACK_OVERFLOW; assemblyInterface.stackOverFlowCallEx = (byte*)X86AsmCallExtraRETURN_STACK_OVERFLOWEX; savedErrno = 0; } void X86TaskData::GarbageCollect(ScanAddress *process) { TaskData::GarbageCollect(process); // Process the parent first assemblyInterface.threadId = threadObject; if (stack != 0) { // Now the values on the stack. 
for (stackItem *q = assemblyInterface.stackPtr; q < (stackItem*)stack->top; q++) ScanStackAddress(process, *q, stack); } // Register mask for (int i = 0; i < 16; i++) { if (saveRegisterMask & (1 << i)) ScanStackAddress(process, *get_reg(i), stack); } } // Process a value within the stack. void X86TaskData::ScanStackAddress(ScanAddress *process, stackItem &stackItem, StackSpace *stack) { // We may have return addresses on the stack which could look like // tagged values. Check whether the value is in the code area before // checking whether it is untagged. #ifdef POLYML32IN64 // In 32-in-64 return addresses always have the top 32 bits non-zero. if (stackItem.argValue < ((uintptr_t)1 << 32)) { // It's either a tagged integer or an object pointer. if (stackItem.w().IsDataPtr()) { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } } else { // Could be a code address or a stack address. MemSpace *space = gMem.SpaceForAddress(stackItem.codeAddr - 1); if (space == 0 || space->spaceType != ST_CODE) return; PolyObject *obj = gMem.FindCodeObject(stackItem.codeAddr); ASSERT(obj != 0); // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } #else // The -1 here is because we may have a zero-sized cell in the last // word of a space. MemSpace *space = gMem.SpaceForAddress(stackItem.codeAddr-1); if (space == 0) return; // In particular we may have one of the assembly code addresses. if (space->spaceType == ST_CODE) { PolyObject *obj = gMem.FindCodeObject(stackItem.codeAddr); // If it is actually an integer it might be outside a valid code object. if (obj == 0) { ASSERT(stackItem.w().IsTagged()); // It must be an integer } else // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } else if (space->spaceType == ST_LOCAL && stackItem.w().IsDataPtr()) // Local values must be word addresses. { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } #endif } // Copy a stack void X86TaskData::CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length) { /* Moves a stack, updating all references within the stack */ #ifdef POLYML32IN64 old_length = old_length / 2; new_length = new_length / 2; #endif stackItem *old_base = (stackItem *)old_stack; stackItem *new_base = (stackItem*)new_stack; stackItem *old_top = old_base + old_length; /* Calculate the offset of the new stack from the old. If the frame is being extended objects in the new frame will be further up the stack than in the old one. */ uintptr_t offset = new_base - old_base + new_length - old_length; stackItem *oldStackPtr = assemblyInterface.stackPtr; // Adjust the stack pointer and handler pointer since these point into the stack. assemblyInterface.stackPtr = assemblyInterface.stackPtr + offset; assemblyInterface.handlerRegister = assemblyInterface.handlerRegister + offset; // We need to adjust any values on the stack that are pointers within the stack. // Skip the unused part of the stack. 
size_t i = oldStackPtr - old_base; ASSERT (i <= old_length); i = old_length - i; stackItem *old = oldStackPtr; stackItem *newp = assemblyInterface.stackPtr; while (i--) { stackItem old_word = *old++; if (old_word.w().IsDataPtr() && old_word.stackAddr >= old_base && old_word.stackAddr <= old_top) old_word.stackAddr = old_word.stackAddr + offset; else if (old_word.w().IsDataPtr() && IsHeapAddress(old_word.stackAddr)) { stackItem *addr = (stackItem*)old_word.w().AsStackAddr(); if (addr >= old_base && addr <= old_top) { addr += offset; old_word = PolyWord::FromStackAddr((PolyWord*)addr); } } *newp++ = old_word; } ASSERT(old == ((stackItem*)old_stack)+old_length); ASSERT(newp == ((stackItem*)new_stack)+new_length); // And change any registers that pointed into the old stack for (int j = 0; j < 16; j++) { if (saveRegisterMask & (1 << j)) { stackItem *regAddr = get_reg(j); stackItem old_word = *regAddr; if (old_word.w().IsDataPtr() && old_word.stackAddr >= old_base && old_word.stackAddr <= old_top) old_word.stackAddr = old_word.stackAddr + offset; else if (old_word.w().IsDataPtr() && IsHeapAddress(old_word.stackAddr)) { stackItem *addr = (stackItem*)old_word.w().AsStackAddr(); if (addr >= old_base && addr <= old_top) { addr += offset; old_word = PolyWord::FromStackAddr((PolyWord*)addr); } } *regAddr = old_word; } } } Handle X86TaskData::EnterPolyCode() /* Called from "main" to enter the code. */ { Handle hOriginal = this->saveVec.mark(); // Set this up for the IO calls. while (1) { this->saveVec.reset(hOriginal); // Remove old RTS arguments and results. // Run the ML code and return with the function to call. this->inML = true; int ioFunction = SwitchToPoly(); this->inML = false; try { switch (ioFunction) { case -1: // We've been interrupted. This usually involves simulating a // stack overflow so we could come here because of a genuine // stack overflow. // Previously this code was executed on every RTS call but there // were problems on Mac OS X at least with contention on schedLock. // Process any asynchronous events i.e. interrupts or kill processes->ProcessAsynchRequests(this); // Release and re-acquire use of the ML memory to allow another thread // to GC. processes->ThreadReleaseMLMemory(this); processes->ThreadUseMLMemory(this); break; case -2: // A callback has returned. return callBackResult; // Return the saved value. Not used in the new interface. default: Crash("Unknown io operation %d\n", ioFunction); } } catch (IOException &) { } } } // Run the current ML process. X86AsmSwitchToPoly saves the C state so that // whenever the ML requires assistance from the rest of the RTS it simply // returns to C with the appropriate values set in assemblyInterface.requestCode and // int X86TaskData::SwitchToPoly() // (Re)-enter the Poly code from C. Returns with the io function to call or // -1 if we are responding to an interrupt. { Handle mark = this->saveVec.mark(); do { this->saveVec.reset(mark); // Remove old data e.g. from arbitrary precision. SetMemRegisters(); // We need to save the C stack entry across this call in case // we're making a callback and the previous C stack entry is // for the original call. uintptr_t savedCStack = this->assemblyInterface.saveCStack; // Restore the saved error state. 
#if (defined(_WIN32)) SetLastError(savedErrno); #else errno = savedErrno; #endif if (assemblyInterface.exceptionPacket.argValue != TAGGED(0).AsUnsigned()) { (--assemblyInterface.stackPtr)->codeAddr = (byte*)X86AsmRaiseException; regAX() = (PolyWord)assemblyInterface.exceptionPacket; /* put exception data into eax */ } // Enter the ML code. X86AsmSwitchToPoly(&this->assemblyInterface); this->assemblyInterface.saveCStack = savedCStack; // Save the error codes. We may have made an RTS/FFI call that // has set these and we don't want to do anything to change them. #if (defined(_WIN32)) savedErrno = GetLastError(); #else savedErrno = errno; #endif SaveMemRegisters(); // Update globals from the memory registers. // Handle any heap/stack overflows or arbitrary precision traps. switch (this->assemblyInterface.returnReason) { case RETURN_HEAP_OVERFLOW: // The heap has overflowed. SetRegisterMask(); this->HeapOverflowTrap(assemblyInterface.stackPtr[0].codeAddr); // Computes a value for allocWords only break; case RETURN_STACK_OVERFLOW: case RETURN_STACK_OVERFLOWEX: { SetRegisterMask(); uintptr_t min_size; // Size in PolyWords if (assemblyInterface.returnReason == RETURN_STACK_OVERFLOW) { min_size = (this->stack->top - (PolyWord*)assemblyInterface.stackPtr) + OVERFLOW_STACK_SIZE * sizeof(uintptr_t) / sizeof(PolyWord); } else { // Stack limit overflow. If the required stack space is larger than // the fixed overflow size the code will calculate the limit in %EDI. stackItem *stackP = regDI().stackAddr; min_size = (this->stack->top - (PolyWord*)stackP) + OVERFLOW_STACK_SIZE * sizeof(uintptr_t) / sizeof(PolyWord); } try { // The stack check has failed. This may either be because we really have // overflowed the stack or because the stack limit value has been adjusted // to result in a call here. CheckAndGrowStack(this, min_size); } catch (IOException &) { // We may get an exception while handling this if we run out of store } { PLocker l(&interruptLock); // Set the stack limit. This clears any interrupt and also sets the // correct value if we've grown the stack. this->assemblyInterface.stackLimit = (stackItem*)this->stack->bottom + OVERFLOW_STACK_SIZE; } return -1; // We're in a safe state to handle any interrupts. } case RETURN_CALLBACK_RETURN: // regSP has been set by the assembly code. N.B. This may not be the same value as when // EnterCallbackFunction was called because the callback may have grown and moved the stack. // Remove the extra exception handler we created in EnterCallbackFunction ASSERT(assemblyInterface.handlerRegister == regSP()); regSP() += 1; assemblyInterface.handlerRegister = (*(regSP()++)).stackAddr; // Restore the previous handler. this->callBackResult = this->saveVec.push(regAX()); // Argument to return is in RAX. return -2; case RETURN_CALLBACK_EXCEPTION: // An ML callback has raised an exception. // It isn't possible to do anything here except abort. Crash("An ML function called from foreign code raised an exception. Unable to continue."); case RETURN_KILL_SELF: exitThread(this); default: Crash("Unknown return reason code %u", this->assemblyInterface.returnReason); } } while (1); } void X86TaskData::MakeTrampoline(byte **pointer, byte *entryPt) { #ifdef POLYML32IN64 // In the native address versions we can store the address directly onto the stack. // We can't do that in 32-in-64 because it's likely that the address will be in the // bottom 32-bits and we can't distinguish it from an object ID. Instead we have to // build a small code segment which jumps to the code. 
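// For reference, the 32-byte code object built below is intended to decode as:
//   offset  0: 48 8B 0D 09 00 00 00   movq 9(%rip),%rcx   load the absolute entry address stored at offset 16
//   offset  7: FF E1                  jmpq *%rcx
//   offset  9: F4                     hlt                 stops the scan of constants
//   offset 10: six zero bytes of padding
//   offset 16: the 8-byte address of entryPt, stored little-endian
//   offset 24: eight zero bytes, leaving the count of address constants zero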
unsigned requiredSize = 8; // 8 words i.e. 32 bytes PolyObject *result = gMem.AllocCodeSpace(requiredSize); byte *p = (byte*)result; *p++ = 0x48; // rex.w *p++ = 0x8b; // Movl *p++ = 0x0d; // rcx, pc relative *p++ = 0x09; // +2 bytes *p++ = 0x00; *p++ = 0x00; *p++ = 0x00; *p++ = 0xff; // jmp *p++ = 0xe1; // rcx *p++ = 0xf4; // hlt - needed to stop scan of constants for (unsigned i = 0; i < 6; i++) *p++ = 0; uintptr_t ep = (uintptr_t)entryPt; for (unsigned i = 0; i < 8; i++) { *p++ = ep & 0xff; ep >>= 8; } // Clear the remainder. In particular this sets the number // of address constants to zero. for (unsigned i = 0; i < 8; i++) *p++ = 0; result->SetLengthWord(requiredSize, F_CODE_OBJ); *pointer = (byte*)result; #else *pointer = entryPt; // Can go there directly #endif } void X86TaskData::InitStackFrame(TaskData *parentTaskData, Handle proc, Handle arg) /* Initialise stack frame. */ { // Set the assembly code addresses. if (popArgAndClosure == 0) MakeTrampoline(&popArgAndClosure, (byte*)&X86AsmPopArgAndClosure); if (killSelf == 0) MakeTrampoline(&killSelf, (byte*)&X86AsmKillSelf); if (raiseException == 0) MakeTrampoline(&raiseException, (byte*)&X86AsmRaiseException); if (callbackException == 0) MakeTrampoline(&callbackException, (byte*)&X86AsmCallbackException); if (callbackReturn == 0) MakeTrampoline(&callbackReturn, (byte*)&X86AsmCallbackReturn); StackSpace *space = this->stack; StackObject * newStack = space->stack(); uintptr_t stack_size = space->spaceSize() * sizeof(PolyWord) / sizeof(stackItem); uintptr_t topStack = stack_size-6; stackItem *stackTop = (stackItem*)newStack + topStack; assemblyInterface.stackPtr = stackTop; assemblyInterface.stackLimit = (stackItem*)space->bottom + OVERFLOW_STACK_SIZE; assemblyInterface.handlerRegister = (stackItem*)newStack+topStack+4; // Floating point save area. memset(&assemblyInterface.p_fp, 0, sizeof(struct fpSaveArea)); #ifndef HOSTARCHITECTURE_X86_64 // Set the control word for 64-bit precision otherwise we get inconsistent results. assemblyInterface.p_fp.cw = 0x027f ; // Control word assemblyInterface.p_fp.tw = 0xffff; // Tag registers - all unused #endif // Initial entry point - on the stack. stackTop[0].codeAddr = popArgAndClosure; // Push the argument and the closure on the stack. We can't put them into the registers // yet because we might get a GC before we actually start the code. stackTop[1] = proc->Word(); // Closure stackTop[2] = (arg == 0) ? TAGGED(0) : DEREFWORD(arg); // Argument /* We initialise the end of the stack with a sequence that will jump to kill_self whether the process ends with a normal return or by raising an exception. A bit of this was added to fix a bug when stacks were objects on the heap and could be scanned by the GC. */ stackTop[5] = TAGGED(0); // Probably no longer needed // Set the default handler and return address to point to this code. // PolyWord killJump(PolyWord::FromCodePtr((byte*)&X86AsmKillSelf)); // Exception handler. stackTop[4].codeAddr = killSelf; // Normal return address. We need a separate entry on the stack from // the exception handler because it is possible that the code we are entering // may replace this entry with an argument. The code-generator optimises tail-recursive // calls to functions with more args than the called function. stackTop[3].codeAddr = killSelf; #ifdef POLYML32IN64 // In 32-in-64 RBX always contains the heap base address. assemblyInterface.p_rbx.stackAddr = (stackItem*)globalHeapBase; #endif } // In Solaris-x86 the registers are named EIP and ESP. 
#if (!defined(REG_EIP) && defined(EIP)) #define REG_EIP EIP #endif #if (!defined(REG_ESP) && defined(ESP)) #define REG_ESP ESP #endif // Get the PC and SP(stack) from a signal context. This is needed for profiling. // This version gets the actual sp and pc if we are in ML. bool X86TaskData::AddTimeProfileCount(SIGNALCONTEXT *context) { stackItem * sp = 0; POLYCODEPTR pc = 0; if (context != 0) { // The tests for HAVE_UCONTEXT_T, HAVE_STRUCT_SIGCONTEXT and HAVE_WINDOWS_H need // to follow the tests in processes.h. #if defined(HAVE_WINDOWS_H) #ifdef _WIN64 sp = (stackItem *)context->Rsp; pc = (POLYCODEPTR)context->Rip; #else // Windows 32 including cygwin. sp = (stackItem *)context->Esp; pc = (POLYCODEPTR)context->Eip; #endif #elif defined(HAVE_UCONTEXT_T) #ifdef HAVE_MCONTEXT_T_GREGS // Linux #ifndef HOSTARCHITECTURE_X86_64 pc = (byte*)context->uc_mcontext.gregs[REG_EIP]; sp = (stackItem*)context->uc_mcontext.gregs[REG_ESP]; #else /* HOSTARCHITECTURE_X86_64 */ pc = (byte*)context->uc_mcontext.gregs[REG_RIP]; sp = (stackItem*)context->uc_mcontext.gregs[REG_RSP]; #endif /* HOSTARCHITECTURE_X86_64 */ #elif defined(HAVE_MCONTEXT_T_MC_ESP) // FreeBSD #ifndef HOSTARCHITECTURE_X86_64 pc = (byte*)context->uc_mcontext.mc_eip; sp = (stackItem*)context->uc_mcontext.mc_esp; #else /* HOSTARCHITECTURE_X86_64 */ pc = (byte*)context->uc_mcontext.mc_rip; sp = (stackItem*)context->uc_mcontext.mc_rsp; #endif /* HOSTARCHITECTURE_X86_64 */ #else // Mac OS X #ifndef HOSTARCHITECTURE_X86_64 #if(defined(HAVE_STRUCT_MCONTEXT_SS)||defined(HAVE_STRUCT___DARWIN_MCONTEXT32_SS)) pc = (byte*)context->uc_mcontext->ss.eip; sp = (PolyWord*)context->uc_mcontext->ss.esp; #elif(defined(HAVE_STRUCT___DARWIN_MCONTEXT32___SS)) pc = (byte*)context->uc_mcontext->__ss.__eip; sp = (PolyWord*)context->uc_mcontext->__ss.__esp; #endif #else /* HOSTARCHITECTURE_X86_64 */ #if(defined(HAVE_STRUCT_MCONTEXT_SS)||defined(HAVE_STRUCT___DARWIN_MCONTEXT64_SS)) pc = (byte*)context->uc_mcontext->ss.rip; sp = (PolyWord*)context->uc_mcontext->ss.rsp; #elif(defined(HAVE_STRUCT___DARWIN_MCONTEXT64___SS)) pc = (byte*)context->uc_mcontext->__ss.__rip; sp = (PolyWord*)context->uc_mcontext->__ss.__rsp; #endif #endif /* HOSTARCHITECTURE_X86_64 */ #endif #elif defined(HAVE_STRUCT_SIGCONTEXT) #if defined(HOSTARCHITECTURE_X86_64) && defined(__OpenBSD__) // CPP defines missing in amd64/signal.h in OpenBSD pc = (byte*)context->sc_rip; sp = (PolyWord*)context->sc_rsp; #else // !HOSTARCHITEXTURE_X86_64 || !defined(__OpenBSD__) pc = (byte*)context->sc_pc; sp = (PolyWord*)context->sc_sp; #endif #endif } if (pc != 0) { // See if the PC we've got is an ML code address. MemSpace *space = gMem.SpaceForAddress(pc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { add_count(this, pc, 1); return true; } } // See if the sp value is in the current stack. if (sp >= (stackItem*)this->stack->bottom && sp < (stackItem*)this->stack->top) { // We may be in the assembly code. The top of the stack will be a return address. pc = sp[0].w().AsCodePtr(); MemSpace *space = gMem.SpaceForAddress(pc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { add_count(this, pc, 1); return true; } } // See if the value of regSP is a valid stack pointer. // This works if we happen to be in an RTS call using a "Full" call. // It doesn't work if we've used a "Fast" call because that doesn't save the SP. 
sp = assemblyInterface.stackPtr; if (sp >= (stackItem*)this->stack->bottom && sp < (stackItem*)this->stack->top) { // We may be in the run-time system. pc = sp[0].w().AsCodePtr(); MemSpace *space = gMem.SpaceForAddress(pc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { add_count(this, pc, 1); return true; } } // None of those worked return false; } // This is called from a different thread so we have to be careful. void X86TaskData::InterruptCode() { PLocker l(&interruptLock); // Set the stack limit pointer to the top of the stack to cause // a trap when we next check for stack overflow. // We use a lock here to ensure that we always use the current value of the // stack. The thread we're interrupting could be growing the stack at this point. if (this->stack != 0) this->assemblyInterface.stackLimit = (stackItem*)(this->stack->top-1); } // This is called from SwitchToPoly before we enter the ML code. void X86TaskData::SetMemRegisters() { // Copy the current store limits into variables before we go into the assembly code. // If we haven't yet set the allocation area or we don't have enough we need // to create one (or a new one). if (this->allocPointer <= this->allocLimit + this->allocWords) { if (this->allocPointer < this->allocLimit) Crash ("Bad length in heap overflow trap"); // Find some space to allocate in. Updates taskData->allocPointer and // returns a pointer to the newly allocated space (if allocWords != 0) PolyWord *space = processes->FindAllocationSpace(this, this->allocWords, true); if (space == 0) { // We will now raise an exception instead of returning. // Set allocWords to zero so we don't set the allocation register // since that could be holding the exception packet. this->allocWords = 0; } // Undo the allocation just now. this->allocPointer += this->allocWords; } if (this->allocWords != 0) { // If we have had a heap trap we actually do the allocation here. // We will have already garbage collected and recovered sufficient space. // This also happens if we have just trapped because of store profiling. this->allocPointer -= this->allocWords; // Now allocate // Set the allocation register to this area. N.B. This is an absolute address. if (this->allocReg < 15) get_reg(this->allocReg)[0].codeAddr = (POLYCODEPTR)(this->allocPointer + 1); /* remember: it's off-by-one */ this->allocWords = 0; } // If we have run out of store, either just above or while allocating in the RTS, // allocPointer and allocLimit will have been set to zero as part of the GC. We will // now be raising an exception which may free some store but we need to come back here // before we allocate anything. The compiled code uses unsigned arithmetic to check for // heap overflow but only after subtracting the space required. We need to make sure // that the values are still non-negative after substracting any object size. if (this->allocPointer == 0) this->allocPointer += MAX_OBJECT_SIZE; if (this->allocLimit == 0) this->allocLimit += MAX_OBJECT_SIZE; this->assemblyInterface.localMbottom = this->allocLimit + 1; this->assemblyInterface.localMpointer = this->allocPointer + 1; // If we are profiling store allocation we set mem_hl so that a trap // will be generated. if (profileMode == kProfileStoreAllocation) this->assemblyInterface.localMbottom = this->assemblyInterface.localMpointer; this->assemblyInterface.returnReason = RETURN_IO_CALL_NOW_UNUSED; this->assemblyInterface.threadId = this->threadObject; } // This is called whenever we have returned from ML to C. 
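// (Note on the +1/-1 adjustments around here: assemblyInterface.localMpointer and
// localMbottom are kept one PolyWord above the task's allocPointer/allocLimit, so
// SetMemRegisters adds the word when exporting them, SaveMemRegisters below removes it
// again, and HeapOverflowTrap allows for it when recomputing the requested size.)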
void X86TaskData::SaveMemRegisters() { this->allocPointer = this->assemblyInterface.localMpointer - 1; this->allocWords = 0; this->assemblyInterface.exceptionPacket = TAGGED(0); this->saveRegisterMask = 0; } // Called on a GC or stack overflow trap. The register mask // is in the bytes after the trap call. void X86TaskData::SetRegisterMask() { byte *pc = assemblyInterface.stackPtr[0].codeAddr; if (*pc == 0xcd) // CD - INT n is used for a single byte { pc++; saveRegisterMask = *pc++; } else if (*pc == 0xca) // CA - FAR RETURN is used for a two byte mask { pc++; saveRegisterMask = pc[0] | (pc[1] << 8); pc += 2; } assemblyInterface.stackPtr[0].codeAddr = pc; } stackItem *X86TaskData::get_reg(int n) /* Returns a pointer to the register given by n. */ { switch (n) { case 0: return &assemblyInterface.p_rax; case 1: return &assemblyInterface.p_rcx; case 2: return &assemblyInterface.p_rdx; case 3: return &assemblyInterface.p_rbx; // Should not have rsp or rbp. case 6: return &assemblyInterface.p_rsi; case 7: return &assemblyInterface.p_rdi; #ifdef HOSTARCHITECTURE_X86_64 case 8: return &assemblyInterface.p_r8; case 9: return &assemblyInterface.p_r9; case 10: return &assemblyInterface.p_r10; case 11: return &assemblyInterface.p_r11; case 12: return &assemblyInterface.p_r12; case 13: return &assemblyInterface.p_r13; case 14: return &assemblyInterface.p_r14; // R15 is the heap pointer so shouldn't occur here. #endif /* HOSTARCHITECTURE_X86_64 */ default: Crash("Unknown register %d\n", n); } } // Called as a result of a heap overflow trap void X86TaskData::HeapOverflowTrap(byte *pcPtr) { X86TaskData *mdTask = this; POLYUNSIGNED wordsNeeded = 0; // The next instruction, after any branches round forwarding pointers or pop // instructions, will be a store of register containing the adjusted heap pointer. // We need to find that register and the value in it in order to find out how big // the area we actually wanted is. N.B. The code-generator and assembly code // must generate the correct instruction sequence. // byte *pcPtr = assemblyInterface.programCtr; while (true) { if (pcPtr[0] == 0xeb) { // Forwarding pointer if (pcPtr[1] >= 128) pcPtr += 256 - pcPtr[1] + 2; else pcPtr += pcPtr[1] + 2; } else if ((pcPtr[0] & 0xf8) == 0x58) // Pop instruction. pcPtr++; else if (pcPtr[0] == 0x41 && ((pcPtr[1] & 0xf8) == 0x58)) // Pop with Rex prefix pcPtr += 2; else break; } #ifndef HOSTARCHITECTURE_X86_64 // This should be movl REG,0[%ebp]. ASSERT(pcPtr[0] == 0x89); mdTask->allocReg = (pcPtr[1] >> 3) & 7; // Remember this until we allocate the memory stackItem *reg = get_reg(mdTask->allocReg); stackItem reg_val = *reg; // The space we need is the difference between this register // and the current value of newptr. // The +1 here is because assemblyInterface.localMpointer is A.M.pointer +1. The reason // is that after the allocation we have the register pointing at the address we will // actually use. wordsNeeded = (this->allocPointer - (PolyWord*)reg_val.stackAddr) + 1; *reg = TAGGED(0); // Clear this - it's not a valid address. /* length in words, including length word */ ASSERT (wordsNeeded <= (1<<24)); /* Max object size including length/flag word is 2^24 words. */ #else /* HOSTARCHITECTURE_X86_64 */ ASSERT(pcPtr[1] == 0x89 || pcPtr[1] == 0x8b); if (pcPtr[1] == 0x89) { // New (5.4) format. 
This should be movq REG,%r15 ASSERT(pcPtr[0] == 0x49 || pcPtr[0] == 0x4d); mdTask->allocReg = (pcPtr[2] >> 3) & 7; // Remember this until we allocate the memory if (pcPtr[0] & 0x4) mdTask->allocReg += 8; } else { // Alternative form of movq REG,%r15 ASSERT(pcPtr[0] == 0x4c || pcPtr[0] == 0x4d); mdTask->allocReg = pcPtr[2] & 7; // Remember this until we allocate the memory if (pcPtr[0] & 0x1) mdTask->allocReg += 8; } stackItem *reg = get_reg(this->allocReg); stackItem reg_val = *reg; wordsNeeded = (POLYUNSIGNED)((this->allocPointer - (PolyWord*)reg_val.stackAddr) + 1); *reg = TAGGED(0); // Clear this - it's not a valid address. #endif /* HOSTARCHITECTURE_X86_64 */ if (profileMode == kProfileStoreAllocation) addProfileCount(wordsNeeded); mdTask->allocWords = wordsNeeded; // The actual allocation is done in SetMemRegisters. } void X86TaskData::SetException(poly_exn *exc) // The RTS wants to raise an exception packet. Normally this is as the // result of an RTS call in which case the caller will check this. It can // also happen in a trap. { assemblyInterface.exceptionPacket = (PolyWord)exc; // Set for direct calls. } // Sets up a callback function on the current stack. The present state is that // the ML code has made a call in to foreign_dispatch. We need to set the stack // up so that we will enter the callback (as with CallCodeTupled) but when we return // the result we enter callback_return. Handle X86TaskData::EnterCallbackFunction(Handle func, Handle args) { // If we ever implement a light version of the FFI that allows a call to C // code without saving enough to allow allocation in C code we need to ensure // that this code doesn't do any allocation. Essentially we need the values // in localMpointer and localMbottom to be valid across a call to C. If we do // a callback the ML callback function would pick up the values saved in the // originating call. // However, it is essential that the light version still saves the stack pointer // and reloads it afterwards. // Set up an exception handler so we will enter callBackException if there is an exception. (--regSP())->stackAddr = assemblyInterface.handlerRegister; // Create a special handler entry (--regSP())->codeAddr = callbackException; assemblyInterface.handlerRegister = regSP(); // Push the call to callBackReturn onto the stack as the return address. (--regSP())->codeAddr = callbackReturn; // Set up the entry point of the callback. PolyObject *functToCall = func->WordP(); regDX() = (PolyWord)functToCall; // Closure address regAX() = args->Word(); // Push entry point address (--regSP())->codeAddr = *(POLYCODEPTR*)functToCall; // First word of closure is entry pt. return EnterPolyCode(); } // Decode and process an effective address. There may // be a constant address in here but in any case we need // to decode it to work out where the next instruction starts. // If this is an lea instruction any addresses are just constants // so must not be treated as addresses. static void skipea(PolyObject *base, byte **pt, ScanAddress *process, bool lea) { unsigned int modrm = *((*pt)++); unsigned int md = modrm >> 6; unsigned int rm = modrm & 7; if (md == 3) { } /* Register. */ else if (rm == 4) { /* s-i-b present. */ unsigned int sib = *((*pt)++); if (md == 0) { if ((sib & 7) == 5) { if (! 
lea) { #ifndef HOSTARCHITECTURE_X86_64 process->ScanConstant(base, *pt, PROCESS_RELOC_DIRECT); #endif /* HOSTARCHITECTURE_X86_64 */ } (*pt) += 4; } } else if (md == 1) (*pt)++; else if (md == 2) (*pt) += 4; } else if (md == 0 && rm == 5) { if (!lea) { #ifndef HOSTARCHITECTURE_X86_64 /* Absolute address. */ process->ScanConstant(base, *pt, PROCESS_RELOC_DIRECT); #endif /* HOSTARCHITECTURE_X86_64 */ } *pt += 4; } else { if (md == 1) *pt += 1; else if (md == 2) *pt += 4; } } /* Added to deal with constants within the code rather than in the constant area. The constant area is still needed for the function name. DCJM 2/1/2001 */ void X86Dependent::ScanConstantsWithinCode(PolyObject *addr, PolyObject *old, POLYUNSIGNED length, ScanAddress *process) { byte *pt = (byte*)addr; PolyWord *end = addr->Offset(length - 1); #ifdef POLYML32IN64 // If this begins with enter-int it's interpreted code - ignore if (pt[0] == 0xff && pt[1] == 0x55 && pt[2] == 0x48) return; #endif while (true) { // Escape prefixes come before any Rex byte if (*pt == 0xf2 || *pt == 0xf3 || *pt == 0x66) pt++; #ifdef HOSTARCHITECTURE_X86_64 // REX prefixes. Set this first. byte lastRex; if (*pt >= 0x40 && *pt <= 0x4f) lastRex = *pt++; else lastRex = 0; //printf("pt=%p *pt=%x\n", pt, *pt); #endif /* HOSTARCHITECTURE_X86_64 */ switch (*pt) { case 0x00: return; // This is actually the first byte of the old "marker" word. case 0xf4: return; // Halt - now used as a marker. case 0x50: case 0x51: case 0x52: case 0x53: case 0x54: case 0x55: case 0x56: case 0x57: /* Push */ case 0x58: case 0x59: case 0x5a: case 0x5b: case 0x5c: case 0x5d: case 0x5e: case 0x5f: /* Pop */ case 0x90: /* nop */ case 0xc3: /* ret */ case 0xf9: /* stc */ case 0xce: /* into */ case 0xf0: /* lock. */ case 0xf3: /* rep/repe */ case 0xa4: case 0xa5: case 0xaa: case 0xab: /* movs/stos */ case 0xa6: /* cmpsb */ case 0x9e: /* sahf */ case 0x99: /* cqo/cdq */ pt++; break; case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: case 0x78: case 0x79: case 0x7a: case 0x7b: case 0x7c: case 0x7d: case 0x7e: case 0x7f: case 0xeb: /* short jumps. */ case 0xcd: /* INT - now used for a register mask */ case 0xa8: /* TEST_ACC8 */ case 0x6a: /* PUSH_8 */ pt += 2; break; case 0xc2: /* RET_16 */ case 0xca: /* FAR RET 16 - used for a register mask */ pt += 3; break; case 0x8d: /* leal. */ pt++; skipea(addr, &pt, process, true); break; case 0x03: case 0x0b: case 0x13: case 0x1b: case 0x23: case 0x2b: case 0x33: case 0x3b: /* Add r,ea etc. */ case 0x88: /* MOVB_R_A */ case 0x89: /* MOVL_R_A */ case 0x8b: /* MOVL_A_R */ case 0x62: /* BOUNDL */ case 0xff: /* Group5 */ case 0xd1: /* Group2_1_A */ case 0x8f: /* POP_A */ case 0xd3: /* Group2_CL_A */ case 0x87: // XCHNG case 0x63: // MOVSXD pt++; skipea(addr, &pt, process, false); break; case 0xf6: /* Group3_a */ { int isTest = 0; pt++; /* The test instruction has an immediate operand. */ if ((*pt & 0x38) == 0) isTest = 1; skipea(addr, &pt, process, false); if (isTest) pt++; break; } case 0xf7: /* Group3_A */ { int isTest = 0; pt++; /* The test instruction has an immediate operand. 
*/ if ((*pt & 0x38) == 0) isTest = 1; skipea(addr, &pt, process, false); if (isTest) pt += 4; break; } case 0xc1: /* Group2_8_A */ case 0xc6: /* MOVB_8_A */ case 0x83: /* Group1_8_A */ case 0x80: /* Group1_8_a */ case 0x6b: // IMUL Ev,Ib pt++; skipea(addr, &pt, process, false); pt++; break; case 0x69: // IMUL Ev,Iv pt++; skipea(addr, &pt, process, false); pt += 4; break; case 0x81: /* Group1_32_A */ { pt ++; #ifndef HOSTARCHITECTURE_X86_64 unsigned opCode = *pt; #endif skipea(addr, &pt, process, false); // Only check the 32 bit constant if this is a comparison. // For other operations this may be untagged and shouldn't be an address. #ifndef HOSTARCHITECTURE_X86_64 if ((opCode & 0x38) == 0x38) process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); #endif pt += 4; break; } case 0xe8: case 0xe9: // Long jump and call. These are used to call constant (known) functions // and also long jumps within the function. { pt++; POLYSIGNED disp = (pt[3] & 0x80) ? -1 : 0; // Set the sign just in case. for(unsigned i = 4; i > 0; i--) disp = (disp << 8) | pt[i-1]; byte *absAddr = pt + disp + 4; // The address is relative to AFTER the constant // If the new address is within the current piece of code we don't do anything if (absAddr >= (byte*)addr && absAddr < (byte*)end) {} else { #ifdef HOSTARCHITECTURE_X86_64 ASSERT(sizeof(PolyWord) == 4); // Should only be used internally on x64 #endif /* HOSTARCHITECTURE_X86_64 */ if (addr != old) { // The old value of the displacement was relative to the old address before // we copied this code segment. // We have to correct it back to the original address. absAddr = absAddr - (byte*)addr + (byte*)old; // We have to correct the displacement for the new location and store // that away before we call ScanConstant. size_t newDisp = absAddr - pt - 4; for (unsigned i = 0; i < 4; i++) { pt[i] = (byte)(newDisp & 0xff); newDisp >>= 8; } } process->ScanConstant(addr, pt, PROCESS_RELOC_I386RELATIVE); } pt += 4; break; } case 0xc7:/* MOVL_32_A */ { pt++; if ((*pt & 0xc0) == 0x40 /* Byte offset or sib present */ && ((*pt & 7) != 4) /* But not sib present */ && pt[1] == 256-sizeof(PolyWord)) { /* We may use a move instruction to set the length word on a new segment. We mustn't try to treat this as a constant. */ pt += 6; /* Skip the modrm byte, the offset and the constant. */ } else { skipea(addr, &pt, process, false); #ifndef HOSTARCHITECTURE_X86_64 // This isn't used for addresses even in 32-in-64 process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); #endif /* HOSTARCHITECTURE_X86_64 */ pt += 4; } break; } case 0xb8: case 0xb9: case 0xba: case 0xbb: case 0xbc: case 0xbd: case 0xbe: case 0xbf: /* MOVL_32_64_R */ pt ++; #ifdef HOSTARCHITECTURE_X86_64 if ((lastRex & 8) == 0) pt += 4; // 32-bit mode on 64-bits else #endif /* HOSTARCHITECTURE_X86_64 */ { // This is no longer generated in 64-bit mode but needs to // be retained in native 64-bit for backwards compatibility. #ifndef POLYML32IN64 // 32 bits in 32-bit mode, 64-bits in 64-bit mode. process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); #endif pt += sizeof(PolyWord); } break; case 0x68: /* PUSH_32 */ pt ++; #if (!defined(HOSTARCHITECTURE_X86_64) || defined(POLYML32IN64)) // Currently the only inline constant in 32-in-64. 
process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); #endif pt += 4; break; case 0x0f: /* ESCAPE */ { pt++; switch (*pt) { case 0xb6: /* movzl */ case 0xb7: // movzw case 0xbe: // movsx case 0xbf: // movsx case 0xc1: /* xaddl */ case 0xae: // ldmxcsr/stmxcsr case 0xaf: // imul case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47: case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f: // cmov pt++; skipea(addr, &pt, process, false); break; case 0x80: case 0x81: case 0x82: case 0x83: case 0x84: case 0x85: case 0x86: case 0x87: case 0x88: case 0x89: case 0x8a: case 0x8b: case 0x8c: case 0x8d: case 0x8e: case 0x8f: /* Conditional branches with 32-bit displacement. */ pt += 5; break; case 0x90: case 0x91: case 0x92: case 0x93: case 0x94: case 0x95: case 0x96: case 0x97: case 0x98: case 0x99: case 0x9a: case 0x9b: case 0x9c: case 0x9d: case 0x9e: case 0x9f: /* SetCC. */ pt++; skipea(addr, &pt, process, false); break; // These are SSE2 instructions case 0x10: case 0x11: case 0x58: case 0x5c: case 0x59: case 0x5e: case 0x2e: case 0x2a: case 0x54: case 0x57: case 0x5a: case 0x6e: case 0x7e: case 0x2c: case 0x2d: pt++; skipea(addr, &pt, process, false); break; case 0x73: // PSRLDQ - EA,imm pt++; skipea(addr, &pt, process, false); pt++; break; default: Crash("Unknown opcode %d at %p\n", *pt, pt); } break; } case 0xd8: case 0xd9: case 0xda: case 0xdb: case 0xdc: case 0xdd: case 0xde: case 0xdf: // Floating point escape instructions { pt++; if ((*pt & 0xe0) == 0xe0) pt++; else skipea(addr, &pt, process, false); break; } default: Crash("Unknown opcode %d at %p\n", *pt, pt); } } } // Increment the value contained in the first word of the mutex. Handle X86TaskData::AtomicIncrement(Handle mutexp) { PolyObject *p = DEREFHANDLE(mutexp); POLYUNSIGNED result = X86AsmAtomicIncrement(p); return this->saveVec.push(PolyWord::FromUnsigned(result)); } // Release a mutex. Because the atomic increment and decrement // use the hardware LOCK prefix we can simply set this to one. void X86TaskData::AtomicReset(Handle mutexp) { DEREFHANDLE(mutexp)->Set(0, TAGGED(1)); } static X86Dependent x86Dependent; MachineDependent *machineDependent = &x86Dependent; class X86Module : public RtsModule { public: virtual void GarbageCollect(ScanAddress * /*process*/); }; // Declare this. It will be automatically added to the table. static X86Module x86Module; void X86Module::GarbageCollect(ScanAddress *process) { #ifdef POLYML32IN64 // These are trampolines in the code area rather than direct calls. if (popArgAndClosure != 0) process->ScanRuntimeAddress((PolyObject**)&popArgAndClosure, ScanAddress::STRENGTH_STRONG); if (killSelf != 0) process->ScanRuntimeAddress((PolyObject**)&killSelf, ScanAddress::STRENGTH_STRONG); if (raiseException != 0) process->ScanRuntimeAddress((PolyObject**)&raiseException, ScanAddress::STRENGTH_STRONG); if (callbackException != 0) process->ScanRuntimeAddress((PolyObject**)&callbackException, ScanAddress::STRENGTH_STRONG); if (callbackReturn != 0) process->ScanRuntimeAddress((PolyObject**)&callbackReturn, ScanAddress::STRENGTH_STRONG); #endif -} \ No newline at end of file +} + + +extern "C" { + POLYEXTERNALSYMBOL void *PolyX86GetThreadData(); +} + +// Return the address of assembly data for the current thread. This is normally in +// RBP except if we are in a callback. +void *PolyX86GetThreadData() +{ + // We should get the task data for the thread that is running this code. 
+ // If this thread has been created by the foreign code we will have to + // create a new one here. + TaskData* taskData = processes->GetTaskDataForThread(); + if (taskData == 0) + { + try { + taskData = processes->CreateNewTaskData(0, 0, 0, TAGGED(0)); + } + catch (std::bad_alloc&) { + ::Exit("Unable to create thread data - insufficient memory"); + } + catch (MemoryException&) { + ::Exit("Unable to create thread data - insufficient memory"); + } + } + return &((X86TaskData*)taskData)->assemblyInterface; +} + +struct _entrypts machineSpecificEPT[] = +{ + { "PolyX86GetThreadData", (polyRTSFunction)& PolyX86GetThreadData }, + + { NULL, NULL} // End of list. +}; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86FOREIGNCALL.sml b/mlsource/MLCompiler/CodeTree/X86Code/X86FOREIGNCALL.sml index ccd47194..0e3f00c3 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86FOREIGNCALL.sml +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86FOREIGNCALL.sml @@ -1,1101 +1,1286 @@ (* Copyright (c) 2016-19 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86FOREIGNCALL( structure X86CODE: X86CODESIG structure X86OPTIMISE: sig type operation type code type operations = operation list type closureRef (* Optimise and code-generate. *) val generateCode: {code: code, ops: operations, labelCount: int, resultClosure: closureRef} -> unit structure Sharing: sig type operation = operation type code = code type closureRef = closureRef end end structure DEBUG: DEBUGSIG structure CODE_ARRAY: CODEARRAYSIG sharing X86CODE.Sharing = X86OPTIMISE.Sharing = CODE_ARRAY.Sharing ): FOREIGNCALLSIG = struct open X86CODE open Address open CODE_ARRAY (* Unix X64. The first six arguments are in rdi, rsi, rdx, rcx, r8, r9. The rest are on the stack. Windows X64. The first four arguments are in rcx, rdx, r8 and r9. The rest are on the stack. The caller must ensure the stack is aligned on 16-byte boundary and must allocate 32-byte save area for the register args. rbx, rbp, rdi, rsi, rsp, r12-r15 are saved by the called function. X86/32. Arguments are pushed to the stack. ebx, edi, esi, ebp and esp are saved by the called function. We use esi to hold the argument data pointer and edi to save the ML stack pointer Our ML conventions use eax, ebx for the first two arguments in X86/32, rax, ebx, r8, r9, r10 for the first five arguments in X86/64 and rax, rsi, r8, r9 and r10 for the first five arguments in X86/64-32 bit. *) val memRegSize = 0 val (polyWordOpSize, nativeWordOpSize) = case targetArch of Native32Bit => (OpSize32, OpSize32) | Native64Bit => (OpSize64, OpSize64) | ObjectId32Bit => (OpSize32, OpSize64) (* Ebx/Rbx is used for the second argument on the native architectures but is replaced by esi on the object ID arch because ebx is used as the global base register. 
*) val mlArg2Reg = case targetArch of ObjectId32Bit => esi | _ => ebx exception InternalError = Misc.InternalError fun opSizeToMove OpSize32 = Move32 | opSizeToMove OpSize64 = Move64 val pushR = PushToStack o RegisterArg fun moveRR{source, output, opSize} = Move{source=RegisterArg source, destination=RegisterArg output, moveSize=opSizeToMove opSize} fun loadMemory(reg, base, offset, opSize) = Move{source=MemoryArg{base=base, offset=offset, index=NoIndex}, destination=RegisterArg reg, moveSize=opSizeToMove opSize} and storeMemory(reg, base, offset, opSize) = Move{source=RegisterArg reg, destination=MemoryArg {base=base, offset=offset, index=NoIndex}, moveSize=opSizeToMove opSize} val loadHeapMemory = case targetArch of ObjectId32Bit => ( fn (reg, base, offset, opSize) => Move{source=MemoryArg{base=ebx, offset=offset, index=Index4 base}, destination=RegisterArg reg, moveSize=opSizeToMove opSize} ) | _ => loadMemory fun loadAddress{source=(srcReg, 0), destination} = Move{source=RegisterArg srcReg, destination=RegisterArg destination, moveSize=opSizeToMove nativeWordOpSize} | loadAddress{source=(srcReg, srcOffset), destination} = LoadAddress{offset=srcOffset, base=SOME srcReg, index=NoIndex, output=destination, opSize=nativeWordOpSize } (* Sequence of operations to move memory. *) fun moveMemory{source, destination, length} = [ loadAddress{source=source, destination=rsi}, loadAddress{source=destination, destination=rdi}, Move{source=NonAddressConstArg(LargeInt.fromInt length), destination=RegisterArg rcx, moveSize=opSizeToMove nativeWordOpSize}, RepeatOperation MOVS8 ] fun createProfileObject _ (*functionName*) = let (* The profile object is a single mutable with the F_bytes bit set. *) open Address val profileObject = RunCall.allocateByteMemory(0w1, Word.fromLargeWord(Word8.toLargeWord(Word8.orb(F_mutable, F_bytes)))) fun clear 0w0 = () | clear i = (assignByte(profileObject, i-0w1, 0w0); clear (i-0w1)) val () = clear wordSize in toMachineWord profileObject end val makeEntryPoint: string -> machineWord = RunCall.rtsCallFull1 "PolyCreateEntryPointObject" datatype abi = X86_32 | X64Win | X64Unix local (* Get the ABI. On 64-bit Windows and Unix use different calling conventions. *) val getABICall: unit -> int = RunCall.rtsCallFast0 "PolyGetABI" in fun getABI() = case getABICall() of 0 => X86_32 | 1 => X64Unix | 2 => X64Win | n => raise InternalError ("Unknown ABI type " ^ Int.toString n) end (* This is now the standard entry call code. *) datatype fastArgs = FastArgFixed | FastArgDouble | FastArgFloat fun rtsCallFastGeneral (functionName, argFormats, (*resultFormat*) _, debugSwitches) = let val entryPointAddr = makeEntryPoint functionName (* Get the ABI. On 64-bit Windows and Unix use different calling conventions. *) val abi = getABI() val entryPtrReg = if targetArch <> Native32Bit then r11 else ecx val nArgs = List.length argFormats local (* Compute stack space. The actual number of args passed is nArgs+1. *) val argSpace = case abi of X64Unix => Int.max(0, nArgs-6)*8 | X64Win => Int.max(0, nArgs-4)*8 | X86_32 => List.foldl(fn (FastArgDouble, n) => n+8 | (_, n) => n+4) 0 argFormats val align = argSpace mod 16 in (* Add sufficient space so that esp will be 16-byte aligned after we have pushed any arguments we need to push. *) val stackSpace = if align = 0 then memRegSize else memRegSize + 16 - align end (* The number of ML arguments passed on the stack. 
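       For example, given the register conventions described at the top of this file,
       a fast call with four fixed arguments on X86/32 passes two in eax/ebx and leaves
       4 - 2 = 2 words on the ML stack, while on X86/64 the first five go in registers,
       so the same call leaves Int.max(4 - 5, 0) = 0.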
*) val mlArgsOnStack = Int.max(case abi of X86_32 => nArgs - 2 | _ => nArgs - 5, 0) val code = [ Move{source=AddressConstArg entryPointAddr, destination=RegisterArg entryPtrReg, moveSize=opSizeToMove polyWordOpSize}, (* Load the entry point ref. *) loadHeapMemory(entryPtrReg, entryPtrReg, 0, nativeWordOpSize)(* Load its value. *) ] @ ( (* Save heap ptr. This is in r15 in X86/64 *) if targetArch <> Native32Bit then [storeMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize)] (* Save heap ptr *) else [] ) @ ( if (case abi of X86_32 => nArgs >= 3 | _ => nArgs >= 6) then [moveRR{source=esp, output=edi, opSize=nativeWordOpSize}] (* Needed if we have to load from the stack. *) else [] ) @ [ storeMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Save ML stack and switch to C stack. *) loadMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize), (* Set the stack pointer past the data on the stack. For Windows/64 add in a 32 byte save area *) ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt stackSpace), opSize=nativeWordOpSize} ] @ ( case abi of (* Set the argument registers. *) X86_32 => let fun pushReg(reg, FastArgFixed) = [pushR reg] | pushReg(reg, FastArgDouble) = (* reg contains the address of the value. This must be unboxed onto the stack. *) [ FPLoadFromMemory{address={base=reg, offset=0, index=NoIndex}, precision=DoublePrecision}, ArithToGenReg{ opc=SUB, output=esp, source=NonAddressConstArg 8, opSize=nativeWordOpSize}, FPStoreToMemory{ address={base=esp, offset=0, index=NoIndex}, precision=DoublePrecision, andPop=true } ] | pushReg(reg, FastArgFloat) = (* reg contains the address of the value. This must be unboxed onto the stack. *) [ FPLoadFromMemory{address={base=reg, offset=0, index=NoIndex}, precision=SinglePrecision}, ArithToGenReg{ opc=SUB, output=esp, source=NonAddressConstArg 4, opSize=nativeWordOpSize}, FPStoreToMemory{ address={base=esp, offset=0, index=NoIndex}, precision=SinglePrecision, andPop=true } ] (* The stack arguments have to be copied first followed by the ebx and finally eax. *) fun pushArgs (_, []) = [] | pushArgs (_, [argType]) = pushReg(eax, argType) | pushArgs (_, [arg2Type, arg1Type]) = pushReg(ebx, arg2Type) @ pushReg(eax, arg1Type) | pushArgs (n, FastArgFixed :: argTypes) = PushToStack(MemoryArg{base=edi, offset=(nArgs-n+1)* 4, index=NoIndex}) :: pushArgs(n-1, argTypes) | pushArgs (n, argType :: argTypes) = (* Use esi as a temporary register. *) loadMemory(esi, edi, (nArgs-n+1)* 4, polyWordOpSize) :: pushReg(esi, argType) @ pushArgs(n-1, argTypes) in pushArgs(nArgs, List.rev argFormats) end | X64Unix => ( if List.all (fn FastArgFixed => true | _ => false) argFormats then let fun pushArgs 0 = [] | pushArgs 1 = [moveRR{source=eax, output=edi, opSize=polyWordOpSize}] | pushArgs 2 = moveRR{source=mlArg2Reg, output=esi, opSize=polyWordOpSize} :: pushArgs 1 | pushArgs 3 = moveRR{source=r8, output=edx, opSize=polyWordOpSize} :: pushArgs 2 | pushArgs 4 = moveRR{source=r9, output=ecx, opSize=polyWordOpSize} :: pushArgs 3 | pushArgs 5 = (* We have to move r8 into edx before we can move r10 into r8 *) moveRR{source=r8, output=edx, opSize=polyWordOpSize} :: moveRR{source=r9, output=ecx, opSize=polyWordOpSize} :: moveRR{source=r10, output=r8, opSize=polyWordOpSize} :: pushArgs 2 | pushArgs 6 = (* We have to move r9 into edi before we can load r9 from the stack. 
*) moveRR{source=r8, output=edx, opSize=polyWordOpSize} :: moveRR{source=r9, output=ecx, opSize=polyWordOpSize} :: loadMemory(r9, edi, 8, polyWordOpSize) :: moveRR{source=r10, output=r8, opSize=polyWordOpSize} :: pushArgs 2 | pushArgs _ = raise InternalError "rtsCall: Abi/argument count not implemented" in pushArgs nArgs end else case argFormats of [] => [] | [FastArgFixed] => [ moveRR{source=eax, output=edi, opSize=polyWordOpSize} ] | [FastArgFixed, FastArgFixed] => (* Since mlArgs2Reg is esi on 32-in-64 this is redundant. *) [ moveRR{source=mlArg2Reg, output=esi, opSize=polyWordOpSize}, moveRR{source=eax, output=edi, opSize=polyWordOpSize} ] | [FastArgFixed, FastArgFixed, FastArgFixed] => [ moveRR{source=mlArg2Reg, output=esi, opSize=polyWordOpSize}, moveRR{source=eax, output=edi, opSize=polyWordOpSize}, moveRR{source=r8, output=edx, opSize=polyWordOpSize} ] | [FastArgFixed, FastArgFixed, FastArgFixed, FastArgFixed] => [ moveRR{source=mlArg2Reg, output=esi, opSize=polyWordOpSize}, moveRR{source=eax, output=edi, opSize=polyWordOpSize}, moveRR{source=r8, output=edx, opSize=polyWordOpSize}, moveRR{source=r9, output=ecx, opSize=polyWordOpSize} ] (* One "double" argument. The value needs to be unboxed. *) | [FastArgDouble] => [] (* Already in xmm0 *) (* X64 on both Windows and Unix take the first arg in xmm0 and the second in xmm1. They are already there. *) | [FastArgDouble, FastArgDouble] => [] | [FastArgDouble, FastArgFixed] => [ moveRR{source=eax, output=edi, opSize=nativeWordOpSize} ] | [FastArgFloat] => [] (* Already in xmm0 *) | [FastArgFloat, FastArgFloat] => [] (* Already in xmm0 and xmm1 *) (* One float argument and one fixed. *) | [FastArgFloat, FastArgFixed] => [moveRR{source=mlArg2Reg, output=edi, opSize=polyWordOpSize} ] | _ => raise InternalError "rtsCall: Abi/argument count not implemented" ) | X64Win => ( if List.all (fn FastArgFixed => true | _ => false) argFormats then let fun pushArgs 0 = [] | pushArgs 1 = [moveRR{source=eax, output=ecx, opSize=polyWordOpSize}] | pushArgs 2 = moveRR{source=mlArg2Reg, output=edx, opSize=polyWordOpSize} :: pushArgs 1 | pushArgs 3 = (* Already in r8 *) pushArgs 2 | pushArgs 4 = (* Already in r9, and r8 *) pushArgs 2 | pushArgs 5 = pushR r10 :: pushArgs 2 | pushArgs 6 = PushToStack(MemoryArg{base=edi, offset=8, index=NoIndex}) :: pushArgs 5 | pushArgs _ = raise InternalError "rtsCall: Abi/argument count not implemented" in pushArgs nArgs end else case argFormats of [FastArgFixed] => [ moveRR{source=eax, output=ecx, opSize=polyWordOpSize} ] | [FastArgFixed, FastArgFixed] => [ moveRR{source=eax, output=ecx, opSize=polyWordOpSize}, moveRR{source=mlArg2Reg, output=edx, opSize=polyWordOpSize} ] | [FastArgFixed, FastArgFixed, FastArgFixed] => [ moveRR{source=eax, output=ecx, opSize=polyWordOpSize}, moveRR{source=mlArg2Reg, output=edx, opSize=polyWordOpSize} (* Arg3 is already in r8. *) ] | [FastArgFixed, FastArgFixed, FastArgFixed, FastArgFixed] => [ moveRR{source=eax, output=ecx, opSize=polyWordOpSize}, moveRR{source=mlArg2Reg, output=edx, opSize=polyWordOpSize} (* Arg3 is already in r8 and arg4 in r9. *) ] | [FastArgDouble] => [ (* Already in xmm0 *) ] (* X64 on both Windows and Unix take the first arg in xmm0 and the second in xmm1. They are already there. *) | [FastArgDouble, FastArgDouble] => [ ] (* X64 on both Windows and Unix take the first arg in xmm0. On Unix the integer argument is treated as the first argument and goes into edi. On Windows it's treated as the second and goes into edx. N.B. It's also the first argument in ML so is in rax. 
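       Hence the single rax -> rdx move in the Windows case below, where the Unix case
       above moved rax into rdi instead.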
*) | [FastArgDouble, FastArgFixed] => [ moveRR{source=eax, output=edx, opSize=nativeWordOpSize} ] | [FastArgFloat] => [] | [FastArgFloat, FastArgFloat] => [] (* Already in xmm0 and xmm1 *) | [FastArgFloat, FastArgFixed] => [moveRR{source=mlArg2Reg, output=edx, opSize=polyWordOpSize}] | _ => raise InternalError "rtsCall: Abi/argument count not implemented" ) ) @ (* For Windows/64 add in a 32 byte save area ater we've pushed any arguments. *) (case abi of X64Win => [ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg 32, opSize=nativeWordOpSize}] | _ => []) @ [ CallAddress(RegisterArg entryPtrReg), (* Call the function *) loadMemory(esp, ebp, memRegStackPtr, nativeWordOpSize) (* Restore the ML stack pointer. *) ] @ ( if targetArch <> Native32Bit then [loadMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize) ] (* Copy back the heap ptr *) else [] ) @ [ (* Since this is an ML function we need to remove any ML stack arguments. *) ReturnFromFunction mlArgsOnStack ] val profileObject = createProfileObject functionName val newCode = codeCreate (functionName, profileObject, debugSwitches) val closure = makeConstantClosure() val () = X86OPTIMISE.generateCode{code=newCode, labelCount=0, ops=code, resultClosure=closure} in closureAsAddress closure end fun rtsCallFast (functionName, nArgs, debugSwitches) = rtsCallFastGeneral (functionName, List.tabulate(nArgs, fn _ => FastArgFixed), FastArgFixed, debugSwitches) (* RTS call with one double-precision floating point argument and a floating point result. *) fun rtsCallFastRealtoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgDouble], FastArgDouble, debugSwitches) (* RTS call with two double-precision floating point arguments and a floating point result. *) fun rtsCallFastRealRealtoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgDouble, FastArgDouble], FastArgDouble, debugSwitches) (* RTS call with one double-precision floating point argument, one fixed point argument and a floating point result. *) fun rtsCallFastRealGeneraltoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgDouble, FastArgFixed], FastArgDouble, debugSwitches) (* RTS call with one general (i.e. ML word) argument and a floating point result. This is used only to convert arbitrary precision values to floats. *) fun rtsCallFastGeneraltoReal (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFixed], FastArgDouble, debugSwitches) (* Operations on Real32.real values. *) fun rtsCallFastFloattoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFloat], FastArgFloat, debugSwitches) fun rtsCallFastFloatFloattoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFloat, FastArgFloat], FastArgFloat, debugSwitches) (* RTS call with one double-precision floating point argument, one fixed point argument and a floating point result. *) fun rtsCallFastFloatGeneraltoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFloat, FastArgFixed], FastArgFloat, debugSwitches) (* RTS call with one general (i.e. ML word) argument and a floating point result. This is used only to convert arbitrary precision values to floats. *) fun rtsCallFastGeneraltoFloat (functionName, debugSwitches) = rtsCallFastGeneral (functionName, [FastArgFixed], FastArgFloat, debugSwitches) datatype ffiABI = FFI_SYSV (* Unix 32 bit and Windows GCC 32-bit *) | FFI_STDCALL (* Windows 32-bit system ABI. Callee clears the stack. 
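       Callback code generated for this ABI therefore has to pop its own arguments on
       return; see the final ReturnFromFunction in closure32Bits below.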
*) | FFI_MS_CDECL (* VS 32-bit. Same as SYSV except when returning a struct. Default on Windows including GCC in Mingw. *) | FFI_WIN64 (* Windows 64 bit *) | FFI_UNIX64 (* Unix 64 bit. libffi also implements this on X86/32. *) (* We don't include various other 32-bit Windows ABIs. *) local (* Get the current ABI list. N.B. Foreign.LibFFI.abiList is the ABIs on the platform we built the compiler on, not necessarily the one we're running on. *) val ffiGeneral = RunCall.rtsCallFull2 "PolyFFIGeneral" in fun getFFIAbi abi = let val abis: (string * Foreign.LibFFI.abi) list = ffiGeneral (50, ()) in case List.find (fn ("default", _) => false | (_, a) => a = abi) abis of SOME ("sysv", _) => FFI_SYSV | SOME ("stdcall", _) => FFI_STDCALL | SOME ("ms_cdecl", _) => FFI_MS_CDECL | SOME ("win64", _) => FFI_WIN64 | SOME ("unix64", _) => FFI_UNIX64 | _ => raise Foreign.Foreign "Unknown or unsupported ABI" end end fun alignUp(s, align) = Word.andb(s + align-0w1, ~ align) fun intAlignUp(s, align) = Word.toInt(alignUp(Word.fromInt s, align)) + + val getThreadDataCall = makeEntryPoint "PolyX86GetThreadData" (* Build a foreign call function. The arguments are the abi, the list of argument types and the result type. The result is the code of the ML function that takes three arguments: the C function to call, the arguments as a vector of C values and the address of the memory for the result. *) fun call32Bits(abi, args, result) = let (* 32-bit arguments. These all go to the stack so we can simply push them. The arguments go on the stack in reverse order. *) fun loadArgs32([], stackOffset, argOffset, code, continue) = continue(stackOffset, argOffset, code) | loadArgs32(arg::args, stackOffset, argOffset, code, continue) = let val {size, align, typeCode, elements} = Foreign.LibFFI.extractFFItype arg val newArgOffset = alignUp(argOffset, align) val baseAddr = {base=mlArg2Reg, offset=Word.toInt newArgOffset, index=NoIndex} in if typeCode = Foreign.LibFFI.ffiTypeCodeUInt8 then (* Unsigned char. *) loadArgs32(args, stackOffset+4, newArgOffset+size, Move{source=MemoryArg baseAddr, destination=RegisterArg edx, moveSize=Move8 } :: PushToStack(RegisterArg edx) :: code, continue) else if typeCode = Foreign.LibFFI.ffiTypeCodeSInt8 then (* Signed char. *) loadArgs32(args, stackOffset+4, newArgOffset+size, Move{source=MemoryArg baseAddr, destination=RegisterArg edx, moveSize=Move8X32 } :: PushToStack(RegisterArg edx) :: code, continue) else if typeCode = Foreign.LibFFI.ffiTypeCodeUInt16 then (* Unsigned 16-bits. *) loadArgs32(args, stackOffset+4, newArgOffset+size, Move{source=MemoryArg baseAddr, destination=RegisterArg edx, moveSize=Move16 } :: PushToStack(RegisterArg edx) :: code, continue) else if typeCode = Foreign.LibFFI.ffiTypeCodeSInt16 then (* Signed 16-bits. *) loadArgs32(args, stackOffset+4, newArgOffset+size, Move{source=MemoryArg baseAddr, destination=RegisterArg edx, moveSize=Move16X32 } :: PushToStack(RegisterArg edx) :: code, continue) else if typeCode = Foreign.LibFFI.ffiTypeCodeUInt32 orelse typeCode = Foreign.LibFFI.ffiTypeCodeSInt32 orelse typeCode = Foreign.LibFFI.ffiTypeCodePointer orelse typeCode = Foreign.LibFFI.ffiTypeCodeFloat orelse typeCode = Foreign.LibFFI.ffiTypeCodeInt then (* 32-bits. *) loadArgs32(args, stackOffset+4, newArgOffset+size, PushToStack(MemoryArg baseAddr) :: code, continue) else if typeCode = Foreign.LibFFI.ffiTypeCodeDouble then (* Double: push the two words. High-order word first, then low-order. 
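       Because the stack grows downwards this leaves the low-order word at the lower
       address, the little-endian layout the callee expects, and is why the stack
       offset advances by 8 rather than 4 here.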
*) loadArgs32(args, stackOffset+8, newArgOffset+size, PushToStack(MemoryArg{base=mlArg2Reg, offset=Word.toInt newArgOffset+4, index=NoIndex}) :: PushToStack(MemoryArg{base=mlArg2Reg, offset=Word.toInt newArgOffset, index=NoIndex}) :: code, continue) else if typeCode = Foreign.LibFFI.ffiTypeCodeStruct (* structs passed as values are recursively unpacked. *) then loadArgs32(elements, stackOffset, newArgOffset (* Struct is aligned. *), code, fn (so, ao, code) => loadArgs32(args, so, ao, code, continue)) else if typeCode = Foreign.LibFFI.ffiTypeCodeVoid then raise Foreign.Foreign "Void cannot be used for a function argument" else (* Anything else? *) raise Foreign.Foreign "Unrecognised type for function argument" end val {typeCode, size, ...} = Foreign.LibFFI.extractFFItype result val resultMemory = {base=ecx, offset=0, index=NoIndex} (* Structures are passed by reference by storing the address of the result as the first argument except that in MS_CDECL (and STDCALL?) structures of size 1, 2, 4 and 8 are returned in EAX, and for 8, EDX. *) val (getResult, needResultAddress) = if typeCode = Foreign.LibFFI.ffiTypeCodeStruct andalso (abi = FFI_SYSV orelse (size <> 0w1 andalso size <> 0w2 andalso size <> 0w4 andalso size <> 0w8)) (* TODO: We have to get the address of the destination area. *) then ([], true) else if typeCode = Foreign.LibFFI.ffiTypeCodeVoid then ([], false) else (loadMemory(ecx, esp, 4, nativeWordOpSize) :: loadHeapMemory(ecx, ecx, 0, nativeWordOpSize) :: (if size = 0w1 then (* Single byte *) [Move{source=RegisterArg eax, destination=MemoryArg resultMemory, moveSize=Move8}] else if size = 0w2 then (* 16-bits *) [Move{source=RegisterArg eax, destination=MemoryArg resultMemory, moveSize=Move16}] else if typeCode = Foreign.LibFFI.ffiTypeCodeFloat then [FPStoreToMemory{address=resultMemory, precision=SinglePrecision, andPop=true }] else if size = 0w4 then [Move{source=RegisterArg eax, destination=MemoryArg resultMemory, moveSize=Move32}] else if typeCode = Foreign.LibFFI.ffiTypeCodeDouble then [FPStoreToMemory{address=resultMemory, precision=DoublePrecision, andPop=true }] else if size = 0w8 then [ Move{source=RegisterArg eax, destination=MemoryArg resultMemory, moveSize=Move32}, Move{source=RegisterArg edx, destination=MemoryArg {base=ecx, offset=4, index=NoIndex}, moveSize=Move32} ] else raise Foreign.Foreign "Unrecognised result type"), false) local (* Load the arguments. If we need to pass the return address for a struct that is the first arg. *) val (startStack, startCode) = if needResultAddress then (4, [PushToStack(MemoryArg{base=ecx, offset=0, index=NoIndex})]) else (0, []) in val (argCode, argStack) = loadArgs32(args, startStack, 0w0, startCode, fn (stackOffset, _, code) => (code, stackOffset)) end local val align = argStack mod 16 in (* Always align the stack. It's not always necessary on 32-bits but GCC prefers it. *) val preArgAlign = if align = 0 then 0 else 16-align end in ( (* If we're returning a struct we need the result address before we call. *) if needResultAddress then [loadMemory(ecx, esp, 4, nativeWordOpSize)] else [] ) @ [ (* Save the stack pointer. *) storeMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Save ML stack and switch to C stack. *) loadMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize) (* Load the saved C stack pointer. 
*) ] @ ( if preArgAlign = 0 then [] else [ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt preArgAlign), opSize=nativeWordOpSize}] ) @ ( (* The second argument is a SysWord containing the address of a malloced area of memory with the actual arguments in it. *) if null args then [] else [loadHeapMemory(mlArg2Reg, mlArg2Reg, 0, nativeWordOpSize)] ) @ argCode @ [ (* Call the function. We're discarding the value in rsp so no need to remove args. *) CallAddress(MemoryArg{base=eax, offset=0, index=NoIndex}), loadMemory(esp, ebp, memRegStackPtr, nativeWordOpSize) (* Restore the ML stack pointer. *) ] @ getResult @ (* Store the result in the destination. *) [ ReturnFromFunction 1 ] end + + fun closure32Bits(abi, args, result, f) = + let + (* Arguments are copied from the stack into a struct that is then passed to the + ML function. *) + fun copyArgs([], nArgs, argOffset, code, continue) = continue(nArgs, argOffset, code) + + | copyArgs(arg::args, nArgs, argOffset, code, continue) = + let + val {size, align, typeCode, elements} = Foreign.LibFFI.extractFFItype arg + val newArgOffset = alignUp(argOffset, align) + val sourceAddr = {base=ebx, offset=nArgs*4, index=NoIndex} + val destAddr = {base=esp, offset=Word.toInt newArgOffset, index=NoIndex} + in + if typeCode = Foreign.LibFFI.ffiTypeCodeStruct + then (* structs passed as values are recursively unpacked. *) + copyArgs(elements, nArgs, newArgOffset (* Struct is aligned. *), code, + fn (na, ao, c) => copyArgs(args, na, ao, c, continue)) + else if typeCode = Foreign.LibFFI.ffiTypeCodeVoid + then raise Foreign.Foreign "Void cannot be used for a function argument" + else if typeCode = Foreign.LibFFI.ffiTypeCodeDouble + then (* Double: copy the two words. High-order word first, then low-order. *) + copyArgs(args, nArgs+2, argOffset+size, + Move{source=MemoryArg sourceAddr, destination=RegisterArg eax, moveSize=Move32} :: + Move{source=RegisterArg eax, destination=MemoryArg destAddr, moveSize=Move32} :: + Move{source=MemoryArg {base=ebx, offset=nArgs*4+4, index=NoIndex}, destination=RegisterArg eax, moveSize=Move32} :: + Move{source=RegisterArg eax, destination=MemoryArg{base=esp, offset=Word.toInt newArgOffset + 4, index=NoIndex}, moveSize=Move32} :: + code, continue) + else (* Everything else is a single word on the stack. *) + let + val moveOp = + case size of + 0w1 => Move8 + | 0w2 => Move16 + | 0w4 => Move32 + | _ => raise Foreign.Foreign "copyArgs: Invalid size" + in + copyArgs(args, nArgs+1, argOffset+size, + Move{source=MemoryArg sourceAddr, destination=RegisterArg eax, moveSize=Move32} :: + Move{source=RegisterArg eax, destination=MemoryArg destAddr, moveSize=moveOp} :: code, continue) + end + end + + val {typeCode, size, align, ...} = Foreign.LibFFI.extractFFItype result + + (* Struct results are normally passed by reference. 
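+           i.e. the caller passes a hidden first argument pointing at memory for the result.
+           The exception, tested below, is the non-SYSV ABIs where structs of 1, 2, 4 or 8
+           bytes come back in eax (and edx for the 8-byte case), mirroring call32Bits above.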
*) + val resultStructByRef = + typeCode = Foreign.LibFFI.ffiTypeCodeStruct andalso + (abi = FFI_SYSV orelse (size <> 0w1 andalso size <> 0w2 andalso size <> 0w4 andalso size <> 0w8)) + + val (argCount, argumentSpace, copyArgsFromStack) = + copyArgs(args, if resultStructByRef then 1 else 0, 0w0, [], fn result => result) + + val resultOffset = alignUp(argumentSpace, align) (* Offset of result area *) + + val (loadResults, resultSize) = + if typeCode = Foreign.LibFFI.ffiTypeCodeVoid orelse resultStructByRef + then ([], 0w0) + else + let + val resultMem = {base=esp, offset=Word.toInt resultOffset, index=NoIndex} + val resultCode = + if typeCode = Foreign.LibFFI.ffiTypeCodeSInt8 + then [Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move8X32 }] + else if size = 0w1 + then [Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move8 }] + else if typeCode = Foreign.LibFFI.ffiTypeCodeSInt16 + then [Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move16X32 }] + else if size = 0w2 + then [Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move16 }] + else if typeCode = Foreign.LibFFI.ffiTypeCodeFloat + then [FPLoadFromMemory{ address=resultMem, precision=SinglePrecision }] + else if size = 0w4 + then [Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move32 }] + else if typeCode = Foreign.LibFFI.ffiTypeCodeDouble + then [FPLoadFromMemory{ address=resultMem, precision=DoublePrecision }] + else if size = 0w8 (* MSC only. Struct returned in eax/edx. *) + then + [ + Move{source=MemoryArg resultMem, destination=RegisterArg eax, moveSize=Move32 }, + Move{source=MemoryArg {base=esp, offset=Word.toInt resultOffset + 4, index=NoIndex}, + destination=RegisterArg edx, moveSize=Move32 } + ] + else raise Foreign.Foreign "Unrecognised result type" + in + (resultCode, size) + end + + val stackSpace = Word.toInt(resultOffset + resultSize) + + local + val align = stackSpace mod 16 + in + (* Stack space. In order to align the stack correctly for GCC we need the value in memRegCStackPtr + to be a multiple of 16 bytes + 8. esp would have been on a 16 byte boundary before the return address + was pushed so after pushing the return address and four registers we need a further 4 bytes + to get the alignment back again. The effect of this is that the argument and result area is + on an 8-byte boundary. *) + val stackToAllocate = stackSpace + (if align = 0 then 0 else 16-align) + 4 + end + + val lengthWord = IntInf.orb(1, IntInf.<<(Word8.toLargeInt 0wx1, 0w24)) + in + [ + (* Push callee-save registers. *) + PushToStack(RegisterArg ebp), PushToStack(RegisterArg ebx), PushToStack(RegisterArg edi), PushToStack(RegisterArg esi), + (* Set ebx to point to the original args. *) + LoadAddress{ output=ebx, offset=20, base=SOME esp, index=NoIndex, opSize=nativeWordOpSize}, + (* Allocate stack space. *) + ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt stackToAllocate), opSize=nativeWordOpSize} + ] @ copyArgsFromStack @ + [ + (* Get the value for ebp. *) + Move{source=AddressConstArg getThreadDataCall, destination=RegisterArg ecx, moveSize=opSizeToMove polyWordOpSize}, + CallAddress(MemoryArg{base=ecx, offset=0, index=NoIndex}), (* Get the address - N.B. Heap addr in 32-in-64. *) + moveRR{source=eax, output=ebp, opSize=nativeWordOpSize}, + (* Save the address of the argument and result area. *) + moveRR{source=esp, output=ecx, opSize=nativeWordOpSize}, + (* Switch to the ML stack. 
*) + storeMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize), + loadMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), + (* Set up the ML arguments. These are SysWord values so we need to allocate + memory for them. That in turn raises the possibility of a GC. *) + AllocStore{ size=1, output=eax, saveRegs=[] }, + Move{source=NonAddressConstArg lengthWord, destination=MemoryArg {offset= ~ (Word.toInt wordSize), base=eax, index=NoIndex}, + moveSize=opSizeToMove polyWordOpSize}, + Move{source=RegisterArg ecx, destination=MemoryArg {offset=0, base=eax, index=NoIndex}, moveSize=opSizeToMove nativeWordOpSize}, + StoreInitialised + ] @ + ( + (* If we're returning a struct the address for the result will have been passed in the + first argument. We use that as the result area. Otherwise point to the result + area on the stack. *) + if resultStructByRef + then Move{source=MemoryArg {offset=0, base=ebx, index=NoIndex}, destination=RegisterArg ecx, moveSize=opSizeToMove nativeWordOpSize} + else ArithToGenReg{opc=ADD, output=ecx, source=NonAddressConstArg(Word.toLargeInt resultOffset), opSize=nativeWordOpSize} + ) :: + [ + AllocStore{ size=1, output=ebx, saveRegs=[eax] }, + Move{source=NonAddressConstArg lengthWord, destination=MemoryArg {offset= ~ (Word.toInt wordSize), base=ebx, index=NoIndex}, + moveSize=opSizeToMove polyWordOpSize}, + Move{source=RegisterArg ecx, destination=MemoryArg {offset=0, base=ebx, index=NoIndex}, moveSize=opSizeToMove nativeWordOpSize}, + StoreInitialised, + (* Call the ML function using the full closure call. *) + Move{source=AddressConstArg(Address.toMachineWord f), destination=RegisterArg edx, moveSize=opSizeToMove polyWordOpSize}, + CallAddress(MemoryArg{offset=0, base=edx, index=NoIndex}), + (* Save the ML stack pointer because we may have grown the stack. Switch to the C stack. *) + storeMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), + loadMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize) + ] @ loadResults @ + [ + (* Remove the stack space. *) + ArithToGenReg{opc=ADD, output=esp, source=NonAddressConstArg(LargeInt.fromInt stackToAllocate), opSize=nativeWordOpSize}, + PopR esi, PopR edi, PopR ebx, PopR ebp, (* Restore callee-save registers. *) + (* Callee removes arguments in StdCall. *) + ReturnFromFunction (if abi = FFI_STDCALL then argCount else 0) + ] + end (* Windows on X64. *) fun callWindows64Bits(args, result) = let val extraStackReg = r10 (* Not used for any arguments. *) fun loadWin64Args([], stackOffset, _, _, code, extraStack, preCode) = (code, stackOffset, preCode, extraStack) | loadWin64Args(arg::args, stackOffset, argOffset, regs, code, extraStack, preCode) = let val {size, align, typeCode, ...} = Foreign.LibFFI.extractFFItype arg val newArgOffset = alignUp(argOffset, align) val baseAddr = {base=mlArg2Reg, offset=Word.toInt newArgOffset, index=NoIndex} val workReg = rcx (* rcx: the last to be loaded. *) (* Integer arguments. *) fun loadIntArg moveOp = case regs of (areg, _) :: regs' => loadWin64Args(args, stackOffset, newArgOffset+size, regs', Move{source=MemoryArg baseAddr, destination=RegisterArg areg, moveSize=moveOp } :: code, extraStack, preCode) | [] => loadWin64Args(args, stackOffset+8, newArgOffset+size, [], if size = 0w8 then PushToStack(MemoryArg baseAddr) :: code else (* Need to load it into a register first. *) Move{source=MemoryArg baseAddr, destination=RegisterArg workReg, moveSize=moveOp } :: PushToStack(RegisterArg workReg) :: code, extraStack, preCode) in (* Structs of 1, 2, 4 and 8 bytes are passed as the corresponding int. 
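       (A two-byte struct, for example, is loaded with a single 16-bit move, exactly as
       a short would be.)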
It may not be necessary to sign-extend 1, 2 or 4-byte values. 2, 4 or 8-byte structs may not be aligned onto the appropriate boundary but it should still work. *) case size of 0w1 => if typeCode = Foreign.LibFFI.ffiTypeCodeSInt8 then (* Signed char. *) loadIntArg Move8X64 else (* Unsigned char or single byte struct *) loadIntArg Move8 | 0w2 => if typeCode = Foreign.LibFFI.ffiTypeCodeSInt16 then (* Signed 16-bits. *) loadIntArg Move16X64 else (* Unsigned 16-bits. *) loadIntArg Move16 | 0w4 => if typeCode = Foreign.LibFFI.ffiTypeCodeFloat then ( case regs of (_, fpReg) :: regs' => loadWin64Args(args, stackOffset, newArgOffset+size, regs', XMMArith{opc=SSE2MoveFloat, source=MemoryArg baseAddr, output=fpReg } :: code, extraStack, preCode) | [] => loadWin64Args(args, stackOffset+8, newArgOffset+size, [], Move{source=MemoryArg baseAddr, destination=RegisterArg workReg, moveSize=Move32 } :: PushToStack(RegisterArg workReg) :: code, extraStack, preCode) ) else if typeCode = Foreign.LibFFI.ffiTypeCodeSInt32 orelse typeCode = Foreign.LibFFI.ffiTypeCodeInt then (* Signed 32-bits. *) loadIntArg Move32X64 else (* Unsigned 32-bits. *) loadIntArg Move32 | 0w8 => if typeCode = Foreign.LibFFI.ffiTypeCodeDouble then ( case regs of (_, fpReg) :: regs' => loadWin64Args(args, stackOffset, newArgOffset+size, regs', XMMArith{opc=SSE2MoveDouble, source=MemoryArg baseAddr, output=fpReg } :: code, extraStack, preCode) | [] => loadWin64Args(args, stackOffset+8, newArgOffset+size, [], Move{source=MemoryArg baseAddr, destination=RegisterArg workReg, moveSize=Move64 } :: PushToStack(RegisterArg workReg) :: code, extraStack, preCode) ) else (* 64-bits. *) loadIntArg Move64 | _ => if typeCode <> Foreign.LibFFI.ffiTypeCodeStruct then raise Foreign.Foreign "Unrecognised type for function argument" else let (* Structures of other sizes are passed by reference. They are first copied into new areas on the stack. This ensures that the called function can update the structure without changing the original values. *) val newExtra = intAlignUp(extraStack + Word.toInt size, 0w16) val newPreCode = moveMemory{source=(mlArg2Reg, Word.toInt newArgOffset), destination=(extraStackReg, extraStack), length=Word.toInt size} @ preCode in case regs of (areg, _) :: regs' => loadWin64Args(args, stackOffset, newArgOffset+size, regs', loadAddress{source=(extraStackReg, extraStack), destination=areg} :: code, newExtra, newPreCode) | [] => loadWin64Args(args, stackOffset+8, newArgOffset+size, [], loadAddress{source=(extraStackReg, extraStack), destination=workReg} :: PushToStack(RegisterArg workReg) :: code, newExtra, newPreCode) end end val {typeCode, size, ...} = Foreign.LibFFI.extractFFItype result val resultAreaPtr = r12 (* Saved value of r8 - This is callee save. 
*) val resultMemory = {base=resultAreaPtr, offset=0, index=NoIndex} fun storeIntValue moveOp = ([Move{source=RegisterArg eax, destination=MemoryArg resultMemory, moveSize=moveOp}], false) and storeFloatValue precision = ([XMMStoreToMemory{toStore=xmm0, address=resultMemory, precision=precision}], false) val (getResult, passStructAddress) = if typeCode = Foreign.LibFFI.ffiTypeCodeVoid then ([], false) else if size = 0w1 (* Includes structs *) then (* Single byte *) storeIntValue Move8 else if size = 0w2 then (* 16-bits *) storeIntValue Move16 else if typeCode = Foreign.LibFFI.ffiTypeCodeFloat then storeFloatValue SinglePrecision else if size = 0w4 then storeIntValue Move32 else if typeCode = Foreign.LibFFI.ffiTypeCodeDouble then storeFloatValue DoublePrecision else if size = 0w8 then storeIntValue Move64 else if typeCode = Foreign.LibFFI.ffiTypeCodeStruct then ([], true) else raise Foreign.Foreign "Unrecognised result type" val win64ArgRegs = [ (rcx, xmm0), (rdx, xmm1), (r8, xmm2), (r9, xmm3) ] (* argCode is the code to load and push the arguments. argStack is the amount of stack space the arguments will take. It's only used to ensure that the stack is aligned onto a 16-byte boundary. preArgCode is any code that is needed to copy the arguments before they are actually loaded. Because it is done before the argument registers are loaded it can use rcx, rdi and rsi. extraStack is local stack space needed. It is usually zero but if it is non-zero it must be a multiple of 16 bytes. The address of this area is loaded into r10 before preArgCode is called. *) val (argCode, argStack, preArgCode, extraStack) = if passStructAddress then (* The address of the result structure goes in the first argument register: rcx *) loadWin64Args(args, 0, 0w0, tl win64ArgRegs, [moveRR{source=resultAreaPtr, output=rcx, opSize=nativeWordOpSize}], 0, []) else loadWin64Args(args, 0, 0w0, win64ArgRegs, [], 0, []) local val align = argStack mod 16 in (* Always align the stack. *) val preArgAlign = if align = 0 then 0 else 16-align end in (* Save heap ptr. Needed in case we have a callback. *) [storeMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize)] @ ( (* Put the destination address into a callee save resgister. We have to put the C address in there now because an ML address wouldn't be updated by a possible GC in a callback. *) if #typeCode(Foreign.LibFFI.extractFFItype result) <> Foreign.LibFFI.ffiTypeCodeVoid then [loadHeapMemory(resultAreaPtr, r8, 0, nativeWordOpSize)] else [] ) @ [ (* Save the stack pointer. *) storeMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Save ML stack and switch to C stack. *) loadMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize) (* Load the saved C stack pointer. *) ] @ ( if extraStack = 0 then [] else [ ArithToGenReg{opc=SUB, output=rsp, source=NonAddressConstArg(LargeInt.fromInt extraStack), opSize=nativeWordOpSize}, Move{source=RegisterArg rsp, destination=RegisterArg extraStackReg, moveSize=Move64} ] ) @ ( if preArgAlign = 0 then [] else [ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt preArgAlign), opSize=nativeWordOpSize}] ) @ ( (* The second argument is a SysWord containing the address of a malloced area of memory with the actual arguments in it. *) if null args then [] else [loadHeapMemory(mlArg2Reg, mlArg2Reg, 0, nativeWordOpSize)] ) @ preArgCode @ argCode @ [ (* Reserve a 32-byte area after the arguments. This is specific to the Windows ABI. 
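       It is the register "home" (shadow) area in which the callee may spill rcx, rdx,
       r8 and r9, as noted in the calling-convention summary at the top of this file.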
*) ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt 32), opSize=nativeWordOpSize}, let (* The entry point is in a SysWord.word value in RAX. *) val entryPoint = case targetArch of ObjectId32Bit => MemoryArg{base=ebx, offset=0, index=Index4 eax} | _ => MemoryArg{base=eax, offset=0, index=NoIndex} in (* Call the function. We're discarding the value in rsp so no need to remove args. *) CallAddress entryPoint end, loadMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Restore the ML stack pointer. *) (* Reload the heap pointer. If we've called back to ML this could well have changed. *) loadMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize) ] @ (* Store the result in the destination. *) getResult @ [ReturnFromFunction 0 ] end (* callWindows64Bits *) + fun closureWindows64Bits(args, result, f) = raise Fail "TODO: closureWindows64Bits" local (* The rules for passing structs in SysV on X86/64 are complicated but most of the special cases don't apply. We don't support floating point larger than 8 bytes, packed structures or C++ constructors. It then reduces to the following: Structures of up to 8 bytes are passed in a single register and of 8-16 bytes in two registers. Larger structures are passed on the stack. The question is whether to use general registers or SSE2 registers. Each 8 byte chunk is considered independently after any internal structs have been unwrapped. Each chunk will consist of either a single 8-byte value (i.e.. a pointer, int64_t or a double) or one or more smaller values and possibly some padding. An SSE2 register is used if the value is a double, two floats or a single float and padding. Otherwise it must have at least one shorter int-like type (e.g. int, char, short etc) in which case a general register is used. That applies even if it also contains a float. If, having selected the kind of registers to be used, there are not enough for the whole struct it is passed on the stack. We don't really need this for simple arguments but it's easier to consider them all together. *) datatype argClass = ArgInMemory | ArgInRegs of { firstInSSE: bool, secondInSSE: bool } fun classifyArg arg = let val {size, ...} = Foreign.LibFFI.extractFFItype arg (* Unwrap the struct and any internal structs. *) fun getFields([], _) = [] | getFields(field::fields, offset) = let val {size, align, typeCode, elements, ...} = Foreign.LibFFI.extractFFItype field val alignedOffset = alignUp(offset, align) (* Align this even if it's a sub-struct *) in if typeCode = Foreign.LibFFI.ffiTypeCodeVoid then raise Foreign.Foreign "Void cannot be used for a function argument" else if typeCode = Foreign.LibFFI.ffiTypeCodeStruct then getFields(elements, alignedOffset) @ getFields(fields, alignedOffset+size) else (typeCode, alignedOffset) :: getFields(fields, alignedOffset+size) end val isSSE = List.all (fn (tc, _) => tc = Foreign.LibFFI.ffiTypeCodeFloat orelse tc = Foreign.LibFFI.ffiTypeCodeDouble) in if size > 0w16 then ArgInMemory else let val fieldsAndOffsets = getFields([arg], 0w0) in if size <= 0w8 (* Only the first register will be used. *) then ArgInRegs{firstInSSE=isSSE fieldsAndOffsets, secondInSSE=false} else let val (first8Bytes, second8Bytes) = List.partition (fn (_, off) => off <= 0w8) fieldsAndOffsets in ArgInRegs{firstInSSE=isSSE first8Bytes, secondInSSE=isSSE second8Bytes} end end end in fun callUnix64Bits(args, result) = let val argWorkReg = r10 (* Not used for any arguments. *) val resultAreaPtr = r12 (* Saved value of r8 - This is callee save. 
*) val argPtrReg = r11 (* Pointer to argument area - Can't use mlArg2Reg since that's RSI on 32-in-64. *) fun loadSysV64Args([], stackOffset, _, _, _, code, preCode) = (code, stackOffset, preCode) | loadSysV64Args(arg::args, stackOffset, argOffset, gRegs, fpRegs, code, preCode) = let val {size, align, typeCode, ...} = Foreign.LibFFI.extractFFItype arg (* Load a value into a register. Normally the size will be 1, 2, 4 or 8 bytes and this will just involve a simple load. Structs, though, can be of any size up to 8 bytes. *) fun loadRegister(reg, offset, size) = let (* We don't necessarily have to sign-extend. There's a comment in libffi that suggests that LVM expects it even though the SysV ABI doesn't require it. *) val moveOp = if size = 0w8 then Move64 else if typeCode = Foreign.LibFFI.ffiTypeCodeSInt32 orelse typeCode = Foreign.LibFFI.ffiTypeCodeInt then Move32X64 else if size >= 0w4 then Move32 else if typeCode = Foreign.LibFFI.ffiTypeCodeSInt16 then Move16X64 else if size >= 0w2 then Move16 else if typeCode = Foreign.LibFFI.ffiTypeCodeSInt8 then Move8X64 else Move8 in [Move{source=MemoryArg{base=argPtrReg, offset=Word.toInt offset, index=NoIndex}, destination=RegisterArg reg, moveSize=moveOp}] end @ ( if size = 0w6 orelse size = 0w7 then [ Move{source=MemoryArg{base=argPtrReg, offset=Word.toInt offset + 4, index=NoIndex}, destination=RegisterArg argWorkReg, moveSize=Move16}, ShiftConstant{ shiftType=SHL, output=argWorkReg, shift=0w32, opSize=OpSize64 }, ArithToGenReg{ opc=OR, output=reg, source=RegisterArg argWorkReg, opSize=OpSize64 } ] else [] ) @ ( if size = 0w3 orelse size = 0w5 orelse size = 0w7 then [ Move{source=MemoryArg{base=argPtrReg, offset=Word.toInt offset + Word.toInt(size-0w1), index=NoIndex}, destination=RegisterArg argWorkReg, moveSize=Move8}, ShiftConstant{ shiftType=SHL, output=argWorkReg, shift=Word8.fromLargeWord(Word.toLargeWord((size-0w1)*0w8)), opSize=OpSize64 }, ArithToGenReg{ opc=OR, output=reg, source=RegisterArg argWorkReg, opSize=OpSize64 } ] else [] ) val newArgOffset = alignUp(argOffset, align) val word1Addr = {base=argPtrReg, offset=Word.toInt newArgOffset, index=NoIndex} val word2Addr = {base=argPtrReg, offset=Word.toInt newArgOffset + 8, index=NoIndex} in case (classifyArg arg, size > 0w8, gRegs, fpRegs) of (* 8 bytes or smaller - single general reg. This is the usual case. *) (ArgInRegs{firstInSSE=false, ...}, false, gReg :: gRegs', fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', loadRegister(gReg, newArgOffset, size) @ code, preCode) (* 8 bytes or smaller - single SSE reg. Usual case for real arguments. *) | (ArgInRegs{firstInSSE=true, ...}, false, gRegs', fpReg :: fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', XMMArith{opc=if size = 0w4 then SSE2MoveFloat else SSE2MoveDouble, source=MemoryArg word1Addr, output=fpReg } :: code, preCode) (* 9-16 bytes - both values in general regs. *) | (ArgInRegs{firstInSSE=false, secondInSSE=false}, true, gReg1 :: gReg2 :: gRegs', fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', Move{source=MemoryArg word1Addr, destination=RegisterArg gReg1, moveSize=Move64} :: loadRegister(gReg2, newArgOffset+0w8, size-0w8) @ code, preCode) (* 9-16 bytes - first in general, second in SSE. 
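       For example a pointer followed by a double, or by a single float (the 12-byte
       case tested below).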
*) | (ArgInRegs{firstInSSE=false, secondInSSE=true}, true, gReg :: gRegs', fpReg :: fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', Move{source=MemoryArg word1Addr, destination=RegisterArg gReg, moveSize=Move64} :: XMMArith{opc=if size = 0w12 then SSE2MoveFloat else SSE2MoveDouble, source=MemoryArg word2Addr, output=fpReg } :: code, preCode) (* 9-16 bytes - first in SSE, second in general. *) | (ArgInRegs{firstInSSE=true, secondInSSE=false}, true, gReg :: gRegs', fpReg :: fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', XMMArith{opc=SSE2MoveDouble, source=MemoryArg word1Addr, output=fpReg } :: loadRegister(gReg, newArgOffset+0w8, size-0w8) @ code, preCode) | (* 9-16 bytes - both values in SSE regs. *) (ArgInRegs{firstInSSE=true, secondInSSE=true}, true, gRegs', fpReg1 :: fpReg2 :: fpRegs') => loadSysV64Args(args, stackOffset, newArgOffset+size, gRegs', fpRegs', XMMArith{opc=SSE2MoveDouble, source=MemoryArg word1Addr, output=fpReg1 } :: XMMArith{opc=if size = 0w12 then SSE2MoveFloat else SSE2MoveDouble, source=MemoryArg word2Addr, output=fpReg2 } :: code, preCode) | (_, _, gRegs', fpRegs') => (* Either larger than 16 bytes or we've run out of the right kind of registers. *) (* Move the argument in the preCode. It's possible a large struct could be the first argument and if we left it until the end RDI and RSI would already have been loaded. *) let val space = intAlignUp(Word.toInt size, 0w8) in loadSysV64Args(args, stackOffset+space, newArgOffset+size, gRegs', fpRegs', code, ArithToGenReg{opc=SUB, output=rsp, source=NonAddressConstArg(LargeInt.fromInt space), opSize=nativeWordOpSize} :: moveMemory{source=(argPtrReg, Word.toInt newArgOffset), destination=(rsp, 0), length=Word.toInt size} @ preCode) end end (* The rules for returning structs are similar to those for parameters. *) local (* Store a result register into the result area. In almost all cases this is very simple: the only complication is with structs of odd sizes. *) fun storeResult(reg, offset, size) = let val moveOp = if size = 0w8 then Move64 else if size >= 0w4 then Move32 else if size >= 0w2 then Move16 else Move8 in [Move{source=RegisterArg reg, destination=MemoryArg {base=resultAreaPtr, offset=offset, index=NoIndex}, moveSize=moveOp}] end @ ( if size = 0w6 orelse size = 0w7 then [ ShiftConstant{ shiftType=SHR, output=reg, shift=0w32, opSize=OpSize64 }, Move{source=RegisterArg reg, destination=MemoryArg {base=resultAreaPtr, offset=offset+4, index=NoIndex}, moveSize=Move16} ] else [] ) @ ( if size = 0w3 orelse size = 0w5 orelse size = 0w7 then [ ShiftConstant{ shiftType=SHR, output=reg, shift=Word8.fromLargeWord(Word.toLargeWord((size-0w1)*0w8)), opSize=OpSize64 }, Move{source=RegisterArg reg, destination=MemoryArg {base=resultAreaPtr, offset=offset+Word.toInt(size-0w1), index=NoIndex}, moveSize=Move8} ] else [] ) val {size, typeCode, ...} = Foreign.LibFFI.extractFFItype result in val (getResult, passArgAddress) = if typeCode = Foreign.LibFFI.ffiTypeCodeVoid then ([], false) else case (classifyArg result, size > 0w8) of (* 8 bytes or smaller - returned in RAX - Normal case for int-like results. *) (ArgInRegs{firstInSSE=false, ...}, false) => (storeResult(rax, 0, size), false) (* 8 bytes or smaller - returned in XMM0 - Normal case for real results. 
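       i.e. a double, a float, or a struct whose fields are all float/double and total
       at most 8 bytes.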
*) | (ArgInRegs{firstInSSE=true, ...}, false) => ([XMMStoreToMemory{toStore=xmm0, address={base=resultAreaPtr, offset=0, index=NoIndex}, precision=if size = 0w4 then SinglePrecision else DoublePrecision}], false) (* 9-16 bytes - returned in RAX/RDX. *) | (ArgInRegs{firstInSSE=false, secondInSSE=false}, true) => (storeResult(rax, 0, 0w8) @ storeResult(rdx, 0, size-0w8), false) (* 9-16 bytes - first in RAX, second in XMM0. *) | (ArgInRegs{firstInSSE=false, secondInSSE=true}, true) => (XMMStoreToMemory{toStore=xmm0, address={base=resultAreaPtr, offset=8, index=NoIndex}, precision=if size = 0w12 then SinglePrecision else DoublePrecision} :: storeResult(rax, 0, 0w8), false) (* 9-16 bytes - first in XMM0, second in RAX. *) | (ArgInRegs{firstInSSE=true, secondInSSE=false}, true) => (XMMStoreToMemory{toStore=xmm0, address={base=resultAreaPtr, offset=0, index=NoIndex}, precision=DoublePrecision} :: storeResult(rax, 8, size-0w8), false) (* 9-16 bytes - both values in SSE regs.*) | (ArgInRegs{firstInSSE=true, secondInSSE=true}, true) => ([XMMStoreToMemory{toStore=xmm0, address={base=resultAreaPtr, offset=0, index=NoIndex}, precision=DoublePrecision}, XMMStoreToMemory{toStore=xmm1, address={base=resultAreaPtr, offset=8, index=NoIndex}, precision=if size = 0w12 then SinglePrecision else DoublePrecision}], false) | _ => ([], true) (* Have to pass the address of the area in memory *) end val sysVGenRegs = [rdi, rsi, rdx, rcx, r8, r9] and sysVFPRegs = [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7] val (argCode, argStack, preArgCode) = if passArgAddress (* If we have to pass the address of the result struct it goes in rdi. *) then loadSysV64Args(args, 0, 0w0, tl sysVGenRegs, sysVFPRegs, [moveRR{source=resultAreaPtr, output=rdi, opSize=nativeWordOpSize}], []) else loadSysV64Args(args, 0, 0w0, sysVGenRegs, sysVFPRegs, [], []) local val align = argStack mod 16 in (* Always align the stack. *) val preArgAlign = if align = 0 then 0 else 16-align end in (* Save heap ptr. Needed in case we have a callback. *) [storeMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize)] @ ( (* Put the destination address into a callee save resgister. We have to put the C address in there now because an ML address wouldn't be updated by a possible GC in a callback. *) if #typeCode(Foreign.LibFFI.extractFFItype result) <> Foreign.LibFFI.ffiTypeCodeVoid then [loadHeapMemory(resultAreaPtr, r8, 0, nativeWordOpSize)] else [] ) @ [ (* Save the stack pointer. *) storeMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Save ML stack and switch to C stack. *) loadMemory(esp, ebp, memRegCStackPtr, nativeWordOpSize) (* Load the saved C stack pointer. *) ] @ ( if preArgAlign = 0 then [] else [ArithToGenReg{opc=SUB, output=esp, source=NonAddressConstArg(LargeInt.fromInt preArgAlign), opSize=nativeWordOpSize}] ) @ ( (* The second argument is a SysWord containing the address of a malloced area of memory with the actual arguments in it. *) if null args then [] else [loadHeapMemory(argPtrReg, mlArg2Reg, 0, nativeWordOpSize)] ) @ preArgCode @ argCode @ [ let (* The entry point is in a SysWord.word value in RAX. *) val entryPoint = case targetArch of ObjectId32Bit => MemoryArg{base=ebx, offset=0, index=Index4 eax} | _ => MemoryArg{base=eax, offset=0, index=NoIndex} in (* Call the function. We're discarding the value in rsp so no need to remove args. *) CallAddress entryPoint end, loadMemory(esp, ebp, memRegStackPtr, nativeWordOpSize), (* Restore the ML stack pointer. *) (* Reload the heap pointer. 
               If we've called back to ML this could well have changed. *)
            loadMemory(r15, ebp, memRegLocalMPointer, nativeWordOpSize)
        ] @ (* Store the result in the destination. *) getResult @
        [ ReturnFromFunction 0 ]
    end (* callUnix64Bits *)
+
+    fun closureUnix64Bits(args, result, f) = raise Fail "TODO: closureUnix64Bits"
    end

    fun foreignCall(abivalue: Foreign.LibFFI.abi, args: Foreign.LibFFI.ffiType list, result: Foreign.LibFFI.ffiType): Address.machineWord =
    let
-        val abi = getFFIAbi abivalue
-        val code =
-            case abi of
-                FFI_UNIX64 => callUnix64Bits(args, result)
-            |   FFI_WIN64 => callWindows64Bits(args, result)
-            |   _ => call32Bits(abi, args, result)
+        val code =
+            case (getABI(), getFFIAbi abivalue) of
+                (X64Unix, FFI_UNIX64) => callUnix64Bits(args, result)
+            |   (X64Win, FFI_WIN64) => callWindows64Bits(args, result)
+            |   (X86_32, abi) => call32Bits(abi, args, result)
+            |   _ => raise Foreign.Foreign "Unsupported ABI"
+
        val functionName = "foreignCall"
        val debugSwitches =
            [(*Universal.tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)),
               Universal.tagInject DEBUG.assemblyCodeTag true*)]
        val profileObject = createProfileObject functionName
        val newCode = codeCreate (functionName, profileObject, debugSwitches)
        val closure = makeConstantClosure()
        val () = X86OPTIMISE.generateCode{code=newCode, labelCount=0, ops=code, resultClosure=closure}
    in
        closureAsAddress closure
    end

    (* Build a callback function.  The arguments are the abi, the list of argument types and
       the result type.  The result is an ML function that takes an ML function, f, as its
       argument, registers it as a callback and returns the C function as its result.  When
       the C function is called the arguments are copied into temporary memory and the vector
       passed to f along with the address of the memory for the result.  "f" stores the result
       in it when it returns and the result is then passed back as the result of the callback. *)
    fun buildCallBack(abivalue: Foreign.LibFFI.abi, args: Foreign.LibFFI.ffiType list, result: Foreign.LibFFI.ffiType): Address.machineWord =
    let
-        val abi = getFFIAbi abivalue
+        (* For the moment we do everything inside the result function.  The intention is to
+           generate the main closure function outside this and have this function load "f"
+           into a register and jump to the main function. *)
+        fun resultFunction f =
+        let
+            val code =
+                case (getABI(), getFFIAbi abivalue) of
+                    (X64Unix, FFI_UNIX64) => closureUnix64Bits(args, result, f)
+                |   (X64Win, FFI_WIN64) => closureWindows64Bits(args, result, f)
+                |   (X86_32, abi) => closure32Bits(abi, args, result, f)
+                |   _ => raise Foreign.Foreign "Unsupported ABI"
+
+            val functionName = "foreignCall"
+            val debugSwitches =
+                [Universal.tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)),
+                 Universal.tagInject DEBUG.assemblyCodeTag true]
+            val profileObject = createProfileObject functionName
+            val newCode = codeCreate (functionName, profileObject, debugSwitches)
+            val closure = makeConstantClosure()
+            val () = X86OPTIMISE.generateCode{code=newCode, labelCount=0, ops=code, resultClosure=closure}
+        in
+            closureAsAddress closure
+        end
    in
-        raise Fail "TODO: foreignCall"
+        Address.toMachineWord resultFunction
    end
end;
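(* Illustrative sketch only, not part of the patch: how these two entry points are intended
   to be used from the Foreign layer.  The names abi, argTypes, resultType and mlHandler are
   hypothetical placeholders; RunCall.unsafeCast is the usual Poly/ML escape hatch for giving
   the machineWord returned here its real ML type.

       val callCode = foreignCall(abi, argTypes, resultType)
       (* callCode is the code of an ML function of three arguments: the C function to
          call, the argument block and the address of memory for the result. *)

       val makeCallback = RunCall.unsafeCast(buildCallBack(abi, argTypes, resultType))
       val cFunction = makeCallback mlHandler
       (* mlHandler receives the address of the copied arguments and the address of the
          memory in which it must store the result; cFunction is the C-callable code. *)
*)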