diff --git a/basis/Thread.sml b/basis/Thread.581.sml similarity index 100% copy from basis/Thread.sml copy to basis/Thread.581.sml diff --git a/basis/Thread.sml b/basis/Thread.sml index 1cef1b6f..5625ca65 100644 --- a/basis/Thread.sml +++ b/basis/Thread.sml @@ -1,768 +1,769 @@ (* Title: Thread package for ML. Author: David C. J. Matthews - Copyright (c) 2007-2014, 2018-20 + Copyright (c) 2007-2014, 2018-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* This signature and structure are not part of the standard basis library but are included here because they depend on the Time structure and are in turn dependencies of the BasicIO structure. *) (*!Earlier versions of Poly/ML have provided a form of concurrent execution through the Process structure. Version 5.1 introduces new thread primitives in the Thread structure. This structure is modelled on the Posix thread (pthread) package but simplified and modified for ML. The aim is to provide an efficient implementation of parallelism particularly to enable ML programs to make use of multi-core processors while minimising the changes needed to existing code. The Process structure will continue to be available as a library written on top of these primitives but new programs should use the Thread structure directly. The thread package differs from pthreads in a number of ways. There is no join function to wait for the completion of a thread. This can be written using mutexes and condition variables. Cancellation and signal handling are combined into the interrupt functions. (The Poly/ML Signal structure handles signals for all the threads together). The effect of explicit cancellation is achieved using the interrupt function. This causes an interrupt to be generated in a specific thread. Alternatively an interrupt can be broadcast to all threads. This is most likely to be used interactively to kill threads that appear to have gone out of control. The normal top-level handler for a console interrupt will generate this. Threads can choose how or whether they respond to these interrupts. A thread that is doing processor-intensive work probably needs to be able to be interrupted asynchronously whereas if it is communicating with other threads the presence of asynchronous interrupts makes correct programming difficult. *) signature THREAD = sig (*!The Thread exception can be raised by various of the functions in the structure if they detect an error.*) exception Thread of string (* Raised if an operation fails. *) structure Thread: sig (*!The type of a thread identifier.*) eqtype thread (* Thread attributes - This may be extended. *) (*!The type of a thread attribute. Thread attributes are properties of the thread that are set initially when the thread is created but can subsequently be modified by the thread itself. The thread attribute type may be extended in the future to include things like scheduling priority. 
The current thread attributes control the way interrupt exceptions are delivered to the thread. `EnableBroadcastInterrupt` controls whether the thread will receive an interrupt sent using `broadcastInterrupt` or as a result of pressing the console interrupt key. If this is false the thread will not receive them. The default for a new thread if this is not specified is false. `InterruptState` controls when and whether interrupts are delivered to the thread. This includes broadcast interrupts and also interrupts directed at a specific thread with the interrupt call. `InterruptDefer` means the thread will not receive any interrupts. However, if the thread has previously been interrupted the interrupt may be delivered when the thread calls setAttributes to change its interrupt state. `InterruptSynch` means interrupts are delivered synchronously. An interrupt will be delayed until an interruption point. An interruption point is one of: `testInterrupt`, `ConditionVar.wait`, `ConditionVar.waitUntil` and various library calls that may block, such as IO calls, pause etc. N.B. `Mutex.lock` is not an interruption point even though it can result in a thread blocking for an indefinite period. `InterruptAsynch` means interrupts are delivered asynchronously i.e. at a suitable point soon after they are triggered. `InterruptAsynchOnce` means that only a single interrupt is delivered asynchronously after which the interrupt state is changed to `InterruptSynch`. It allows a thread to tidy up and if necessary indicate that it has been interrupted without the risk of a second asynchronous interrupt occurring in the handler for the first interrupt. If this attribute is not specified when a thread is created the default is `InterruptSynch`. `MaximumMLStack` was added in version 5.5.3. It controls the maximum size the ML stack may grow to. It is an option type where NONE allows the stack to grow to the limit of the available memory whereas SOME n limits the stack to n words. This is approximate since there is some rounding involved. When the limit is reached the thread is sent an Interrupt exception.*) datatype threadAttribute = (* Does this thread accept a broadcast interrupt? The default is not to accept broadcast interrupts. *) EnableBroadcastInterrupt of bool (* How to handle interrupts. The default is to handle interrupts synchronously. *) | InterruptState of interruptState (* Maximum size of the ML stack in words. NONE means unlimited *) | MaximumMLStack of int option and interruptState = InterruptDefer (* Defer any interrupts. *) | InterruptSynch (* Interrupts are delivered synchronously. An interrupt will be delayed until an interruption point. An interruption point is one of: testInterrupt, ConditionVar.wait, ConditionVar.waitUntil and various library calls that may block, such as IO calls, pause etc. N.B. Mutex.lock is not an interruption point even though it can result in a thread blocking for an indefinite period. *) | InterruptAsynch (* Interrupts are delivered asynchronously i.e. at a suitable point soon after they are triggered. *) | InterruptAsynchOnce (* As InterruptAsynch except that only a single interrupt is delivered asynchronously after which the interrupt state is changed to InterruptSynch. It allows a thread to tidy up and if necessary indicate that it has been interrupted without the risk of a second asynchronous interrupt occurring in the handler for the first interrupt. *) (*!Fork a thread. Starts a new thread running the function argument. 
The attribute list gives initial values for thread attributes which can be modified by the thread itself. Any unspecified attributes take default values. The thread is terminated when the thread function returns, if it raises an uncaught exception or if it calls `exit`.*) val fork: (unit->unit) * threadAttribute list -> thread (*!Terminate this thread. *) val exit: unit -> unit (*!Test if a thread is still running or has terminated. This function should be used with care. The thread may be on the point of terminating and still appear to be active.*) val isActive: thread -> bool (*!Test whether thread ids are the same. This is provided for backwards compatibility since `thread` is an eqtype. *) val equal: thread * thread -> bool (*!Return the thread identifier for the current thread. *) val self: unit -> thread exception Interrupt (* = SML90.Interrupt *) (*!Send an Interrupt exception to a specific thread. When and indeed whether the exception is actually delivered will depend on the interrupt state of the target thread. Raises Thread if the thread is no longer running, so an exception handler should be used unless the thread is known to be blocked. *) val interrupt: thread -> unit (*!Send an interrupt exception to every thread which is set to accept it. *) val broadcastInterrupt: unit -> unit (*!If this thread is handling interrupts synchronously, test to see if it has been interrupted. If so it raises the `Interrupt` exception. *) val testInterrupt: unit -> unit (*!Terminate a thread. This should be used as a last resort. Normally a thread should be allowed to clean up and terminate by using the interrupt call. Raises Thread if the thread is no longer running, so an exception handler should be used unless the thread is known to be blocked. *) val kill: thread -> unit (*!Get and set thread-local store for the calling thread. The store is a tagged associative memory which is initially empty for a new thread. A thread can call setLocal to add or replace items in its store and call getLocal to return values if they exist. The Universal structure contains functions to make new tags as well as injection, projection and test functions. *) val getLocal: 'a Universal.tag -> 'a option and setLocal: 'a Universal.tag * 'a -> unit (*!Change the specified attribute(s) for the calling thread. Unspecified attributes remain unchanged. *) val setAttributes: threadAttribute list -> unit (*!Get the values of attributes. *) val getAttributes: unit -> threadAttribute list (*!Return the number of processors that will be used to run threads and the number of physical processors if that is available. *) val numProcessors: unit -> int and numPhysicalProcessors: unit -> int option end structure Mutex: sig (*!A mutex provides simple mutual exclusion. A thread can lock a mutex and until it unlocks it no other thread will be able to lock it. Locking and unlocking are intended to be fast in the situation when there is no other process attempting to lock the mutex. These functions may not work correctly if an asynchronous interrupt is delivered during the calls. A thread should use synchronous interrupt when using these calls. *) type mutex (*!Make a new mutex *) val mutex: unit -> mutex (*!Lock a mutex. If the mutex is currently locked the thread is blocked until it is unlocked. If a thread tries to lock a mutex that it has previously locked the thread will deadlock. N.B. `lock` is not an interruption point (a point where synchronous interrupts are delivered) even though a thread can be blocked indefinitely.
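For example, a minimal illustrative sketch (not taken from the library itself) of using a mutex to protect a shared counter, reaching the structure as Thread.Mutex:

        val m = Thread.Mutex.mutex ()
        val counter = ref 0
        fun increment () =
            (Thread.Mutex.lock m; counter := !counter + 1; Thread.Mutex.unlock m)

The lock is held only for the update so the critical section stays short.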
*) val lock: mutex -> unit (*!Unlock a mutex and allow any waiting threads to run. The behaviour if the mutex was not previously locked by the calling thread is undefined. *) val unlock: mutex -> unit (*!Attempt to lock the mutex. Returns true if the mutex was not previously locked and has now been locked by the calling thread. Returns false if the mutex was previously locked, including by the calling thread. *) val trylock: mutex -> bool - end structure ConditionVar: sig (*!Condition variables are used to provide communication between threads. A condition variable is used in conjunction with a mutex and usually a reference to establish and test changes in state. The normal use is for one thread to lock a mutex, test the reference and then wait on the condition variable, releasing the lock on the mutex while it does so. Another thread may then lock the mutex, update the reference, unlock the mutex, and signal the condition variable. This wakes up the first thread and reacquires the lock allowing the thread to test the updated reference with the lock held. More complex communication mechanisms, such as blocking channels, can be written in terms of condition variables. *) type conditionVar (*!Make a new condition variable. *) val conditionVar: unit -> conditionVar (*!Release the mutex and block until the condition variable is signalled. When wait returns the mutex will have been re-acquired. If the thread is handling interrupts synchronously this function can be interrupted using the `Thread.interrupt` function or, if the thread is set to accept broadcast interrupts, `Thread.broadcastInterrupt`. The thread will re-acquire the mutex before the exception is delivered. An exception will only be delivered in this case if the interrupt is sent before the condition variable is signalled. If the interrupt is sent after the condition variable is signalled the function will return normally even if it has not yet re-acquired the mutex. The interrupt state will be delivered on the next call to "wait", `Thread.testInterrupt` or other blocking call. A thread should never call this function if it may receive an asynchronous interrupt. It should always set its interrupt state to either `InterruptSynch` or `InterruptDefer` beforehand. An asynchronous interrupt may leave the condition variable and the mutex in an indeterminate state and could lead to deadlock. A condition variable should only be associated with one mutex at a time. All the threads waiting on a condition variable should pass the same mutex as argument.*) val wait: conditionVar * Mutex.mutex -> unit (*!As wait except that it blocks until either the condition variable is signalled or the time (absolute) is reached. Either way the mutex is reacquired so there may be a further delay if it is held by another thread. *) val waitUntil: conditionVar * Mutex.mutex * Time.time -> bool (*!Wake up one thread if any are waiting on the condition variable. If there are several threads waiting for the condition variable one will be selected to run and will run as soon as it has re-acquired the lock.*) val signal: conditionVar -> unit (*!Wake up all threads waiting on the condition variable. *) val broadcast: conditionVar -> unit end end; structure Thread :> THREAD = struct exception Thread = RunCall.Thread structure Thread = struct open Thread (* Created in INITIALISE with thread type and self function. *) (* Equality is pointer equality. 
*) val equal : thread*thread->bool = op = datatype threadAttribute = EnableBroadcastInterrupt of bool | InterruptState of interruptState | MaximumMLStack of int option and interruptState = InterruptDefer | InterruptSynch | InterruptAsynch | InterruptAsynchOnce (* Convert attributes to bits and a mask. *) fun attrsToWord (at: threadAttribute list): Word.word * Word.word = let (* Check that a particular attribute appears only once. As well as accumulating the actual bits in the result we also accumulate the mask of bits. If any of these reappear we raise an exception. *) fun checkRepeat(r, acc, set, mask) = if Word.andb(set, mask) <> 0w0 then raise Thread "The same attribute appears more than once in the list" else convert(r, acc, Word.orb(set, mask)) and convert([], acc, set) = (acc, set) | convert(EnableBroadcastInterrupt true :: r, acc, set) = checkRepeat(r, Word.orb(acc, 0w1), set, 0w1) | convert(EnableBroadcastInterrupt false :: r, acc, set) = checkRepeat(r, acc (* No bit *), set, 0w1) | convert(InterruptState s :: r, acc, set) = checkRepeat(r, Word.orb(setIstateBits s, acc), set, 0w6) | convert(MaximumMLStack _ :: r, acc, set) = convert(r, acc, set) in convert(at, 0w0, 0w0) end and setIstateBits InterruptDefer = 0w0 | setIstateBits InterruptSynch = 0w2 | setIstateBits InterruptAsynch = 0w4 | setIstateBits InterruptAsynchOnce = 0w6 fun getIstateBits(w: Word.word): interruptState = let val ibits = Word.andb(w, 0w6) in if ibits = 0w0 then InterruptDefer else if ibits = 0w2 then InterruptSynch else if ibits = 0w4 then InterruptAsynch else InterruptAsynchOnce end fun wordToAttrs w = let (* Enable broadcast - true if bottom bit is set. *) val bcast = EnableBroadcastInterrupt(Word.andb(w, 0w1) = 0w1) in [bcast, InterruptState(getIstateBits w)] end exception Interrupt = RunCall.Interrupt (* The thread id is opaque outside this structure but is actually a six word mutable object. Word 0: Index into thread table (used inside the RTS only) Word 1: Flags: initialised by the RTS and set by this code Word 2: Thread local store: read and set by this code. Word 3: IntRequest: Set by the RTS if there is an interrupt pending Word 4: Maximum ML stack size. Unlimited is stored here as zero *) val threadIdFlags = 0w1 and threadIdThreadLocal = 0w2 and threadIdIntRequest = 0w3 and threadIdStackSize = 0w4 fun getLocal (t: 'a Universal.tag) : 'a option = let val root: Universal.universal ref list = RunCall.loadWord(self(), threadIdThreadLocal) fun doFind [] = NONE | doFind ((ref v)::r) = if Universal.tagIs t v then SOME(Universal.tagProject t v) else doFind r in doFind root end fun setLocal (t: 'a Universal.tag, newVal: 'a) : unit = let (* See if we already have this in the list. *) val root: Universal.universal ref list = RunCall.loadWord(self(), threadIdThreadLocal) fun doFind [] = (* Not in the list - Add it. *) RunCall.storeWord (self(), threadIdThreadLocal, ref (Universal.tagInject t newVal) :: root) | doFind (v::r) = if Universal.tagIs t (!v) (* If it's in the list update it. *) then v := Universal.tagInject t newVal else doFind r in doFind root end local val threadTestInterrupt: unit -> unit = RunCall.rtsCallFull0 "PolyThreadTestInterrupt" in fun testInterrupt() = (* If there is a pending request the word in the thread object will be non-zero. 
*) if RunCall.loadWord(self(), threadIdIntRequest) <> 0 then threadTestInterrupt() else () end val exit: unit -> unit = RunCall.rtsCallFull0 "PolyThreadKillSelf" and isActive: thread -> bool = RunCall.rtsCallFast1 "PolyThreadIsActive" and broadcastInterrupt: unit -> unit = RunCall.rtsCallFull0 "PolyThreadBroadcastInterrupt" local fun getAttrWord (me: thread) : Word.word = RunCall.loadWord(me, threadIdFlags) fun getStackSizeAsInt (me: thread) : int = RunCall.loadWord(me, threadIdStackSize) and getStackSize me : int option = case getStackSizeAsInt me of 0 => NONE | s => SOME s fun newStackSize ([], default) = default | newStackSize (MaximumMLStack NONE :: _, _) = 0 | newStackSize (MaximumMLStack (SOME n) :: _, _) = if n <= 0 then raise Thread "The stack size must be greater than zero" else n | newStackSize (_ :: l, default) = newStackSize (l, default) val threadMaxStackSize: int -> unit = RunCall.rtsCallFull1 "PolyThreadMaxStackSize" in (* Set attributes. Only changes the values that are specified. The others remain the same. *) fun setAttributes (attrs: threadAttribute list) : unit = let val me = self() val oldValues: Word.word = getAttrWord me val (newValue, mask) = attrsToWord attrs val stack = newStackSize(attrs, getStackSizeAsInt me) in RunCall.storeWord (self(), threadIdFlags, Word.orb(newValue, Word.andb(Word.notb mask, oldValues))); if stack = getStackSizeAsInt me then () else threadMaxStackSize stack; (* If we are now handling interrupts asynchronously check whether we have a pending interrupt now. This will only be effective if we were previously handling them synchronously or blocking them. *) if Word.andb(newValue, 0w4) = 0w4 then testInterrupt() else () end fun getAttributes() : threadAttribute list = let val me = self() in MaximumMLStack (getStackSize me) :: wordToAttrs(getAttrWord me) end (* These are used in the ConditionVar structure. They affect only the interrupt handling bits. *) fun getInterruptState(): interruptState = getIstateBits(getAttrWord(self())) and setInterruptState(s: interruptState): unit = RunCall.storeWord (self(), threadIdFlags, Word.orb(setIstateBits s, Word.andb(Word.notb 0w6, getAttrWord(self())))) local (* The default for a new thread is to ignore broadcasts and handle explicit interrupts synchronously. *) val (defaultAttrs, _) = attrsToWord[EnableBroadcastInterrupt false, InterruptState InterruptSynch] val threadForkFunction: (unit->unit) * word * int -> thread = RunCall.rtsCallFull3 "PolyThreadForkThread" in fun fork(f:unit->unit, attrs: threadAttribute list): thread = let (* Any attributes specified explicitly override the defaults. *) val (attrWord, mask) = attrsToWord attrs val attrValue = Word.orb(attrWord, Word.andb(Word.notb mask, defaultAttrs)) val stack = newStackSize(attrs, 0 (* Default is unlimited *)) (* Run the function and exit whether it returns normally or raises an exception. *) fun threadFunction () = (f() handle _ => ()) before exit() in threadForkFunction(threadFunction, attrValue, stack) end end end local (* Send an interrupt to a thread. If it returns false the thread did not exist and this should raise an exception. 
*) val threadSendInterrupt: thread -> bool = RunCall.rtsCallFast1 "PolyThreadInterruptThread" in fun interrupt(t: thread) = if threadSendInterrupt t then () else raise Thread "Thread does not exist" end local val threadKillThread: thread -> bool = RunCall.rtsCallFast1 "PolyThreadKillThread" in fun kill(t: thread) = if threadKillThread t then () else raise Thread "Thread does not exist" end val numProcessors: unit -> int = RunCall.rtsCallFast0 "PolyThreadNumProcessors" local val numberOfPhysical: unit -> int = RunCall.rtsCallFast0 "PolyThreadNumPhysicalProcessors" in fun numPhysicalProcessors(): int option = (* It is not always possible to get this information *) case numberOfPhysical() of 0 => NONE | n => SOME n end end structure Mutex = struct type mutex = Word.word ref val mutex = LibrarySupport.volatileWordRef (* Initially 0=unlocked. *) - open Thread (* atomicIncr, atomicDecr and atomicReset are set up by Initialise. *) + open Thread (* atomicExchangeAdd, atomicReset and cpuPause are set up by Initialise. *) val threadMutexBlock: mutex -> unit = RunCall.rtsCallFull1 "PolyThreadMutexBlock" val threadMutexUnlock: mutex -> unit = RunCall.rtsCallFull1 "PolyThreadMutexUnlock" (* A mutex is implemented as a Word.word ref. It is initially set to 0 and locked by atomically incrementing it. If it was previously unlocked the result will be one but if it was already locked it will be some positive value. When it is unlocked it is atomically decremented. If there was no contention the result will again be 0 but if some other thread tried to lock it the result will be one or positive. In that case the unlocking thread needs to call in to the RTS to wake up the blocked thread. The cost of contention on the lock is very high. To try to avoid this we first loop (spin) to see if we can get the lock without contention. *) val spin_cycle = 20000 fun spin (m: mutex, c: int) = - if ! m = 0w0 then () + if atomicExchAdd(m, 0w0) = 0w0 then () else if c = spin_cycle then () - else spin(m, c+1); + else (cpuPause(); spin(m, c+1)); fun lock (m: mutex): unit = let val () = spin(m, 0) - val newValue = atomicIncr m + val oldValue = atomicExchAdd(m, 0w1) in - if newValue = 0w1 + if oldValue = 0w0 then () (* We've acquired the lock. *) else (* It's locked. We return when we have the lock. *) ( threadMutexBlock m; lock m (* Try again. *) ) end fun unlock (m: mutex): unit = let - val newValue = atomicDecr m + val oldValue = atomicExchAdd(m, ~ 0w1) in - if newValue = 0w0 + if oldValue = 0w1 then () (* No contention. *) else (* Another thread has blocked and we have to release it. We can safely set the value to 0 here to release the lock. If another thread acquires it before we have woken up the other threads that's fine. Equally, if another thread incremented the count and saw it was still locked it will enter the RTS and try to acquire the lock there. It's probably better to reset it here rather than within the RTS since it allows another thread to acquire the lock immediately rather than after the rather long process of entering the RTS. Resetting this needs to be atomic with respect to atomic increment and decrement. That's not a problem on X86 so a simple assignment is sufficient but in the interpreter at least it's necessary to acquire a lock. *) ( atomicReset m; threadMutexUnlock m ) end (* Try to lock the mutex. If it was previously unlocked then lock it and return true otherwise return false.
*) Because we don't block here there is the possibility that the thread that has locked it could release the lock - shortly afterwards. The check for !m = 0w0 is an optimisation and nearly - all the time it avoids the call to atomicIncr setting m to a value > 1. + shortly afterwards. The check for atomicExchangeAdd(m, 0w0) = 0w0 + is an optimisation and the idea is that we avoid the increment which will + cause the thread that actually has the lock to have to call into the + RTS to release the, non-existent, blocked thread. There is a small chance that another thread could lock the mutex between the - test for !m = 0w0 and the atomicIncr. In that case the atomicIncr would + test and the atomicIncr. In that case the atomicIncr would return a value > 1 and the function that locked the mutex will have to call into the RTS to reset it when it is unlocked. *) fun trylock (m: mutex): bool = - if !m = 0w0 andalso atomicIncr m = 0w1 + if atomicExchAdd(m, 0w0) = 0w0 andalso atomicExchAdd(m, 0w1) = 0w0 then true (* We've acquired the lock. *) else false (* The lock was taken. *) end structure ConditionVar = struct open Thread (* A condition variable contains a lock and a list of suspended threads. *) type conditionVar = { lock: Mutex.mutex, threads: thread list ref } fun conditionVar(): conditionVar = { lock = Mutex.mutex(), threads = LibrarySupport.volatileListRef() } local val threadCondVarWait: Mutex.mutex -> unit = RunCall.rtsCallFull1 "PolyThreadCondVarWait" and threadCondVarWaitUntil: Mutex.mutex * Time.time -> unit = RunCall.rtsCallFull2 "PolyThreadCondVarWaitUntil" in fun innerWait({lock, threads}: conditionVar, m: Mutex.mutex, t: Time.time option) : bool = let val me = self() (* My thread id. *) fun waitAgain() = let fun doFind [] = false | doFind(h::t) = equal(h, me) orelse doFind t fun removeThis [] = raise Fail "Thread missing in list" | removeThis (h::t) = if equal(h, me) then t else h :: removeThis t val () = case t of SOME time => threadCondVarWaitUntil(lock, time) | NONE => threadCondVarWait lock val () = Mutex.lock lock (* Get the lock again. *) (* Are we still on the list? If so we haven't been explicitly woken up. We've either timed out, been interrupted or simply returned because the RTS needed to process some asynchronous results. *) val stillThere = doFind(!threads) open Time (* For >= *) in if not stillThere then (* We're done. *) ( Mutex.unlock lock; true ) else if (case t of NONE => false | SOME t => Time.now() >= t) then (* We've timed out. *) ( threads := removeThis(! threads); Mutex.unlock lock; false ) else ( (* See if we've been interrupted. If so remove ourselves and exit. *) testInterrupt() handle exn => (threads := removeThis(! threads); Mutex.unlock lock; raise exn); (* Otherwise just keep waiting. *) waitAgain() ) end in Mutex.lock lock; (* Lock the internal mutex. *) Mutex.unlock m; (* Unlock the external mutex *) threads := me :: !threads; (* Add ourselves to the list. *) waitAgain() (* Wait and return the result when we're done. *) end fun doWait(c: conditionVar, m: Mutex.mutex, t: Time.time option) : bool = let val originalIntstate = getInterruptState() (* Set this to handle interrupts synchronously unless we're already ignoring them. *) val () = if originalIntstate = InterruptDefer then () else setInterruptState InterruptSynch; (* Wait for the condition. If it raises an exception we still need to reacquire the lock unless we were handling interrupts asynchronously. *) val result = innerWait(c, m, t) handle exn => ( (* We had an exception.
If we were handling exceptions synchronously we reacquire the lock. If it was set to InterruptAsynchOnce this counts as a single asynchronous exception and we restore the state as InterruptSynch. *) case originalIntstate of InterruptDefer => (* Shouldn't happen? *) Mutex.lock m | InterruptSynch => Mutex.lock m | InterruptAsynch => setInterruptState InterruptAsynch | InterruptAsynchOnce => setInterruptState InterruptSynch; raise exn (* Reraise the exception*) ) in (* Restore the original interrupt state first. *) setInterruptState originalIntstate; (* Normal return. Reacquire the lock before returning. *) Mutex.lock m; result end fun wait(c: conditionVar, m: Mutex.mutex) : unit = (doWait(c, m, NONE); ()) and waitUntil(c: conditionVar, m: Mutex.mutex, t: Time.time) : bool = doWait(c, m, SOME t) end local (* This call wakes up the specified thread. If the thread has already been interrupted and is not ignoring interrupts it returns false. Otherwise it wakes up the thread and returns true. We have to use this because we define that if a thread is interrupted before it is signalled then it raises Interrupt. *) val threadCondVarWake: thread -> bool = RunCall.rtsCallFast1 "PolyThreadCondVarWake" (* Wake a single thread if we can (signal). *) fun wakeOne [] = [] | wakeOne (thread::rest) = if threadCondVarWake thread then rest else thread :: wakeOne rest (* Wake all threads (broadcast). *) fun wakeAll [] = [] (* Always returns the empty list. *) | wakeAll (thread::rest) = (threadCondVarWake thread; wakeAll rest) fun signalOrBroadcast({lock, threads}: conditionVar, wakeThreads) : unit = let val originalState = getInterruptState() in (* Set this to handle interrupts synchronously unless we're already ignoring them. We need to do this to avoid an asynchronous interrupt which could leave the internal lock in an inconsistent state. *) if originalState = InterruptDefer then () else setInterruptState InterruptSynch; (* Get the condition var lock. *) Mutex.lock lock; threads := wakeThreads(! threads); Mutex.unlock lock; setInterruptState originalState; (* Restore original state. *) (* Test if we were interrupted while we were handling interrupts synchronously. *) if originalState = InterruptAsynch orelse originalState = InterruptAsynchOnce then testInterrupt() else () end in fun signal cv = signalOrBroadcast(cv, wakeOne) and broadcast cv = signalOrBroadcast(cv, wakeAll) end end end; local fun prettyMutex _ _ (_: Thread.Mutex.mutex) = PolyML.PrettyString "?" and prettyThread _ _ (_: Thread.Thread.thread) = PolyML.PrettyString "?" and prettyCondVar _ _ (_: Thread.ConditionVar.conditionVar) = PolyML.PrettyString "?" in val () = PolyML.addPrettyPrinter prettyMutex and () = PolyML.addPrettyPrinter prettyThread and () = PolyML.addPrettyPrinter prettyCondVar end; diff --git a/libpolyml/arm64.cpp b/libpolyml/arm64.cpp index 4d6f3aac..2be1b43b 100644 --- a/libpolyml/arm64.cpp +++ b/libpolyml/arm64.cpp @@ -1,483 +1,475 @@ /* Machine-dependent code for ARM64 Copyright David C.J. Matthews 2020-21. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ // Currently this is just copied from the interpreted version. #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #ifdef HAVE_ASSERT_H #include #define ASSERT(x) assert(x) #else #define ASSERT(x) 0 #endif #ifdef HAVE_STRING_H #include #endif #include "globals.h" #include "machine_dep.h" #include "sys.h" #include "profiling.h" #include "arb.h" #include "processes.h" #include "run_time.h" #include "gc.h" #include "diagnostics.h" #include "polystring.h" #include "save_vec.h" #include "memmgr.h" #include "scanaddrs.h" #include "rtsentry.h" #include "bytecode.h" /* * ARM64 register use: * X0 First argument and return value * X1-X7 Second-eighth argument * X8 Indirect result (C), ML closure pointer on entry * X9-X15 Volatile scratch registers * X16-17 Intra-procedure-call (C). Only used for special cases in ML. * X18 Platform register. Not used in ML. * X19-X25 Non-volatile (C). Scratch registers (ML). * X26 ML assembly interface pointer. Non-volatile (C). * X27 ML Heap allocation pointer. Non-volatile (C). * X28 ML Stack pointer. Non-volatile (C). * X29 Frame pointer (C). Not used in ML * X30 Link register. Not used in ML. * X31 Stack pointer (C). Not used in ML. Also zero register. * * Floating point registers: * V0 First argument and return value * V1-V7 Second-eighth argument * V8-V15 Non volatile. Not currently used in ML. * V16-V31 Volatile. Not currently used in ML. * * The ML calling conventions generally follow the C ABI except that * all registers are volatile and X28 is used for the stack. */ /* the amount of ML stack space to reserve for registers, C exception handling etc. The compiler requires us to reserve 2 stack-frames worth (2 * 20 words). We actually reserve slightly more than this. */ #define OVERFLOW_STACK_SIZE 50 // X26 always points at this area when executing ML code. // The offsets are built into the assembly code and some are built into // the code generator typedef struct _AssemblyArgs { public: byte* enterInterpreter; // These are filled in with the functions. byte* heapOverFlowCall; byte* stackOverFlowCall; byte* stackOverFlowCallEx; byte* trapHandlerEntry; stackItem exceptionPacket; // Set if there is an exception PolyWord threadId; // My thread id. Saves having to call into RTS for it. byte returnReason; // Reason for returning from ML - Set by assembly code. } AssemblyArgs; class Arm64TaskData: public TaskData, ByteCodeInterpreter { public: Arm64TaskData(); ~Arm64TaskData() {} AssemblyArgs assemblyInterface; int saveRegisterMask; // Registers that need to be updated by a GC. virtual void GarbageCollect(ScanAddress *process); void ScanStackAddress(ScanAddress *process, stackItem& val, StackSpace *stack); virtual void EnterPolyCode(); // Start running ML virtual void SetException(poly_exn *exc) { assemblyInterface.exceptionPacket = (PolyWord)exc; } virtual void InterruptCode(); // AddTimeProfileCount is used in time profiling. virtual bool AddTimeProfileCount(SIGNALCONTEXT *context); virtual void InitStackFrame(TaskData *newTask, Handle proc); - // Increment or decrement the first word of the object pointed to by the - // mutex argument and return the new value. - virtual POLYUNSIGNED AtomicDecrement(PolyObject* mutexp); + // Atomic exchange-and-add. 
Used in the process module to release a mutex and in the + // interpreter. It needs to use the same instruction that compiled code uses. + virtual POLYSIGNED AtomicExchAdd(PolyObject* mutexp, POLYSIGNED incr); // Set a mutex to zero. virtual void AtomicReset(PolyObject* mutexp); - virtual POLYUNSIGNED AtomicIncrement(PolyObject* mutexp); // Return the minimum space occupied by the stack. Used when setting a limit. virtual uintptr_t currentStackSpace(void) const { return ((stackItem*)this->stack->top - this->taskSp) + OVERFLOW_STACK_SIZE; } virtual void addProfileCount(POLYUNSIGNED words) { addSynchronousCount(interpreterPc, words); } virtual void CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length); PLock interruptLock; virtual void ClearExceptionPacket() { assemblyInterface.exceptionPacket = TAGGED(0); } virtual PolyWord GetExceptionPacket() { return assemblyInterface.exceptionPacket; } virtual stackItem* GetHandlerRegister() { return hr; } virtual void SetHandlerRegister(stackItem* newHr) { hr = newHr; } virtual void HandleStackOverflow(uintptr_t space); stackItem *taskSp; /* Stack pointer. */ stackItem *hr; stackItem *sl; /* Stack limit register. */ }; // Values for the returnReason byte enum RETURN_REASON { RETURN_HEAP_OVERFLOW = 1, RETURN_STACK_OVERFLOW = 2, RETURN_STACK_OVERFLOWEX = 3, RETURN_ENTER_INTERPRETER = 4 }; extern "C" { // These are declared in the assembly code segment. void Arm64AsmSwitchToPoly(void*); int Arm64AsmCallExtraRETURN_ENTER_INTERPRETER(void); int Arm64AsmCallExtraRETURN_HEAP_OVERFLOW(void); int Arm64AsmCallExtraRETURN_STACK_OVERFLOW(void); int Arm64AsmCallExtraRETURN_STACK_OVERFLOWEX(void); // This is declared here and called from the assembly code. // It avoids having a call to an external in the assembly code // which sometimes gives problems with position-indepent code. void Arm64TrapHandler(PolyWord threadId); }; Arm64TaskData::Arm64TaskData() : ByteCodeInterpreter(&taskSp, &sl) { } class Arm64Dependent : public MachineDependent { public: Arm64Dependent() {} // Create a task data object. virtual TaskData *CreateTaskData(void) { return new Arm64TaskData(); } virtual Architectures MachineArchitecture(void) { return MA_Arm64; } }; void Arm64TaskData::InitStackFrame(TaskData *parentTask, Handle proc) /* Initialise stack frame. */ { StackSpace *space = this->stack; StackObject *stack = (StackObject *)space->stack(); PolyObject *closure = DEREFWORDHANDLE(proc); uintptr_t stack_size = space->spaceSize() * sizeof(PolyWord) / sizeof(stackItem); this->taskSp = (stackItem*)stack + stack_size; this->hr = this->taskSp; *(--this->taskSp) = (PolyWord)closure; /* Closure address */ this->interpreterPc = *(POLYCODEPTR*)closure; this->assemblyInterface.exceptionPacket = TAGGED(0); /* Used for exception argument. */ } void Arm64TaskData::HandleStackOverflow(uintptr_t space) { uintptr_t min_size = (this->stack->top - (PolyWord*)taskSp) + OVERFLOW_STACK_SIZE + space; try { // The stack check has failed. This may either be because we really have // overflowed the stack or because the stack limit value has been adjusted // to result in a call here. CheckAndGrowStack(this, min_size); } catch (IOException&) { // We may get an exception while handling this if we run out of store } { PLocker l(&interruptLock); // Set the stack limit. This clears any interrupt and also sets the // correct value if we've grown the stack. 
sl = (stackItem*)stack->bottom + OVERFLOW_STACK_SIZE; } try { processes->ProcessAsynchRequests(this); // Release and re-acquire use of the ML memory to allow another thread // to GC. processes->ThreadReleaseMLMemory(this); processes->ThreadUseMLMemory(this); } catch (IOException&) { } } void Arm64TaskData::GarbageCollect(ScanAddress *process) { TaskData::GarbageCollect(process); ByteCodeInterpreter::GarbageCollect(process); if (assemblyInterface.exceptionPacket.w().IsDataPtr()) { PolyObject* obj = assemblyInterface.exceptionPacket.w().AsObjPtr(); obj = process->ScanObjectAddress(obj); assemblyInterface.exceptionPacket = (PolyWord)obj; } if (stack != 0) { stackItem*stackPtr = this->taskSp; // Now the values on the stack. for (stackItem* q = stackPtr; q < (stackItem*)stack->top; q++) ScanStackAddress(process, *q, stack); } } // Process a value within the stack. void Arm64TaskData::ScanStackAddress(ScanAddress *process, stackItem& stackItem, StackSpace *stack) { // We may have return addresses on the stack which could look like // tagged values. Check whether the value is in the code area before // checking whether it is untagged. #ifdef POLYML32IN64 // In 32-in-64 return addresses always have the top 32 bits non-zero. if (stackItem.argValue < ((uintptr_t)1 << 32)) { // It's either a tagged integer or an object pointer. if (stackItem.w().IsDataPtr()) { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } } else { // Could be a code address or a stack address. MemSpace* space = gMem.SpaceForAddress(stackItem.codeAddr - 1); if (space == 0 || space->spaceType != ST_CODE) return; PolyObject* obj = gMem.FindCodeObject(stackItem.codeAddr); ASSERT(obj != 0); // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } #else // The -1 here is because we may have a zero-sized cell in the last // word of a space. MemSpace* space = gMem.SpaceForAddress(stackItem.codeAddr - 1); if (space == 0) return; // In particular we may have one of the assembly code addresses. if (space->spaceType == ST_CODE) { PolyObject* obj = gMem.FindCodeObject(stackItem.codeAddr); // If it is actually an integer it might be outside a valid code object. if (obj == 0) { ASSERT(stackItem.w().IsTagged()); // It must be an integer } else // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } else if (space->spaceType == ST_LOCAL && stackItem.w().IsDataPtr()) // Local values must be word addresses. { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } #endif } // Copy a stack void Arm64TaskData::CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length) { #ifdef POLYML32IN64 old_length = old_length / 2; new_length = new_length / 2; #endif /* Moves a stack, updating all references within the stack */ stackItem*old_base = (stackItem*)old_stack; stackItem*new_base = (stackItem*)new_stack; stackItem*old_top = old_base + old_length; /* Calculate the offset of the new stack from the old. If the frame is being extended objects in the new frame will be further up the stack than in the old one. */ uintptr_t offset = new_base - old_base + new_length - old_length; stackItem *oldSp = this->taskSp; this->taskSp = oldSp + offset; this->hr = this->hr + offset; /* Skip the unused part of the stack. 
*/ uintptr_t i = oldSp - old_base; ASSERT(i <= old_length); i = old_length - i; stackItem *old = oldSp; stackItem *newp = this->taskSp; while (i--) { stackItem old_word = *old++; if (old_word.w().IsDataPtr() && old_word.stackAddr >= old_base && old_word.stackAddr <= old_top) old_word.stackAddr = old_word.stackAddr + offset; else if (old_word.w().IsDataPtr() && IsHeapAddress(old_word.stackAddr)) { stackItem* addr = (stackItem*)old_word.w().AsStackAddr(); if (addr >= old_base && addr <= old_top) { addr += offset; old_word = PolyWord::FromStackAddr((PolyWord*)addr); } } *newp++ = old_word; } ASSERT(old == ((stackItem*)old_stack) + old_length); ASSERT(newp == ((stackItem*)new_stack) + new_length); } void Arm64TaskData::EnterPolyCode() /* Called from "main" to enter the code. */ { sl = (stackItem*)((PolyWord*)this->stack->stack() + OVERFLOW_STACK_SIZE); // Run the interpreter. Currently this can never return. // Callbacks are not currently implemented which is the only case that could // occur in the pure interpreted version. (void)RunInterpreter(this); exitThread(this); // This thread is exiting. ASSERT(0); // We should never get here. } // This is called from a different thread so we have to be careful. void Arm64TaskData::InterruptCode() { PLocker l(&interruptLock); // Set the stack limit pointer to the top of the stack to cause // a trap when we next check for stack overflow. // We use a lock here to ensure that we always use the current value of the // stack. The thread we're interrupting could be growing the stack at this point. if (stack != 0) sl = (stackItem*)(stack->top - 1); } // As far as possible we want locking and unlocking an ML mutex to be fast so // we try to implement the code in the assembly code using appropriate // interlocked instructions. That does mean that if we need to lock and // unlock an ML mutex in this code we have to use the same, machine-dependent, // code to do it. These are defaults that are used where there is no // machine-specific code. #if defined(_MSC_VER) // This saves having to define it in the MASM assembly code. -static POLYUNSIGNED Arm64AsmAtomicExchangeAndAdd(PolyObject* mutexp, POLYSIGNED addend) +static POLYSIGNED Arm64AsmAtomicExchangeAndAdd(PolyObject* mutexp, POLYSIGNED addend) { # if (SIZEOF_POLYWORD == 8) return InterlockedExchangeAdd64((LONG64*)mutexp, addend); # else return InterlockedExchangeAdd((LONG*)mutexp, addend); # endif } #else extern "C" { // This is only defined in the GAS assembly code - POLYUNSIGNED Arm64AsmAtomicExchangeAndAdd(PolyObject*, POLYSIGNED); + POLYSIGNED Arm64AsmAtomicExchangeAndAdd(PolyObject*, POLYSIGNED); } #endif -// Decrement the value contained in the first word of the mutex. -// The addend is TAGGED(+/-1) - 1 -POLYUNSIGNED Arm64TaskData::AtomicDecrement(PolyObject* mutexp) +// Do the exchange-and-add +POLYSIGNED Arm64TaskData::AtomicExchAdd(PolyObject* mutexp, POLYSIGNED incr) { - return Arm64AsmAtomicExchangeAndAdd(mutexp, -2) - 2; -} - -// Increment the value contained in the first word of the mutex. -POLYUNSIGNED Arm64TaskData::AtomicIncrement(PolyObject* mutexp) -{ - return Arm64AsmAtomicExchangeAndAdd(mutexp, 2) + 2; + return Arm64AsmAtomicExchangeAndAdd(mutexp, incr - TAGGED(0).AsSigned()/* Remove the tag */); } // Release a mutex. Because the atomic increment and decrement // use the hardware atomic load-and-add we can simply set this to zero. void Arm64TaskData::AtomicReset(PolyObject* mutexp) { mutexp->Set(0, TAGGED(0)); // Set this to released. 
} bool Arm64TaskData::AddTimeProfileCount(SIGNALCONTEXT *context) { if (interpreterPc != 0) { // See if the PC we've got is an ML code address. MemSpace *space = gMem.SpaceForAddress(interpreterPc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { incrementCountAsynch(interpreterPc); return true; } } return false; } extern "C" { POLYEXTERNALSYMBOL POLYUNSIGNED PolyInterpretedEnterIntMode(); POLYEXTERNALSYMBOL POLYUNSIGNED PolyEndBootstrapMode(FirstArgument threadId, PolyWord function); } // Do we require EnterInt instructions and if so for which architecture? // 0 = > None; 1 => X86_32, 2 => X86_64. 3 => X86_32_in_64. POLYUNSIGNED PolyInterpretedEnterIntMode() { return TAGGED(0).AsUnsigned(); } // This has no effect in the interpreted version. POLYUNSIGNED PolyEndBootstrapMode(FirstArgument threadId, PolyWord function) { TaskData* taskData = TaskData::FindTaskForId(threadId); ASSERT(taskData != 0); taskData->PreRTSCall(); Handle pushedFunction = taskData->saveVec.push(function); taskData->InitStackFrame(taskData, pushedFunction); taskData->EnterPolyCode(); // Should never return. ASSERT(0); return TAGGED(0).AsUnsigned(); } static Arm64Dependent arm64Dependent; MachineDependent *machineDependent = &arm64Dependent; // No machine-specific calls in the interpreter. struct _entrypts machineSpecificEPT[] = { { "PolyInterpretedEnterIntMode", (polyRTSFunction)&PolyInterpretedEnterIntMode }, { "PolyEndBootstrapMode", (polyRTSFunction)&PolyEndBootstrapMode }, { NULL, NULL} // End of list. }; diff --git a/libpolyml/bytecode.cpp b/libpolyml/bytecode.cpp index 075e0f84..0ffdcf44 100644 --- a/libpolyml/bytecode.cpp +++ b/libpolyml/bytecode.cpp @@ -1,2601 +1,2615 @@ /* Title: An interpreter for a compact instruction set. Author: Dave Matthews, Cambridge University Computer Laboratory Copyright (c) 2000-7 Cambridge University Technical Services Limited - Further development Copyright David C.J. Matthews 2015-18, 2020. + Further development Copyright David C.J. Matthews 2015-18, 2020-21. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #ifdef HAVE_STDIO_H #include #endif #ifdef HAVE_ASSERT_H #include #define ASSERT(x) assert(x) #else #define ASSERT(x) 0 #endif #ifdef HAVE_STRING_H #include #endif /* #ifdef HAVE_FLOAT_H #include #endif #ifdef HAVE_MATH_H #include #endif */ #include // Currently just for isnan. 
#include "globals.h" #include "int_opcodes.h" #include "sys.h" #include "profiling.h" #include "arb.h" #include "reals.h" #include "run_time.h" #include "gc.h" #include "diagnostics.h" #include "polystring.h" #include "scanaddrs.h" #include "rtsentry.h" #include "bytecode.h" #if (SIZEOF_VOIDP == 8 && !defined(POLYML32IN64)) #define IS64BITS 1 #endif #define arg1 (pc[0] + pc[1]*256) #define arg2 (pc[2] + pc[3]*256) const PolyWord True = TAGGED(1); const PolyWord False = TAGGED(0); const PolyWord Zero = TAGGED(0); // This duplicates some code in reals.cpp but is now updated. #define DOUBLESIZE (sizeof(double)/sizeof(POLYUNSIGNED)) union realdb { double dble; POLYUNSIGNED puns[DOUBLESIZE]; }; #define LGWORDSIZE (sizeof(uintptr_t) / sizeof(PolyWord)) // We're using float for Real32 so it needs to be 32-bits. // Assume that's true for the moment. #if (SIZEOF_FLOAT != 4) #error "Float is not 32-bits. Please report this" #endif union flt { float fl; int32_t i; }; ByteCodeInterpreter::ByteCodeInterpreter(stackItem** spAddr, stackItem** slAddr) : mixedCode(false), stackPointerAddress(spAddr), stackLimitAddress(slAddr), overflowPacket(0), dividePacket(0) { #ifdef PROFILEOPCODES memset(frequency, 0, sizeof(frequency)); memset(arg1Value, 0, sizeof(arg1Value)); memset(arg2Value, 0, sizeof(arg2Value)); #endif } ByteCodeInterpreter::~ByteCodeInterpreter() { #ifdef PROFILEOPCODES OutputDebugStringA("Frequency\n"); for (unsigned i = 0; i < 256; i++) { if (frequency[i] != 0) { char buffer[100]; sprintf(buffer, "%02X: %u\n", i, frequency[i]); OutputDebugStringA(buffer); } } OutputDebugStringA("Arg1\n"); for (unsigned i = 0; i < 256; i++) { if (arg1Value[i] != 0) { char buffer[100]; sprintf(buffer, "%02X: %u\n", i, arg1Value[i]); OutputDebugStringA(buffer); } } OutputDebugStringA("Arg2\n"); for (unsigned i = 0; i < 256; i++) { if (arg2Value[i] != 0) { char buffer[100]; sprintf(buffer, "%02X: %u\n", i, arg2Value[i]); OutputDebugStringA(buffer); } } #endif } extern "C" { typedef POLYUNSIGNED(*callFastRts0)(); typedef POLYUNSIGNED(*callFastRts1)(intptr_t); typedef POLYUNSIGNED(*callFastRts2)(intptr_t, intptr_t); typedef POLYUNSIGNED(*callFastRts3)(intptr_t, intptr_t, intptr_t); typedef POLYUNSIGNED(*callFastRts4)(intptr_t, intptr_t, intptr_t, intptr_t); typedef POLYUNSIGNED(*callFastRts5)(intptr_t, intptr_t, intptr_t, intptr_t, intptr_t); typedef double (*callRTSRtoR) (double); typedef double (*callRTSRRtoR) (double, double); typedef double (*callRTSGtoR) (intptr_t); typedef double (*callRTSRGtoR) (double, intptr_t); typedef float(*callRTSFtoF) (float); typedef float(*callRTSFFtoF) (float, float); typedef float(*callRTSGtoF) (intptr_t); typedef float(*callRTSFGtoF) (float, intptr_t); } // Allocate memory on the heap. Returns with the address of the cell. Does not set the // length word or any of the data. PolyObject* ByteCodeInterpreter::allocateMemory(TaskData * taskData, POLYUNSIGNED words, POLYCODEPTR& pc, stackItem*& sp) { words++; // Add the size of the length word. // N.B. The allocation area may be empty so that both of these are zero. if (taskData->allocPointer >= taskData->allocLimit + words + 1) { #ifdef POLYML32IN64 if (words & 1) words++; #endif taskData->allocPointer -= words; return (PolyObject*)(taskData->allocPointer + 1); } // Insufficient space. SaveInterpreterState(pc, sp); // Find some space to allocate in. Returns a pointer to the newly allocated space. // N.B. This may return zero if the heap is exhausted and it has set this // up for an exception. 
Generally it allocates by decrementing allocPointer // but if the required memory is large it may allocate in a separate area. PolyWord* space = processes->FindAllocationSpace(taskData, words, true); LoadInterpreterState(pc, sp); if (space == 0) return 0; return (PolyObject*)(space + 1); } // Put a real result in a "box" PolyObject* ByteCodeInterpreter::boxDouble(TaskData* taskData, double d, POLYCODEPTR& pc, stackItem*& sp) { PolyObject* mem = this->allocateMemory(taskData, DOUBLESIZE, pc, sp); if (mem == 0) return 0; mem->SetLengthWord(DOUBLESIZE, F_BYTE_OBJ); union realdb uniondb; uniondb.dble = d; // Copy the words. Depending on the word length this may copy one or more words. for (unsigned i = 0; i < DOUBLESIZE; i++) mem->Set(i, PolyWord::FromUnsigned(uniondb.puns[i])); return mem; } // Extract a double value from a box. double ByteCodeInterpreter::unboxDouble(PolyWord p) { union realdb uniondb; for (unsigned i = 0; i < DOUBLESIZE; i++) uniondb.puns[i] = p.AsObjPtr()->Get(i).AsUnsigned(); return uniondb.dble; } // Largely copied from reals.cpp #if (SIZEOF_FLOAT < SIZEOF_POLYWORD) // Typically for 64-bit mode. Use a tagged representation. // The code-generator on the X86/64 assumes the float is in the // high order word. #define FLT_SHIFT ((SIZEOF_POLYWORD-SIZEOF_FLOAT)*8) float ByteCodeInterpreter::unboxFloat(PolyWord p) { union flt argx; argx.i = p.AsSigned() >> FLT_SHIFT; return argx.fl; } PolyObject* ByteCodeInterpreter::boxFloat(TaskData* taskData, float f, POLYCODEPTR& pc, stackItem*& sp) { union flt argx; argx.fl = f; PolyWord p = PolyWord::FromSigned(((POLYSIGNED)argx.i << FLT_SHIFT) + 1); return p.AsObjPtr(); // Temporarily cast it to this even though it isn't really } #else // Typically for 32-bit mode. Use a boxed representation. PolyObject* ByteCodeInterpreter::boxFloat(TaskData* taskData, float f, POLYCODEPTR& pc, stackItem*& sp) { PolyObject* mem = this->allocateMemory(taskData, 1, pc, sp); if (mem == 0) return 0; mem->SetLengthWord(1, F_BYTE_OBJ); union flt argx; argx.fl = f; mem->Set(0, PolyWord::FromSigned(argx.i)); return mem; } // Extract a double value from a box. float ByteCodeInterpreter::unboxFloat(PolyWord p) { union flt argx; argx.i = (int32_t)p.AsObjPtr()->Get(0).AsSigned(); return argx.fl; } #endif enum ByteCodeInterpreter::_returnValue ByteCodeInterpreter::RunInterpreter(TaskData *taskData) /* (Re)-enter the Poly code from C. */ { // Make packets for exceptions. if (overflowPacket == 0) overflowPacket = makeExceptionPacket(taskData, EXC_overflow); if (dividePacket == 0) dividePacket = makeExceptionPacket(taskData, EXC_divide); // Local values. These are copies of member variables but are used so frequently that // it is important that access should be fast. POLYCODEPTR pc; stackItem*sp; LoadInterpreterState(pc, sp); // We may have taken an interrupt which has set an exception. if (GetExceptionPacket().IsDataPtr()) goto RAISE_EXCEPTION; for(;;){ /* Each instruction */ #if (0) char buff[1000]; sprintf(buff, "addr = %p sp=%p instr=%02x *sp=%p\n", pc, sp, *pc, (*sp).stackAddr); OutputDebugStringA(buff); #endif // These are temporary values used where one instruction jumps to // common code. 
POLYUNSIGNED tailCount; stackItem* tailPtr; POLYUNSIGNED returnCount; POLYUNSIGNED storeWords; POLYUNSIGNED stackCheck; PolyObject *closure; double dv; #ifdef PROFILEOPCODES frequency[*pc]++; #endif switch(*pc++) { case INSTR_jump8false: { PolyWord u = *sp++; if (u == True) pc += 1; else pc += *pc + 1; break; } case INSTR_jump8: pc += *pc + 1; break; case INSTR_jump8True: { PolyWord u = *sp++; if (u == False) pc += 1; else pc += *pc + 1; break; } case INSTR_jump16True: // Invert the sense of the test and fall through. *sp = ((*sp).w() == True) ? False : True; case INSTR_jump16false: { PolyWord u = *sp++; /* Pop argument */ if (u == True) { pc += 2; break; } /* else - false - take the jump */ } case INSTR_jump16: pc += arg1 + 2; break; case INSTR_push_handler: /* Save the old handler value. */ (*(--sp)).stackAddr = GetHandlerRegister(); /* Push old handler */ break; case INSTR_setHandler8: /* Set up a handler */ (*(--sp)).codeAddr = pc + *pc + 1; /* Address of handler */ SetHandlerRegister(sp); pc += 1; break; case INSTR_setHandler16: /* Set up a handler */ (*(--sp)).codeAddr = pc + arg1 + 2; /* Address of handler */ SetHandlerRegister(sp); pc += 2; break; case INSTR_deleteHandler: /* Delete handler retaining the result. */ { stackItem u = *sp++; sp = GetHandlerRegister(); sp++; // Remove handler entry point SetHandlerRegister((*sp).stackAddr); // Restore old handler *sp = u; // Put back the result break; } case INSTR_case16: { // arg1 is the largest value that is in the range POLYSIGNED u = UNTAGGED(*sp++); /* Get the value */ if (u >= arg1 || u < 0) pc += 2 + arg1*2; /* Out of range */ else { pc += 2; pc += /* Index */pc[u*2]+pc[u*2 + 1]*256; } break; } case INSTR_tail_b_b: tailCount = *pc; tailPtr = sp + tailCount; sp = tailPtr + pc[1]; TAIL_CALL: /* For general case. */ if (tailCount < 2) Crash("Invalid argument\n"); numTailArguments = (unsigned)(tailCount - 2); for (; tailCount > 0; tailCount--) *(--sp) = *(--tailPtr); pc = (*sp++).codeAddr; /* Pop the original return address. */ closure = (*sp++).w().AsObjPtr(); if (mixedCode) { // Return to the caller in case the function we're calling is machine code. // The number of arguments we're passing is given in the tail-count. There's // no enter-int after this because we're not coming back. (--sp)->codeAddr = pc; *(--sp) = (PolyWord)closure; SaveInterpreterState(pc, sp); return ReturnTailCall; } goto CALL_CLOSURE; /* And drop through. */ case INSTR_call_closure: /* Closure call. */ { closure = (*sp++).w().AsObjPtr(); CALL_CLOSURE: (--sp)->codeAddr = pc; /* Save return address. */ *(--sp) = (PolyWord)closure; if (mixedCode) { SaveInterpreterState(pc, sp); return ReturnCall; // Caller must look at enter-int to determine number of args } pc = *(POLYCODEPTR*)closure; /* Get entry point. 
*/ SaveInterpreterState(pc, sp); // Update in case we're profiling // Check that there at least 128 words on the stack stackCheck = 128; goto STACKCHECK; } case INSTR_callConstAddr8: closure = (*(PolyWord*)(pc + pc[0] + 1)).AsObjPtr(); pc += 1; goto CALL_CLOSURE; case INSTR_callConstAddr16: closure = (*(PolyWord*)(pc + arg1 + 2)).AsObjPtr(); pc += 2; goto CALL_CLOSURE; case INSTR_callConstAddr8_8: closure = ((PolyWord*)(pc + pc[0] + 2))[pc[1] + 3].AsObjPtr(); pc += 2; goto CALL_CLOSURE; case INSTR_callConstAddr8_0: closure = ((PolyWord*)(pc + pc[0] + 1))[3].AsObjPtr(); pc += 1; goto CALL_CLOSURE; case INSTR_callConstAddr8_1: closure = ((PolyWord*)(pc + pc[0] + 1))[4].AsObjPtr(); pc += 1; goto CALL_CLOSURE; case INSTR_callConstAddr16_8: closure = ((PolyWord*)(pc + arg1 + 3))[pc[2] + 3].AsObjPtr(); pc += 3; goto CALL_CLOSURE; case INSTR_callLocalB: { closure = (sp[*pc++]).w().AsObjPtr(); goto CALL_CLOSURE; } case INSTR_return_w: returnCount = arg1; /* Get no. of args to remove. */ RETURN: /* Common code for return. */ { stackItem result = *sp++; /* Result */ sp++; /* Remove the link/closure */ pc = (*sp++).codeAddr; /* Return address */ sp += returnCount; /* Add on number of args. */ *(--sp) = result; /* Result */ SaveInterpreterState(pc, sp); // Update in case we're profiling or if returning if (mixedCode) return ReturnReturn; } break; case INSTR_return_b: returnCount = *pc; goto RETURN; case INSTR_return_1: returnCount = 1; goto RETURN; case INSTR_return_2: returnCount = 2; goto RETURN; case INSTR_return_3: returnCount = 3; goto RETURN; case INSTR_stackSize16: { stackCheck = arg1; pc += 2; STACKCHECK: // Check stack space. This is combined with interrupts on the native code version. if (sp - stackCheck < *stackLimitAddress) { SaveInterpreterState(pc, sp); HandleStackOverflow(stackCheck); LoadInterpreterState(pc, sp); } break; } case INSTR_raise_ex: { { PolyException *exn = (PolyException*)((*sp).w().AsObjPtr()); taskData->SetException(exn); } RAISE_EXCEPTION: sp = GetHandlerRegister(); pc = (*sp++).codeAddr; // It is possible we could raise an exception to be // handled by native code but that does not currently happen // during the bootstrap. 
SetHandlerRegister((*sp++).stackAddr); break; } case INSTR_tuple_2: storeWords = 2; goto TUPLE; case INSTR_tuple_3: storeWords = 3; goto TUPLE; case INSTR_tuple_4: storeWords = 4; goto TUPLE; case INSTR_tuple_b: storeWords = *pc; pc++; goto TUPLE; case INSTR_closureB: storeWords = *pc++; goto CREATE_CLOSURE; break; case INSTR_local_w: { stackItem u = sp[arg1]; *(--sp) = u; pc += 2; break; } case INSTR_constAddr8: *(--sp) = *(PolyWord*)(pc + pc[0] + 1); pc += 1; break; case INSTR_constAddr16: *(--sp) = *(PolyWord*)(pc + arg1 + 2); pc += 2; break; case INSTR_constAddr8_8: *(--sp) = ((PolyWord*)(pc + pc[0]+ 2))[pc[1] + 3]; pc += 2; break; case INSTR_constAddr8_0: *(--sp) = ((PolyWord*)(pc + pc[0] + 1))[3]; pc += 1; break; case INSTR_constAddr8_1: *(--sp) = ((PolyWord*)(pc + pc[0] + 1))[4]; pc += 1; break; case INSTR_constAddr16_8: *(--sp) = ((PolyWord*)(pc + arg1 + 3))[pc[2] + 3]; pc += 3; break; case INSTR_const_int_w: *(--sp) = TAGGED(arg1); pc += 2; break; case INSTR_jump_back8: pc -= *pc + 1; // Check for interrupt in case we're in a loop if (sp < *stackLimitAddress) { SaveInterpreterState(pc, sp); HandleStackOverflow(0); LoadInterpreterState(pc, sp); } break; case INSTR_jump_back16: pc -= arg1 + 1; // Check for interrupt in case we're in a loop if (sp < *stackLimitAddress) { SaveInterpreterState(pc, sp); HandleStackOverflow(0); LoadInterpreterState(pc, sp); } break; case INSTR_lock: { PolyObject *obj = (*sp).w().AsObjPtr(); obj->SetLengthWord(obj->LengthWord() & ~_OBJ_MUTABLE_BIT); break; } case INSTR_ldexc: *(--sp) = GetExceptionPacket(); break; case INSTR_local_b: { stackItem u = sp[*pc]; *(--sp) = u; pc += 1; break; } case INSTR_indirect_b: *sp = (*sp).w().AsObjPtr()->Get(*pc); pc += 1; break; case INSTR_indirectLocalBB: { PolyWord u = sp[*pc++]; *(--sp) = u.AsObjPtr()->Get(*pc++); break; } case INSTR_indirectLocalB0: { PolyWord u = sp[*pc++]; *(--sp) = u.AsObjPtr()->Get(0); break; } case INSTR_indirect0Local0: { PolyWord u = sp[0]; *(--sp) = u.AsObjPtr()->Get(0); break; } case INSTR_indirectLocalB1: { PolyWord u = sp[*pc++]; *(--sp) = u.AsObjPtr()->Get(1); break; } case INSTR_moveToContainerB: { PolyWord u = *sp++; (*sp).stackAddr[*pc] = u; pc += 1; break; } case INSTR_moveToMutClosureB: { PolyWord u = *sp++; (*sp).w().AsObjPtr()->Set(*pc++ + sizeof(uintptr_t) / sizeof(PolyWord), u); break; } case INSTR_indirectContainerB: *sp = (*sp).stackAddr[*pc]; pc += 1; break; case INSTR_indirectClosureBB: { PolyWord u = sp[*pc++]; *(--sp) = u.AsObjPtr()->Get(*pc++ + sizeof(uintptr_t) / sizeof(PolyWord)); break; } case INSTR_indirectClosureB0: { PolyWord u = sp[*pc++]; *(--sp) = u.AsObjPtr()->Get(sizeof(uintptr_t) / sizeof(PolyWord)); break; } case INSTR_indirectClosureB1: { PolyWord u = sp[*pc++]; *(--sp) = u.AsObjPtr()->Get(sizeof(uintptr_t) / sizeof(PolyWord) + 1); break; } case INSTR_indirectClosureB2: { PolyWord u = sp[*pc++]; *(--sp) = u.AsObjPtr()->Get(sizeof(uintptr_t) / sizeof(PolyWord) + 2); break; } case INSTR_set_stack_val_b: { PolyWord u = *sp++; sp[*pc-1] = u; pc += 1; break; } case INSTR_reset_b: sp += *pc; pc += 1; break; case INSTR_reset_r_b: { PolyWord u = *sp; sp += *pc; *sp = u; pc += 1; break; } case INSTR_const_int_b: *(--sp) = TAGGED(*pc); pc += 1; break; case INSTR_local_0: { stackItem u = sp[0]; *(--sp) = u; break; } case INSTR_local_1: { stackItem u = sp[1]; *(--sp) = u; break; } case INSTR_local_2: { stackItem u = sp[2]; *(--sp) = u; break; } case INSTR_local_3: { stackItem u = sp[3]; *(--sp) = u; break; } case INSTR_local_4: { stackItem u = sp[4]; *(--sp) = u; break; 
} case INSTR_local_5: { stackItem u = sp[5]; *(--sp) = u; break; } case INSTR_local_6: { stackItem u = sp[6]; *(--sp) = u; break; } case INSTR_local_7: { stackItem u = sp[7]; *(--sp) = u; break; } case INSTR_local_8: { stackItem u = sp[8]; *(--sp) = u; break; } case INSTR_local_9: { stackItem u = sp[9]; *(--sp) = u; break; } case INSTR_local_10: { stackItem u = sp[10]; *(--sp) = u; break; } case INSTR_local_11: { stackItem u = sp[11]; *(--sp) = u; break; } case INSTR_local_12: { stackItem u = sp[12]; *(--sp) = u; break; } case INSTR_local_13: { stackItem u = sp[13]; *(--sp) = u; break; } case INSTR_local_14: { stackItem u = sp[14]; *(--sp) = u; break; } case INSTR_local_15: { stackItem u = sp[15]; *(--sp) = u; break; } case INSTR_indirect_0: *sp = (*sp).w().AsObjPtr()->Get(0); break; case INSTR_indirect_1: *sp = (*sp).w().AsObjPtr()->Get(1); break; case INSTR_indirect_2: *sp = (*sp).w().AsObjPtr()->Get(2); break; case INSTR_indirect_3: *sp = (*sp).w().AsObjPtr()->Get(3); break; case INSTR_indirect_4: *sp = (*sp).w().AsObjPtr()->Get(4); break; case INSTR_indirect_5: *sp = (*sp).w().AsObjPtr()->Get(5); break; case INSTR_const_0: *(--sp) = Zero; break; case INSTR_const_1: *(--sp) = TAGGED(1); break; case INSTR_const_2: *(--sp) = TAGGED(2); break; case INSTR_const_3: *(--sp) = TAGGED(3); break; case INSTR_const_4: *(--sp) = TAGGED(4); break; case INSTR_const_10: *(--sp) = TAGGED(10); break; case INSTR_reset_r_1: { PolyWord u = *sp; sp += 1; *sp = u; break; } case INSTR_reset_r_2: { PolyWord u = *sp; sp += 2; *sp = u; break; } case INSTR_reset_r_3: { PolyWord u = *sp; sp += 3; *sp = u; break; } case INSTR_reset_1: sp += 1; break; case INSTR_reset_2: sp += 2; break; case INSTR_stack_containerB: { POLYUNSIGNED words = *pc++; while (words-- > 0) *(--sp) = Zero; sp--; (*sp).stackAddr = sp + 1; break; } case INSTR_callFastRTS0: { callFastRts0 doCall = *(callFastRts0*)(*sp++).w().AsObjPtr(); ClearExceptionPacket(); SaveInterpreterState(pc, sp); POLYUNSIGNED result = doCall(); LoadInterpreterState(pc, sp); // If this raised an exception if (GetExceptionPacket().IsDataPtr()) goto RAISE_EXCEPTION; *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRTS1: { callFastRts1 doCall = *(callFastRts1*)(*sp++).w().AsObjPtr(); intptr_t rtsArg1 = (*sp++).argValue; ClearExceptionPacket(); SaveInterpreterState(pc, sp); POLYUNSIGNED result = doCall(rtsArg1); LoadInterpreterState(pc, sp); // If this raised an exception if (GetExceptionPacket().IsDataPtr()) goto RAISE_EXCEPTION; *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRTS2: { callFastRts2 doCall = *(callFastRts2*)(*sp++).w().AsObjPtr(); intptr_t rtsArg2 = (*sp++).argValue; // Pop off the args, last arg first. intptr_t rtsArg1 = (*sp++).argValue; ClearExceptionPacket(); SaveInterpreterState(pc, sp); POLYUNSIGNED result = doCall(rtsArg1, rtsArg2); LoadInterpreterState(pc, sp); // If this raised an exception if (GetExceptionPacket().IsDataPtr()) goto RAISE_EXCEPTION; *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRTS3: { callFastRts3 doCall = *(callFastRts3*)(*sp++).w().AsObjPtr(); intptr_t rtsArg3 = (*sp++).argValue; // Pop off the args, last arg first. 
intptr_t rtsArg2 = (*sp++).argValue; intptr_t rtsArg1 = (*sp++).argValue; ClearExceptionPacket(); SaveInterpreterState(pc, sp); POLYUNSIGNED result = doCall(rtsArg1, rtsArg2, rtsArg3); LoadInterpreterState(pc, sp); // If this raised an exception if (GetExceptionPacket().IsDataPtr()) goto RAISE_EXCEPTION; *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRTS4: { callFastRts4 doCall = *(callFastRts4*)(*sp++).w().AsObjPtr(); intptr_t rtsArg4 = (*sp++).argValue; // Pop off the args, last arg first. intptr_t rtsArg3 = (*sp++).argValue; intptr_t rtsArg2 = (*sp++).argValue; intptr_t rtsArg1 = (*sp++).argValue; ClearExceptionPacket(); SaveInterpreterState(pc, sp); POLYUNSIGNED result = doCall(rtsArg1, rtsArg2, rtsArg3, rtsArg4); LoadInterpreterState(pc, sp); // If this raised an exception if (GetExceptionPacket().IsDataPtr()) goto RAISE_EXCEPTION; *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_callFastRTS5: { callFastRts5 doCall = *(callFastRts5*)(*sp++).w().AsObjPtr(); intptr_t rtsArg5 = (*sp++).argValue; // Pop off the args, last arg first. intptr_t rtsArg4 = (*sp++).argValue; intptr_t rtsArg3 = (*sp++).argValue; intptr_t rtsArg2 = (*sp++).argValue; intptr_t rtsArg1 = (*sp++).argValue; ClearExceptionPacket(); SaveInterpreterState(pc, sp); POLYUNSIGNED result = doCall(rtsArg1, rtsArg2, rtsArg3, rtsArg4, rtsArg5); LoadInterpreterState(pc, sp); // If this raised an exception if (GetExceptionPacket().IsDataPtr()) goto RAISE_EXCEPTION; *(--sp) = PolyWord::FromUnsigned(result); break; } case INSTR_notBoolean: *sp = ((*sp).w() == True) ? False : True; break; case INSTR_isTagged: *sp = (*sp).w().IsTagged() ? True : False; break; case INSTR_cellLength: /* Return the length word. */ *sp = TAGGED((*sp).w().AsObjPtr()->Length()); break; case INSTR_cellFlags: { PolyObject *p = (*sp).w().AsObjPtr(); POLYUNSIGNED f = (p->LengthWord()) >> OBJ_PRIVATE_FLAGS_SHIFT; *sp = TAGGED(f); break; } case INSTR_clearMutable: { PolyObject *obj = (*sp).w().AsObjPtr(); POLYUNSIGNED lengthW = obj->LengthWord(); /* Clear the mutable bit. */ obj->SetLengthWord(lengthW & ~_OBJ_MUTABLE_BIT); *sp = Zero; break; } case INSTR_atomicIncr: { PolyObject* p = (*sp).w().AsObjPtr(); POLYUNSIGNED newValue = taskData->AtomicIncrement(p); *sp = PolyWord::FromUnsigned(newValue); break; } case INSTR_atomicDecr: { PolyObject *p = (*sp).w().AsObjPtr(); POLYUNSIGNED newValue = taskData->AtomicDecrement(p); *sp = PolyWord::FromUnsigned(newValue); break; } case INSTR_equalWord: { PolyWord u = *sp++; *sp = u == (*sp) ? True : False; break; } case INSTR_jumpNEqLocal: { // Compare a local with a constant and jump if not equal. PolyWord u = sp[pc[0]]; if (u.IsTagged() && u.UnTagged() == pc[1]) pc += 3; else pc += pc[2] + 3; break; } case INSTR_jumpNEqLocalInd: { // Test the union tag value in the first word of a tuple. PolyWord u = sp[pc[0]]; u = u.AsObjPtr()->Get(0); if (u.IsTagged() && u.UnTagged() == pc[1]) pc += 3; else pc += pc[2] + 3; break; } case INSTR_isTaggedLocalB: { PolyWord u = sp[*pc++]; *(--sp) = u.IsTagged() ? True : False; break; } case INSTR_jumpTaggedLocal: { PolyWord u = sp[*pc]; // Jump if the value is tagged. if (u.IsTagged()) pc += pc[1] + 2; else pc += 2; break; } case INSTR_lessSigned: { PolyWord u = *sp++; *sp = ((*sp).w().AsSigned() < u.AsSigned()) ? True : False; break; } case INSTR_lessUnsigned: { PolyWord u = *sp++; *sp = ((*sp).w().AsUnsigned() < u.AsUnsigned()) ? True : False; break; } case INSTR_lessEqSigned: { PolyWord u = *sp++; *sp = ((*sp).w().AsSigned() <= u.AsSigned()) ? 
True : False; break; } case INSTR_lessEqUnsigned: { PolyWord u = *sp++; *sp = ((*sp).w().AsUnsigned() <= u.AsUnsigned()) ? True : False; break; } case INSTR_greaterSigned: { PolyWord u = *sp++; *sp = ((*sp).w().AsSigned() > u.AsSigned()) ? True : False; break; } case INSTR_greaterUnsigned: { PolyWord u = *sp++; *sp = ((*sp).w().AsUnsigned() > u.AsUnsigned()) ? True : False; break; } case INSTR_greaterEqSigned: { PolyWord u = *sp++; *sp = ((*sp).w().AsSigned() >= u.AsSigned()) ? True : False; break; } case INSTR_greaterEqUnsigned: { PolyWord u = *sp++; *sp = ((*sp).w().AsUnsigned() >= u.AsUnsigned()) ? True : False; break; } case INSTR_fixedAdd: { PolyWord x = *sp++; PolyWord y = (*sp); POLYSIGNED t = UNTAGGED(x) + UNTAGGED(y); if (t <= MAXTAGGED && t >= -MAXTAGGED-1) *sp = TAGGED(t); else { taskData->SetException((poly_exn*)overflowPacket); goto RAISE_EXCEPTION; } break; } case INSTR_fixedSub: { PolyWord x = *sp++; PolyWord y = (*sp); POLYSIGNED t = UNTAGGED(y) - UNTAGGED(x); if (t <= MAXTAGGED && t >= -MAXTAGGED-1) *sp = TAGGED(t); else { taskData->SetException((poly_exn*)overflowPacket); goto RAISE_EXCEPTION; } break; } case INSTR_fixedMult: { // There's no simple way to detect signed overflow in multiplication. // Unsigned multiplication is defined to wrap but signed is not and // GCC optimised away the previous test we had here. PolyWord x = *sp++; PolyWord y = (*sp); try { Handle mark = taskData->saveVec.mark(); SaveInterpreterState(pc, sp); Handle result = mult_longc(taskData, taskData->saveVec.push(x), taskData->saveVec.push(y)); LoadInterpreterState(pc, sp); *sp = result->Word(); taskData->saveVec.reset(mark); if ((*sp).w().IsDataPtr()) { taskData->SetException((poly_exn*)overflowPacket); goto RAISE_EXCEPTION; } } catch (IOException&) { // We could run out of store goto RAISE_EXCEPTION; } break; } case INSTR_fixedQuot: { // Zero and overflow are checked for in ML. POLYSIGNED u = UNTAGGED(*sp++); PolyWord y = (*sp); *sp = TAGGED(UNTAGGED(y) / u); break; } case INSTR_fixedRem: { // Zero and overflow are checked for in ML. POLYSIGNED u = UNTAGGED(*sp++); PolyWord y = (*sp); *sp = TAGGED(UNTAGGED(y) % u); break; } case INSTR_wordAdd: { PolyWord u = *sp++; // Because we're not concerned with overflow we can just add the values and subtract the tag. *sp = PolyWord::FromUnsigned((*sp).w().AsUnsigned() + u.AsUnsigned() - TAGGED(0).AsUnsigned()); break; } case INSTR_wordSub: { PolyWord u = *sp++; *sp = PolyWord::FromUnsigned((*sp).w().AsUnsigned() - u.AsUnsigned() + TAGGED(0).AsUnsigned()); break; } case INSTR_wordMult: { PolyWord u = *sp++; *sp = TAGGED(UNTAGGED_UNSIGNED(*sp) * UNTAGGED_UNSIGNED(u)); break; } case INSTR_wordDiv: { POLYUNSIGNED u = UNTAGGED_UNSIGNED(*sp++); // Detection of zero is done in ML *sp = TAGGED(UNTAGGED_UNSIGNED(*sp) / u); break; } case INSTR_wordMod: { POLYUNSIGNED u = UNTAGGED_UNSIGNED(*sp++); *sp = TAGGED(UNTAGGED_UNSIGNED(*sp) % u); break; } case INSTR_wordAnd: { PolyWord u = *sp++; // Since both of these should be tagged the tag bit will be preserved. *sp = PolyWord::FromUnsigned((*sp).w().AsUnsigned() & u.AsUnsigned()); break; } case INSTR_wordOr: { PolyWord u = *sp++; // Since both of these should be tagged the tag bit will be preserved. *sp = PolyWord::FromUnsigned((*sp).w().AsUnsigned() | u.AsUnsigned()); break; } case INSTR_wordXor: { PolyWord u = *sp++; // This will remove the tag bit so it has to be reinstated. 
*sp = PolyWord::FromUnsigned(((*sp).w().AsUnsigned() ^ u.AsUnsigned()) | TAGGED(0).AsUnsigned()); break; } case INSTR_wordShiftLeft: { // ML requires shifts greater than a word to return zero. // That's dealt with at the higher level. PolyWord u = *sp++; *sp = TAGGED(UNTAGGED_UNSIGNED(*sp) << UNTAGGED_UNSIGNED(u)); break; } case INSTR_wordShiftRLog: { PolyWord u = *sp++; *sp = TAGGED(UNTAGGED_UNSIGNED(*sp) >> UNTAGGED_UNSIGNED(u)); break; } case INSTR_arbAdd: { PolyWord x = *sp++; PolyWord y = (*sp); if (x.IsTagged() && y.IsTagged()) { POLYSIGNED t = UNTAGGED(x) + UNTAGGED(y); if (t <= MAXTAGGED && t >= -MAXTAGGED - 1) { *sp = TAGGED(t); break; } } // One argument was untagged or there was an overflow try { Handle mark = taskData->saveVec.mark(); SaveInterpreterState(pc, sp); Handle result = add_longc(taskData, taskData->saveVec.push(x), taskData->saveVec.push(y)); LoadInterpreterState(pc, sp); *sp = result->Word(); taskData->saveVec.reset(mark); } catch (IOException&) { // We could run out of store goto RAISE_EXCEPTION; } break; } case INSTR_arbSubtract: { PolyWord x = *sp++; PolyWord y = (*sp); if (x.IsTagged() && y.IsTagged()) { POLYSIGNED t = UNTAGGED(y) - UNTAGGED(x); if (t <= MAXTAGGED && t >= -MAXTAGGED - 1) { *sp = TAGGED(t); break; } } // One argument was untagged or there was an overflow try { Handle mark = taskData->saveVec.mark(); SaveInterpreterState(pc, sp); Handle result = sub_longc(taskData, taskData->saveVec.push(x), taskData->saveVec.push(y)); LoadInterpreterState(pc, sp); *sp = result->Word(); taskData->saveVec.reset(mark); } catch (IOException&) { // We could run out of store goto RAISE_EXCEPTION; } break; } case INSTR_arbMultiply: { // See comment on fixedMultiply above PolyWord x = *sp++; PolyWord y = (*sp); try { Handle mark = taskData->saveVec.mark(); SaveInterpreterState(pc, sp); Handle result = mult_longc(taskData, taskData->saveVec.push(x), taskData->saveVec.push(y)); LoadInterpreterState(pc, sp); *sp = result->Word(); taskData->saveVec.reset(mark); } catch (IOException&) { // We could run out of store goto RAISE_EXCEPTION; } break; } case INSTR_allocByteMem: { // Allocate byte segment. This does not need to be initialised. POLYUNSIGNED flags = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp); PolyObject *t = this->allocateMemory(taskData, length, pc, sp); if (t == 0) goto RAISE_EXCEPTION; // Exception t->SetLengthWord(length, (byte)flags); *sp = (PolyWord)t; break; } case INSTR_getThreadId: *(--sp) = (PolyWord)taskData->threadObject; break; case INSTR_allocWordMemory: { // Allocate word segment. This must be initialised. // We mustn't pop the initialiser until after any potential GC. POLYUNSIGNED length = UNTAGGED_UNSIGNED(sp[2]); PolyObject *t = this->allocateMemory(taskData, length, pc, sp); if (t == 0) goto RAISE_EXCEPTION; PolyWord initialiser = *sp++; POLYUNSIGNED flags = UNTAGGED_UNSIGNED(*sp++); t->SetLengthWord(length, (byte)flags); *sp = (PolyWord)t; // Have to initialise the data. for (; length > 0; ) t->Set(--length, initialiser); break; } case INSTR_alloc_ref: { // Allocate a single word mutable cell. This is more common than allocWordMemory on its own. PolyObject *t = this->allocateMemory(taskData, 1, pc, sp); if (t == 0) goto RAISE_EXCEPTION; PolyWord initialiser = (*sp); t->SetLengthWord(1, F_MUTABLE_BIT); t->Set(0, initialiser); *sp = (PolyWord)t; break; } case INSTR_allocMutClosureB: { // Allocate memory for a mutable closure and copy in the code address. 
POLYUNSIGNED length = *pc++ + sizeof(uintptr_t) / sizeof(PolyWord); PolyObject* t = this->allocateMemory(taskData, length, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(length, F_CLOSURE_OBJ | F_MUTABLE_BIT); PolyObject* srcClosure = (*sp).w().AsObjPtr(); *(uintptr_t*)t = *(uintptr_t*)srcClosure; for (POLYUNSIGNED i = sizeof(uintptr_t) / sizeof(PolyWord); i < length; i++) t->Set(i, TAGGED(0)); *sp = (PolyWord)t; break; } case INSTR_loadMLWord: { POLYUNSIGNED index = UNTAGGED(*sp++); PolyObject* p = (PolyObject*)((*sp).w().AsCodePtr()); *sp = p->Get(index); break; } case INSTR_loadMLByte: { // The values on the stack are base and index. POLYUNSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = (*sp).w().AsCodePtr(); *sp = TAGGED(p[index]); // Have to tag the result break; } case INSTR_loadUntagged: { POLYUNSIGNED index = UNTAGGED(*sp++); PolyObject* p = (PolyObject*)((*sp).w().AsCodePtr()); *sp = TAGGED(p->Get(index).AsUnsigned()); break; } case INSTR_storeMLWord: { PolyWord toStore = *sp++; POLYUNSIGNED index = UNTAGGED(*sp++); PolyObject* p = (PolyObject*)((*sp).w().AsCodePtr()); p->Set(index, toStore); *sp = Zero; break; } case INSTR_storeMLByte: { POLYUNSIGNED toStore = UNTAGGED(*sp++); POLYUNSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = (*sp).w().AsCodePtr(); p[index] = (byte)toStore; *sp = Zero; break; } case INSTR_storeUntagged: { PolyWord toStore = PolyWord::FromUnsigned(UNTAGGED_UNSIGNED(*sp++)); POLYUNSIGNED index = UNTAGGED(*sp++); PolyObject* p = (PolyObject*)((*sp).w().AsCodePtr()); p->Set(index, toStore); *sp = Zero; break; } case INSTR_blockMoveWord: { POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED destIndex = UNTAGGED_UNSIGNED(*sp++); PolyObject* dest = (PolyObject*)((*sp++).w().AsCodePtr()); POLYUNSIGNED srcIndex = UNTAGGED_UNSIGNED(*sp++); PolyObject* src = (PolyObject*)((*sp).w().AsCodePtr()); for (POLYUNSIGNED u = 0; u < length; u++) dest->Set(destIndex + u, src->Get(srcIndex + u)); *sp = Zero; break; } case INSTR_blockMoveByte: { POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED destOffset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR dest = (*sp++).w().AsCodePtr(); POLYUNSIGNED srcOffset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR src = (*sp).w().AsCodePtr(); memcpy(dest+destOffset, src+srcOffset, length); *sp = Zero; break; } case INSTR_blockEqualByte: { POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED arg2Offset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR arg2Ptr = (*sp++).w().AsCodePtr(); POLYUNSIGNED arg1Offset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR arg1Ptr = (*sp).w().AsCodePtr(); *sp = memcmp(arg1Ptr+arg1Offset, arg2Ptr+arg2Offset, length) == 0 ? True : False; break; } case INSTR_blockCompareByte: { POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp++); POLYUNSIGNED arg2Offset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR arg2Ptr = (*sp++).w().AsCodePtr(); POLYUNSIGNED arg1Offset = UNTAGGED_UNSIGNED(*sp++); POLYCODEPTR arg1Ptr = (*sp).w().AsCodePtr(); int result = memcmp(arg1Ptr+arg1Offset, arg2Ptr+arg2Offset, length); *sp = result == 0 ? TAGGED(0) : result < 0 ? TAGGED(-1) : TAGGED(1); break; } case INSTR_escape: { switch (*pc++) { case EXTINSTR_callFastRRtoR: { // Floating point call. callRTSRRtoR doCall = *(callRTSRRtoR*)(*sp++).w().AsObjPtr(); PolyWord rtsArg2 = *sp++; PolyWord rtsArg1 = *sp++; double argument1 = unboxDouble(rtsArg1); double argument2 = unboxDouble(rtsArg2); // Allocate memory for the result. 
double result = doCall(argument1, argument2); PolyObject* t = boxDouble(taskData, result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = (PolyWord)t; break; } case EXTINSTR_callFastRGtoR: { // Call that takes a POLYUNSIGNED argument and returns a double. callRTSRGtoR doCall = *(callRTSRGtoR*)(*sp++).w().AsObjPtr(); intptr_t rtsArg2 = (*sp++).w().AsSigned(); PolyWord rtsArg1 = *sp++; double argument1 = unboxDouble(rtsArg1); // Allocate memory for the result. double result = doCall(argument1, rtsArg2); PolyObject* t = boxDouble(taskData, result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = (PolyWord)t; break; } case EXTINSTR_callFastGtoR: { // Call that takes a POLYUNSIGNED argument and returns a double. callRTSGtoR doCall = *(callRTSGtoR*)(*sp++).w().AsObjPtr(); intptr_t rtsArg1 = (*sp++).w().AsSigned(); // Allocate memory for the result. double result = doCall(rtsArg1); PolyObject* t = boxDouble(taskData, result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = (PolyWord)t; break; } case EXTINSTR_callFastFtoF: { // Floating point call. The call itself does not allocate but we // need to put the result into a "box". callRTSFtoF doCall = *(callRTSFtoF*)(*sp++).w().AsObjPtr(); PolyWord rtsArg1 = *sp++; float argument = unboxFloat(rtsArg1); // Allocate memory for the result. float result = doCall(argument); PolyObject* t = boxFloat(taskData, result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = (PolyWord)t; break; } case EXTINSTR_callFastFFtoF: { // Floating point call. callRTSFFtoF doCall = *(callRTSFFtoF*)(*sp++).w().AsObjPtr(); PolyWord rtsArg2 = *sp++; PolyWord rtsArg1 = *sp++; float argument1 = unboxFloat(rtsArg1); float argument2 = unboxFloat(rtsArg2); // Allocate memory for the result. float result = doCall(argument1, argument2); PolyObject* t = boxFloat(taskData, result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = (PolyWord)t; break; } case EXTINSTR_callFastGtoF: { // Call that takes a POLYUNSIGNED argument and returns a double. callRTSGtoF doCall = *(callRTSGtoF*)(*sp++).w().AsObjPtr(); intptr_t rtsArg1 = (*sp++).w().AsSigned(); // Allocate memory for the result. float result = doCall(rtsArg1); PolyObject* t = boxFloat(taskData, result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = (PolyWord)t; break; } case EXTINSTR_callFastFGtoF: { // Call that takes a POLYUNSIGNED argument and returns a double. callRTSFGtoF doCall = *(callRTSFGtoF*)(*sp++).w().AsObjPtr(); intptr_t rtsArg2 = (*sp++).w().AsSigned(); PolyWord rtsArg1 = *sp++; float argument1 = unboxFloat(rtsArg1); // Allocate memory for the result. float result = doCall(argument1, rtsArg2); PolyObject* t = boxFloat(taskData, result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = (PolyWord)t; break; } case EXTINSTR_callFastRtoR: { // Floating point call. The call itself does not allocate but we // need to put the result into a "box". callRTSRtoR doCall = *(callRTSRtoR*)(*sp++).w().AsObjPtr(); PolyWord rtsArg1 = *sp++; double argument = unboxDouble(rtsArg1); // Allocate memory for the result. 
double result = doCall(argument); PolyObject* t = boxDouble(taskData, result, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *(--sp) = (PolyWord)t; break; } + case EXTINSTR_atomicExchAdd: + { + PolyWord u = *sp++; + PolyObject* p = (*sp).w().AsObjPtr(); + *sp = PolyWord::FromSigned(taskData->AtomicExchAdd(p, u.AsSigned())); + break; + } + case EXTINSTR_atomicReset: { // This is needed in the interpreted version otherwise there // is a chance that we could set the value to zero while another // thread is between getting the old value and setting it to the new value. PolyObject* p = (*sp).w().AsObjPtr(); taskData->AtomicReset(p); *sp = TAGGED(0); // Push the unit result break; } case EXTINSTR_longWToTagged: { // Extract the first word and return it as a tagged value. This loses the top-bit POLYUNSIGNED wx = (*sp).w().AsObjPtr()->Get(0).AsUnsigned(); *sp = TAGGED(wx); break; } case EXTINSTR_signedToLongW: { // Shift the tagged value to remove the tag and put it into the first word. // The original sign bit is copied in the shift. intptr_t wx = (*sp).w().UnTagged(); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(intptr_t*)t = wx; *sp = (PolyWord)t; break; } case EXTINSTR_unsignedToLongW: { // As with the above except the value is treated as an unsigned // value and the top bit is zero. uintptr_t wx = (*sp).w().UnTaggedUnsigned(); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wx; *sp = (PolyWord)t; break; } case EXTINSTR_realAbs: { PolyObject* t = this->boxDouble(taskData, fabs(unboxDouble(*sp)), pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_realNeg: { PolyObject* t = this->boxDouble(taskData, -(unboxDouble(*sp)), pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_floatAbs: { PolyObject* t = this->boxFloat(taskData, fabs(unboxFloat(*sp)), pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_floatNeg: { PolyObject* t = this->boxFloat(taskData, -(unboxFloat(*sp)), pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_fixedIntToReal: { POLYSIGNED u = UNTAGGED(*sp); PolyObject* t = this->boxDouble(taskData, (double)u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_fixedIntToFloat: { POLYSIGNED u = UNTAGGED(*sp); PolyObject* t = this->boxFloat(taskData, (float)u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_floatToReal: { float u = unboxFloat(*sp); PolyObject* t = this->boxDouble(taskData, (double)u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_wordShiftRArith: { PolyWord u = *sp++; // Strictly speaking, C does not require that this uses // arithmetic shifting so we really ought to set the // high-order bits explicitly. *sp = TAGGED(UNTAGGED(*sp) >> UNTAGGED(u)); break; } case EXTINSTR_lgWordEqual: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); *sp = wx == wy ? True : False; break; } case EXTINSTR_lgWordLess: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); *sp = (wy < wx) ? 
True : False; break; } case EXTINSTR_lgWordLessEq: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); *sp = (wy <= wx) ? True : False; break; } case EXTINSTR_lgWordGreater: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); *sp = (wy > wx) ? True : False; break; } case EXTINSTR_lgWordGreaterEq: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); *sp = (wy >= wx) ? True : False; break; } case EXTINSTR_lgWordAdd: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy + wx; *sp = (PolyWord)t; break; } case EXTINSTR_lgWordSub: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy - wx; *sp = (PolyWord)t; break; } case EXTINSTR_lgWordMult: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy * wx; *sp = (PolyWord)t; break; } case EXTINSTR_lgWordDiv: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy / wx; *sp = (PolyWord)t; break; } case EXTINSTR_lgWordMod: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy % wx; *sp = (PolyWord)t; break; } case EXTINSTR_lgWordAnd: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy & wx; *sp = (PolyWord)t; break; } case EXTINSTR_lgWordOr: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy | wx; *sp = (PolyWord)t; break; } case EXTINSTR_lgWordXor: { uintptr_t wx = *(uintptr_t*)((*sp++).w().AsObjPtr()); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy ^ wx; *sp = (PolyWord)t; break; } case EXTINSTR_lgWordShiftLeft: { // The shift amount is a tagged word not a boxed large word POLYUNSIGNED wx = UNTAGGED_UNSIGNED(*sp++); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); 
*(uintptr_t*)t = wy << wx; *sp = (PolyWord)t; break; } case EXTINSTR_lgWordShiftRLog: { // The shift amount is a tagged word not a boxed large word POLYUNSIGNED wx = UNTAGGED_UNSIGNED(*sp++); uintptr_t wy = *(uintptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = wy >> wx; *sp = (PolyWord)t; break; } case EXTINSTR_lgWordShiftRArith: { // The shift amount is a tagged word not a boxed large word POLYUNSIGNED wx = UNTAGGED_UNSIGNED(*sp++); intptr_t wy = *(intptr_t*)((*sp).w().AsObjPtr()); PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(intptr_t*)t = wy >> wx; *sp = (PolyWord)t; break; } case EXTINSTR_realEqual: { double u = unboxDouble(*sp++); *sp = u == unboxDouble(*sp) ? True : False; break; } case EXTINSTR_realLess: { double u = unboxDouble(*sp++); *sp = unboxDouble(*sp) < u ? True : False; break; } case EXTINSTR_realLessEq: { double u = unboxDouble(*sp++); *sp = unboxDouble(*sp) <= u ? True : False; break; } case EXTINSTR_realGreater: { double u = unboxDouble(*sp++); *sp = unboxDouble(*sp) > u ? True : False; break; } case EXTINSTR_realGreaterEq: { double u = unboxDouble(*sp++); *sp = unboxDouble(*sp) >= u ? True : False; break; } case EXTINSTR_realUnordered: { double u = unboxDouble(*sp++); double v = unboxDouble(*sp); *sp = (std::isnan(u) || std::isnan(v)) ? True : False; break; } case EXTINSTR_realAdd: { double u = unboxDouble(*sp++); double v = unboxDouble(*sp); PolyObject* t = this->boxDouble(taskData, v + u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_realSub: { double u = unboxDouble(*sp++); double v = unboxDouble(*sp); PolyObject* t = this->boxDouble(taskData, v - u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_realMult: { double u = unboxDouble(*sp++); double v = unboxDouble(*sp); PolyObject* t = this->boxDouble(taskData, v * u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_realDiv: { double u = unboxDouble(*sp++); double v = unboxDouble(*sp); PolyObject* t = this->boxDouble(taskData, v / u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_floatEqual: { float u = unboxFloat(*sp++); *sp = u == unboxFloat(*sp) ? True : False; break; } case EXTINSTR_floatLess: { float u = unboxFloat(*sp++); *sp = unboxFloat(*sp) < u ? True : False; break; } case EXTINSTR_floatLessEq: { float u = unboxFloat(*sp++); *sp = unboxFloat(*sp) <= u ? True : False; break; } case EXTINSTR_floatGreater: { float u = unboxFloat(*sp++); *sp = unboxFloat(*sp) > u ? True : False; break; } case EXTINSTR_floatGreaterEq: { float u = unboxFloat(*sp++); *sp = unboxFloat(*sp) >= u ? True : False; break; } case EXTINSTR_floatUnordered: { float u = unboxFloat(*sp++); float v = unboxFloat(*sp); *sp = (std::isnan(u) || std::isnan(v)) ? 
True : False; break; } case EXTINSTR_floatAdd: { float u = unboxFloat(*sp++); float v = unboxFloat(*sp); PolyObject* t = this->boxFloat(taskData, v + u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_floatSub: { float u = unboxFloat(*sp++); float v = unboxFloat(*sp); PolyObject* t = this->boxFloat(taskData, v - u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_floatMult: { float u = unboxFloat(*sp++); float v = unboxFloat(*sp); PolyObject* t = this->boxFloat(taskData, v * u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_floatDiv: { float u = unboxFloat(*sp++); float v = unboxFloat(*sp); PolyObject* t = this->boxFloat(taskData, v / u, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_realToFloat: { // Convert a double to a float. It's complicated because it depends on the rounding mode. int rMode = *pc++; int current = getrounding(); // If the rounding is 4 it means "use current rounding". // Don't call unboxDouble until we're set the rounding. GCC seems to convert it // before the actual float cast. if (rMode < 4) setrounding(rMode); double d = unboxDouble(*sp); float v = (float)d; // Convert with the appropriate rounding. setrounding(current); PolyObject* t = this->boxFloat(taskData, v, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_realToInt: dv = unboxDouble(*sp); goto realtoint; case EXTINSTR_floatToInt: dv = (double)unboxFloat(*sp); realtoint: { // Convert a double or a float to a tagged integer. int rMode = *pc++; // We mustn't try converting a value that will overflow the conversion // but we need to be careful that we don't raise overflow incorrectly due // to rounding. if (dv > (double)(MAXTAGGED + MAXTAGGED / 2) || dv < -(double)(MAXTAGGED + MAXTAGGED / 2)) { *(--sp) = (PolyWord)overflowPacket; goto RAISE_EXCEPTION; } POLYSIGNED p; switch (rMode) { case POLY_ROUND_TONEAREST: p = (POLYSIGNED)round(dv); break; case POLY_ROUND_DOWNWARD: p = (POLYSIGNED)floor(dv); break; case POLY_ROUND_UPWARD: p = (POLYSIGNED)ceil(dv); break; case POLY_ROUND_TOZERO: default: // Truncation is the default for C. p = (POLYSIGNED)dv; } // Check that the value can be tagged. if (p > MAXTAGGED || p < -MAXTAGGED - 1) { *(--sp) = (PolyWord)overflowPacket; goto RAISE_EXCEPTION; } *sp = TAGGED(p); break; } case EXTINSTR_loadC8: { // This is similar to loadMLByte except that the base address is a boxed large-word. // Also the index is SIGNED. POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; *sp = TAGGED(p[index]); // Have to tag the result break; } case EXTINSTR_loadC16: { // This and the other loads are similar to loadMLWord with separate // index and offset values. POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; POLYUNSIGNED r = ((uint16_t*)p)[index]; *sp = TAGGED(r); break; } case EXTINSTR_loadC32: { POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; uintptr_t r = ((uint32_t*)p)[index]; #ifdef IS64BITS // This is tagged in 64-bit mode * sp = TAGGED(r); #else // But boxed in 32-bit mode. 
PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = r; *sp = (PolyWord)t; #endif break; } #if (defined(IS64BITS) || defined(POLYML32IN64)) case EXTINSTR_loadC64: { POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; uintptr_t r = ((uint64_t*)p)[index]; // This must be boxed. PolyObject* t = this->allocateMemory(taskData, LGWORDSIZE, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(LGWORDSIZE, F_BYTE_OBJ); *(uintptr_t*)t = r; *sp = (PolyWord)t; break; } #endif case EXTINSTR_loadCFloat: { POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; double r = ((float*)p)[index]; // This must be boxed. PolyObject* t = this->boxDouble(taskData, r, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_loadCDouble: { POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; double r = ((double*)p)[index]; // This must be boxed. PolyObject* t = this->boxDouble(taskData, r, pc, sp); if (t == 0) goto RAISE_EXCEPTION; *sp = (PolyWord)t; break; } case EXTINSTR_storeC8: { // Similar to storeMLByte except that the base address is a boxed large-word. POLYUNSIGNED toStore = UNTAGGED(*sp++); POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; p[index] = (byte)toStore; *sp = Zero; break; } case EXTINSTR_storeC16: { uint16_t toStore = (uint16_t)UNTAGGED(*sp++); POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; ((uint16_t*)p)[index] = toStore; *sp = Zero; break; } case EXTINSTR_storeC32: { #ifdef IS64BITS // This is a tagged value in 64-bit mode. uint32_t toStore = (uint32_t)UNTAGGED(*sp++); #else // but a boxed value in 32-bit mode. uint32_t toStore = (uint32_t)(*(uintptr_t*)((*sp++).w().AsObjPtr())); #endif POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; ((uint32_t*)p)[index] = toStore; *sp = Zero; break; } #if (defined(IS64BITS) || defined(POLYML32IN64)) case EXTINSTR_storeC64: { // This is a boxed value. uint64_t toStore = *(uintptr_t*)((*sp++).w().AsObjPtr()); POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; ((uint64_t*)p)[index] = toStore; *sp = Zero; break; } #endif case EXTINSTR_storeCFloat: { // This is a boxed value. float toStore = (float)unboxDouble(*sp++); POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; ((float*)p)[index] = toStore; *sp = Zero; break; } case EXTINSTR_storeCDouble: { // This is a boxed value. double toStore = unboxDouble(*sp++); POLYSIGNED offset = UNTAGGED(*sp++); POLYSIGNED index = UNTAGGED(*sp++); POLYCODEPTR p = *((byte**)((*sp).w().AsObjPtr())) + offset; ((double*)p)[index] = toStore; *sp = Zero; break; } case EXTINSTR_jump32True: // Invert the sense of the test and fall through. *sp = ((*sp).w() == True) ? 
False : True; case EXTINSTR_jump32False: { PolyWord u = *sp++; /* Pop argument */ if (u == True) { pc += 4; break; } /* else - false - take the jump */ } case EXTINSTR_jump32: { // This is a 32-bit signed quantity on both 64-bits and 32-bits. POLYSIGNED offset = pc[3] & 0x80 ? -1 : 0; offset = (offset << 8) | pc[3]; offset = (offset << 8) | pc[2]; offset = (offset << 8) | pc[1]; offset = (offset << 8) | pc[0]; pc += offset + 4; break; } case EXTINSTR_setHandler32: /* Set up a handler */ { POLYUNSIGNED offset = pc[0] + (pc[1] << 8) + (pc[2] << 16) + (pc[3] << 24); (--sp)->codeAddr = pc + offset + 4; /* Address of handler */ SetHandlerRegister(sp); pc += 4; break; } case EXTINSTR_case32: { // arg1 is the number of cases i.e. one more than the largest value // This is followed by that number of 32-bit offsets. // If the value is out of range the default case is immediately after the table. POLYSIGNED u = UNTAGGED(*sp++); /* Get the value */ if (u >= arg1 || u < 0) pc += 2 + arg1 * 4; /* Out of range */ else { pc += 2; pc += /* Index */pc[u * 4] + (pc[u * 4 + 1] << 8) + (pc[u * 4 + 2] << 16) + (pc[u * 4 + 3] << 24); } break; } case EXTINSTR_tuple_w: { storeWords = arg1; pc += 2; TUPLE: /* Common code for tupling. */ PolyObject* p = this->allocateMemory(taskData, storeWords, pc, sp); if (p == 0) goto RAISE_EXCEPTION; // Exception p->SetLengthWord(storeWords, 0); for (; storeWords > 0; ) p->Set(--storeWords, *sp++); *(--sp) = (PolyWord)p; break; } case EXTINSTR_indirect_w: *sp = (*sp).w().AsObjPtr()->Get(arg1); pc += 2; break; case EXTINSTR_moveToContainerW: { PolyWord u = *sp++; (*sp).stackAddr[arg1] =u; pc += 2; break; } case EXTINSTR_moveToMutClosureW: { PolyWord u = *sp++; (*sp).w().AsObjPtr()->Set(arg1 + sizeof(uintptr_t)/sizeof(PolyWord), u); pc += 2; break; } case EXTINSTR_indirectContainerW: *sp = (*sp).stackAddr[arg1]; pc += 2; break; case EXTINSTR_indirectClosureW: *sp = (*sp).w().AsObjPtr()->Get(arg1+sizeof(uintptr_t)/sizeof(PolyWord)); pc += 2; break; case EXTINSTR_set_stack_val_w: { PolyWord u = *sp++; sp[arg1 - 1] = u; pc += 2; break; } case EXTINSTR_reset_w: sp += arg1; pc += 2; break; case EXTINSTR_reset_r_w: { PolyWord u = *sp; sp += arg1; *sp = u; pc += 2; break; } case EXTINSTR_stack_containerW: { POLYUNSIGNED words = arg1; pc += 2; while (words-- > 0) *(--sp) = Zero; sp--; (*sp).stackAddr = sp + 1; break; } case EXTINSTR_constAddr32: { POLYUNSIGNED offset = pc[0] + (pc[1] << 8) + (pc[2] << 16) + (pc[3] << 24); *(--sp) = *(PolyWord*)(pc + offset + 4); pc += 4; break; } case EXTINSTR_constAddr32_16: { POLYUNSIGNED offset = pc[0] + (pc[1] << 8) + (pc[2] << 16) + (pc[3] << 24); POLYUNSIGNED cNum = pc[4] + (pc[5] << 8) + 3; offset += cNum * sizeof(PolyWord); *(--sp) = *(PolyWord*)(pc + offset + 6); pc += 6; break; } case EXTINSTR_allocCSpace: { // Allocate this on the C heap. POLYUNSIGNED length = UNTAGGED_UNSIGNED(*sp); void* memory = malloc(length); *sp = Make_sysword(taskData, (uintptr_t)memory)->Word(); break; } case EXTINSTR_freeCSpace: { // Both the address and the size are passed as arguments. sp++; // Size PolyWord addr = *sp; free(*(void**)(addr.AsObjPtr())); *sp = TAGGED(0); break; } case EXTINSTR_tail: /* Tail recursive call. */ /* Move items up the stack. */ /* There may be an overlap if the function we are calling has more args than this one. */ tailCount = arg1; tailPtr = sp + tailCount; sp = tailPtr + arg2; goto TAIL_CALL; case EXTINSTR_allocMutClosureW: { // Allocate memory for a mutable closure and copy in the code address. 
POLYUNSIGNED length = arg1 + sizeof(uintptr_t) / sizeof(PolyWord); pc += 2; PolyObject* t = this->allocateMemory(taskData, length, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(length, F_CLOSURE_OBJ | F_MUTABLE_BIT); PolyObject* srcClosure = (*sp).w().AsObjPtr(); *(uintptr_t*)t = *(uintptr_t*)srcClosure; for (POLYUNSIGNED i = sizeof(uintptr_t) / sizeof(PolyWord); i < length; i++) t->Set(i, TAGGED(0)); *sp = (PolyWord)t; break; } case EXTINSTR_closureW: { storeWords = arg1; pc += 2; CREATE_CLOSURE: // Allocate a closure. storeWords is the number of non-locals. POLYUNSIGNED length = storeWords + sizeof(uintptr_t) / sizeof(PolyWord); PolyObject* t = this->allocateMemory(taskData, length, pc, sp); if (t == 0) goto RAISE_EXCEPTION; t->SetLengthWord(length, F_CLOSURE_OBJ); for (; storeWords > 0; ) t->Set(--storeWords + sizeof(uintptr_t) / sizeof(PolyWord), *sp++); PolyObject* srcClosure = (*sp).w().AsObjPtr(); *(uintptr_t*)t = *(uintptr_t*)srcClosure; *sp = (PolyWord)t; break; } default: Crash("Unknown extended instruction %x\n", pc[-1]); } break; } case INSTR_enterIntX86: // This is a no-op if we are already interpreting. pc += 3; break; + case INSTR_enterIntArm64: + pc += 8; break; + + case INSTR_no_op: + // Only used for alignment for ARM64. + break; + default: Crash("Unknown instruction %x\n", pc[-1]); } /* switch */ } /* for */ return ReturnReturn; // Never actually reached } void ByteCodeInterpreter::GarbageCollect(ScanAddress* process) { if (overflowPacket != 0) overflowPacket = process->ScanObjectAddress(overflowPacket); if (dividePacket != 0) dividePacket = process->ScanObjectAddress(dividePacket); } extern "C" { POLYEXTERNALSYMBOL POLYUNSIGNED PolyInterpretedGetAbiList(FirstArgument threadId); POLYEXTERNALSYMBOL POLYUNSIGNED PolyInterpretedCreateCIF(FirstArgument threadId, PolyWord abiValue, PolyWord resultType, PolyWord argTypes); POLYEXTERNALSYMBOL POLYUNSIGNED PolyInterpretedCallFunction(FirstArgument threadId, PolyWord cifAddr, PolyWord cFunAddr, PolyWord resAddr, PolyWord argVec); } // FFI #if (defined(HAVE_LIBFFI) && defined(HAVE_FFI_H)) #ifdef HAVE_ERRNO_H #include #endif #include static struct _abiTable { const char* abiName; ffi_abi abiCode; } abiTable[] = { // Unfortunately the ABI entries are enums rather than #defines so we // can't test individual entries. #ifdef X86_WIN32 {"sysv", FFI_SYSV}, {"stdcall", FFI_STDCALL}, {"thiscall", FFI_THISCALL}, {"fastcall", FFI_FASTCALL}, {"ms_cdecl", FFI_MS_CDECL}, #elif defined(X86_WIN64) {"win64", FFI_WIN64}, #elif defined(X86_64) || (defined (__x86_64__) && defined (X86_DARWIN)) {"unix64", FFI_UNIX64}, #elif defined(X86_ANY) {"sysv", FFI_SYSV}, #endif { "default", FFI_DEFAULT_ABI} }; static Handle mkAbitab(TaskData* taskData, void*, char* p); static Handle toSysWord(TaskData* taskData, void* p) { return Make_sysword(taskData, (uintptr_t)p); } // Convert the Poly type info into ffi_type values. 
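// Before decodeType below, a minimal sketch (not from the source) of the basic
// libffi sequence that PolyInterpretedCreateCIF and PolyInterpretedCallFunction
// build on: prepare an ffi_cif describing the signature once, then call through
// it. The example function add2 and the fixed int32 signature are hypothetical.
#if 0
#include <ffi.h>
extern "C" int add2(int a, int b) { return a + b; }

static void exampleLibffiCall()
{
    ffi_cif cif;
    ffi_type* argTypes[2] = { &ffi_type_sint32, &ffi_type_sint32 };
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint32, argTypes) != FFI_OK)
        return;                          // bad ABI or type description
    int a = 3, b = 4;
    void* argValues[2] = { &a, &b };     // libffi wants a vector of argument addresses
    ffi_arg result;                      // integer results narrower than ffi_arg must be
                                         // read from an ffi_arg-sized buffer, which is why
                                         // the call code further down copies small results
                                         // out of a FFI_SIZEOF_ARG temporary
    ffi_call(&cif, FFI_FN(add2), &result, argValues);
    // (int)result == 7
}
#endif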
/* datatype cTypeForm = CTypeFloatingPt | CTypePointer | CTypeSignedInt | CTypeUnsignedInt | CTypeStruct of cType list | CTypeVoid withtype cType = { typeForm: cTypeForm, align: word, size: word } */ static ffi_type* decodeType(PolyWord pType) { PolyWord typeForm = pType.AsObjPtr()->Get(2); PolyWord typeSize = pType.AsObjPtr()->Get(0); if (typeForm.IsDataPtr()) { // Struct size_t size = typeSize.UnTaggedUnsigned(); unsigned short align = (unsigned short)pType.AsObjPtr()->Get(1).UnTaggedUnsigned(); unsigned nElems = 0; PolyWord listStart = typeForm.AsObjPtr()->Get(0); for (PolyWord p = listStart; !ML_Cons_Cell::IsNull(p); p = ((ML_Cons_Cell*)p.AsObjPtr())->t) nElems++; size_t space = sizeof(ffi_type); // Add space for the elements plus one extra for the zero terminator. space += (nElems + 1) * sizeof(ffi_type*); ffi_type* result = (ffi_type*)calloc(1, space); // Raise an exception rather than returning zero. if (result == 0) return 0; ffi_type** elem = (ffi_type**)(result + 1); result->size = size; result->alignment = align; result->type = FFI_TYPE_STRUCT; result->elements = elem; if (elem != 0) { for (PolyWord p = listStart; !ML_Cons_Cell::IsNull(p); p = ((ML_Cons_Cell*)p.AsObjPtr())->t) { PolyWord e = ((ML_Cons_Cell*)p.AsObjPtr())->h; ffi_type* t = decodeType(e); if (t == 0) return 0; *elem++ = t; } *elem = 0; // Null terminator } return result; } else { switch (typeForm.UnTaggedUnsigned()) { case 0: { // Floating point if (typeSize.UnTaggedUnsigned() == ffi_type_float.size) return &ffi_type_float; else if (typeSize.UnTaggedUnsigned() == ffi_type_double.size) return &ffi_type_double; ASSERT(0); } case 1: // FFI type poiner return &ffi_type_pointer; case 2: // Signed integer. { switch (typeSize.UnTaggedUnsigned()) { case 1: return &ffi_type_sint8; case 2: return &ffi_type_sint16; case 4: return &ffi_type_sint32; case 8: return &ffi_type_sint64; default: ASSERT(0); } } case 3: // Unsigned integer. { switch (typeSize.UnTaggedUnsigned()) { case 1: return &ffi_type_uint8; case 2: return &ffi_type_uint16; case 4: return &ffi_type_uint32; case 8: return &ffi_type_uint64; default: ASSERT(0); } } case 4: // Void return &ffi_type_void; } ASSERT(0); } return 0; } // Create a CIF. This contains all the types and some extra information. // The arguments are the raw ML values. That does make this dependent on the // representations used by the compiler. // This mallocs space for the CIF and the types. The space is never freed. // POLYUNSIGNED PolyInterpretedCreateCIF(FirstArgument threadId, PolyWord abiValue, PolyWord resultType, PolyWord argTypes) { TaskData* taskData = TaskData::FindTaskForId(threadId); ASSERT(taskData != 0); taskData->PreRTSCall(); Handle reset = taskData->saveVec.mark(); Handle result = 0; ffi_abi abi = (ffi_abi)get_C_ushort(taskData, abiValue); try { unsigned nArgs = 0; for (PolyWord p = argTypes; !ML_Cons_Cell::IsNull(p); p = ((ML_Cons_Cell*)p.AsObjPtr())->t) nArgs++; // Allocate space for the cif followed by the argument type vector size_t space = sizeof(ffi_cif) + nArgs * sizeof(ffi_type*); ffi_cif* cif = (ffi_cif*)malloc(space); if (cif == 0) raise_syscall(taskData, "Insufficient memory", ENOMEM); ffi_type* rtype = decodeType(resultType); if (rtype == 0) raise_syscall(taskData, "Insufficient memory", ENOMEM); ffi_type** atypes = (ffi_type**)(cif + 1); // Copy the arguments types. 
ffi_type** at = atypes; for (PolyWord p = argTypes; !ML_Cons_Cell::IsNull(p); p = ((ML_Cons_Cell*)p.AsObjPtr())->t) { PolyWord e = ((ML_Cons_Cell*)p.AsObjPtr())->h; ffi_type* atype = decodeType(e); if (atype == 0) raise_syscall(taskData, "Insufficient memory", ENOMEM); *at++ = atype; } ffi_status status = ffi_prep_cif(cif, abi, nArgs, rtype, atypes); if (status == FFI_BAD_TYPEDEF) raise_exception_string(taskData, EXC_foreign, "Bad typedef in ffi_prep_cif"); else if (status == FFI_BAD_ABI) raise_exception_string(taskData, EXC_foreign, "Bad ABI in ffi_prep_cif"); else if (status != FFI_OK) raise_exception_string(taskData, EXC_foreign, "Error in ffi_prep_cif"); result = toSysWord(taskData, cif); } catch (...) {} // If an ML exception is raised taskData->saveVec.reset(reset); taskData->PostRTSCall(); if (result == 0) return TAGGED(0).AsUnsigned(); else return result->Word().AsUnsigned(); } // Call a function. POLYUNSIGNED PolyInterpretedCallFunction(FirstArgument threadId, PolyWord cifAddr, PolyWord cFunAddr, PolyWord resAddr, PolyWord argVec) { ffi_cif* cif = *(ffi_cif**)cifAddr.AsAddress(); void* f = *(void**)cFunAddr.AsAddress(); void* res = *(void**)resAddr.AsAddress(); void* arg = *(void**)argVec.AsAddress(); // Poly passes the arguments as values, effectively a single struct. // Libffi wants a vector of addresses. void** argVector = (void**)calloc(cif->nargs + 1, sizeof(void*)); unsigned n = 0; uintptr_t p = (uintptr_t)arg; while (n < cif->nargs) { uintptr_t align = cif->arg_types[n]->alignment; p = (p + align - 1) & (0 - align); argVector[n] = (void*)p; p += cif->arg_types[n]->size; n++; } // The result area we have provided is only as big as required. // Libffi may need a larger area. if (cif->rtype->size < FFI_SIZEOF_ARG) { char result[FFI_SIZEOF_ARG]; ffi_call(cif, FFI_FN(f), &result, argVector); if (cif->rtype->type != FFI_TYPE_VOID) memcpy(res, result, cif->rtype->size); } else ffi_call(cif, FFI_FN(f), res, argVector); free(argVector); return TAGGED(0).AsUnsigned(); } #else // Libffi is not present. // A basic table so that the Foreign structure will compile static struct _abiTable { const char* abiName; int abiCode; } abiTable[] = { { "default", 0} }; // Don't raise an exception at this point so we can build calls. POLYUNSIGNED PolyInterpretedCreateCIF(FirstArgument threadId, PolyWord abiValue, PolyWord resultType, PolyWord argTypes) { return TAGGED(0).AsUnsigned(); } POLYUNSIGNED PolyInterpretedCallFunction(FirstArgument threadId, PolyWord cifAddr, PolyWord cFunAddr, PolyWord resAddr, PolyWord argVec) { TaskData* taskData = TaskData::FindTaskForId(threadId); try { raise_exception_string(taskData, EXC_foreign, "Foreign function calling is not available. Libffi is not installled."); } catch (...) {} // Handle the IOException return TAGGED(0).AsUnsigned(); } #endif // Construct an entry in the ABI table. static Handle mkAbitab(TaskData* taskData, void* arg, char* p) { struct _abiTable* ab = (struct _abiTable*)p; // Construct a pair of the string and the code Handle name = taskData->saveVec.push(C_string_to_Poly(taskData, ab->abiName)); Handle code = Make_arbitrary_precision(taskData, ab->abiCode); Handle result = alloc_and_save(taskData, 2); result->WordP()->Set(0, name->Word()); result->WordP()->Set(1, code->Word()); return result; } // Get ABI list. This is called once only before the basis library is built. 
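// A minimal sketch (not from the source) of the alignment rounding used when
// PolyInterpretedCallFunction above walks the packed argument block: for a
// power-of-two alignment, (p + align - 1) & (0 - align) rounds p up to the
// next multiple of align. The helper name is hypothetical.
#if 0
#include <cstdint>
static uintptr_t alignUp(uintptr_t p, uintptr_t align)
{
    // 0 - align is the unsigned complement ~(align - 1), so adding align - 1
    // and masking clears the low bits, e.g. alignUp(13, 8) == 16.
    return (p + align - 1) & (0 - align);
}
#endif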
POLYUNSIGNED PolyInterpretedGetAbiList(FirstArgument threadId) { TaskData* taskData = TaskData::FindTaskForId(threadId); ASSERT(taskData != 0); taskData->PreRTSCall(); Handle reset = taskData->saveVec.mark(); Handle result = 0; try { result = makeList(taskData, sizeof(abiTable) / sizeof(abiTable[0]), (char*)abiTable, sizeof(abiTable[0]), 0, mkAbitab); } catch (...) {} // If an ML exception is raised taskData->saveVec.reset(reset); taskData->PostRTSCall(); if (result == 0) return TAGGED(0).AsUnsigned(); else return result->Word().AsUnsigned(); } -// No machine-specific calls in the interpreter. struct _entrypts byteCodeEPT[] = { { "PolyInterpretedGetAbiList", (polyRTSFunction)&PolyInterpretedGetAbiList }, { "PolyInterpretedCreateCIF", (polyRTSFunction)&PolyInterpretedCreateCIF }, { "PolyInterpretedCallFunction", (polyRTSFunction)&PolyInterpretedCallFunction }, { NULL, NULL} // End of list. }; diff --git a/libpolyml/int_opcodes.h b/libpolyml/int_opcodes.h index 90bf2fe3..7a95a779 100644 --- a/libpolyml/int_opcodes.h +++ b/libpolyml/int_opcodes.h @@ -1,276 +1,279 @@ /* Title: Definitions for the code-tree instructions. Author: Dave Matthews, Cambridge University Computer Laboratory Copyright (c) 2000 Cambridge University Technical Services Limited - Further development Copyright David C.J. Matthews 2015-18, 2020. + Further development Copyright David C.J. Matthews 2015-18, 2020-21. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #define INSTR_jump8 0x02 #define INSTR_jump8false 0x03 #define INSTR_loadMLWord 0x04 #define INSTR_storeMLWord 0x05 #define INSTR_alloc_ref 0x06 #define INSTR_blockMoveWord 0x07 #define INSTR_loadUntagged 0x08 #define INSTR_storeUntagged 0x09 #define INSTR_case16 0x0a #define INSTR_call_closure 0x0c #define INSTR_return_w 0x0d #define INSTR_stack_containerB 0x0e #define INSTR_raise_ex 0x10 #define INSTR_callConstAddr16 0x11 // Legacy #define INSTR_callConstAddr8 0x12 // Legacy #define INSTR_local_w 0x13 #define INSTR_constAddr16_8 0x14 #define INSTR_constAddr8_8 0x15 #define INSTR_callLocalB 0x16 #define INSTR_callConstAddr8_8 0x17 #define INSTR_callConstAddr16_8 0x18 #define INSTR_constAddr16 0x1a // Legacy #define INSTR_const_int_w 0x1b #define INSTR_jump_back8 0x1e #define INSTR_return_b 0x1f #define INSTR_jump_back16 0x20 #define INSTR_indirectLocalBB 0x21 #define INSTR_local_b 0x22 #define INSTR_indirect_b 0x23 #define INSTR_moveToContainerB 0x24 #define INSTR_set_stack_val_b 0x25 #define INSTR_reset_b 0x26 #define INSTR_reset_r_b 0x27 #define INSTR_const_int_b 0x28 #define INSTR_local_0 0x29 #define INSTR_local_1 0x2a #define INSTR_local_2 0x2b #define INSTR_local_3 0x2c #define INSTR_local_4 0x2d #define INSTR_local_5 0x2e #define INSTR_local_6 0x2f #define INSTR_local_7 0x30 #define INSTR_local_8 0x31 #define INSTR_local_9 0x32 #define INSTR_local_10 0x33 #define INSTR_local_11 0x34 #define INSTR_indirect_0 0x35 #define INSTR_indirect_1 0x36 #define INSTR_indirect_2 0x37 #define INSTR_indirect_3 0x38 #define INSTR_indirect_4 0x39 #define INSTR_indirect_5 0x3a #define INSTR_const_0 0x3b #define INSTR_const_1 0x3c #define INSTR_const_2 0x3d #define INSTR_const_3 0x3e #define INSTR_const_4 0x3f #define INSTR_const_10 0x40 #define INSTR_return_1 0x42 #define INSTR_return_2 0x43 #define INSTR_return_3 0x44 #define INSTR_local_12 0x45 #define INSTR_jump8True 0x46 #define INSTR_jump16True 0x47 #define INSTR_local_13 0x49 #define INSTR_local_14 0x4a #define INSTR_local_15 0x4b #define INSTR_arbAdd 0x4c #define INSTR_arbSubtract 0x4d #define INSTR_arbMultiply 0x4e +#define INSTR_enterIntArm64 0x4f #define INSTR_reset_1 0x50 #define INSTR_reset_2 0x51 +#define INSTR_no_op 0x52 #define INSTR_indirectClosureBB 0x54 #define INSTR_constAddr8_0 0x55 #define INSTR_constAddr8_1 0x56 #define INSTR_callConstAddr8_0 0x57 #define INSTR_callConstAddr8_1 0x58 #define INSTR_reset_r_1 0x64 #define INSTR_reset_r_2 0x65 #define INSTR_reset_r_3 0x66 #define INSTR_tuple_b 0x68 #define INSTR_tuple_2 0x69 #define INSTR_tuple_3 0x6a #define INSTR_tuple_4 0x6b #define INSTR_lock 0x6c #define INSTR_ldexc 0x6d #define INSTR_indirectContainerB 0x74 #define INSTR_moveToMutClosureB 0x75 #define INSTR_allocMutClosureB 0x76 #define INSTR_indirectClosureB0 0x77 #define INSTR_push_handler 0x78 #define INSTR_indirectClosureB1 0x7a #define INSTR_tail_b_b 0x7b #define INSTR_indirectClosureB2 0x7c #define INSTR_setHandler8 0x81 #define INSTR_callFastRTS0 0x83 #define INSTR_callFastRTS1 0x84 #define INSTR_callFastRTS2 0x85 #define INSTR_callFastRTS3 0x86 #define INSTR_callFastRTS4 0x87 #define INSTR_callFastRTS5 0x88 #define INSTR_notBoolean 0x91 #define INSTR_isTagged 0x92 #define INSTR_cellLength 0x93 #define INSTR_cellFlags 0x94 #define INSTR_clearMutable 0x95 -#define INSTR_atomicIncr 0x97 -#define INSTR_atomicDecr 
0x98 +#define INSTR_atomicIncr 0x97 // Legacy +#define INSTR_atomicDecr 0x98 // Legacy #define INSTR_equalWord 0xa0 #define INSTR_lessSigned 0xa2 #define INSTR_lessUnsigned 0xa3 #define INSTR_lessEqSigned 0xa4 #define INSTR_lessEqUnsigned 0xa5 #define INSTR_greaterSigned 0xa6 #define INSTR_greaterUnsigned 0xa7 #define INSTR_greaterEqSigned 0xa8 #define INSTR_greaterEqUnsigned 0xa9 #define INSTR_fixedAdd 0xaa #define INSTR_fixedSub 0xab #define INSTR_fixedMult 0xac #define INSTR_fixedQuot 0xad #define INSTR_fixedRem 0xae #define INSTR_wordAdd 0xb1 #define INSTR_wordSub 0xb2 #define INSTR_wordMult 0xb3 #define INSTR_wordDiv 0xb4 #define INSTR_wordMod 0xb5 #define INSTR_wordAnd 0xb7 #define INSTR_wordOr 0xb8 #define INSTR_wordXor 0xb9 #define INSTR_wordShiftLeft 0xba #define INSTR_wordShiftRLog 0xbb #define INSTR_allocByteMem 0xbd #define INSTR_indirectLocalB1 0xc1 #define INSTR_isTaggedLocalB 0xc2 #define INSTR_jumpNEqLocalInd 0xc3 #define INSTR_jumpTaggedLocal 0xc4 #define INSTR_jumpNEqLocal 0xc5 #define INSTR_indirect0Local0 0xc6 #define INSTR_indirectLocalB0 0xc7 #define INSTR_closureB 0xd0 #define INSTR_getThreadId 0xd9 #define INSTR_allocWordMemory 0xda #define INSTR_loadMLByte 0xdc #define INSTR_storeMLByte 0xe4 #define INSTR_blockMoveByte 0xec #define INSTR_blockEqualByte 0xed #define INSTR_blockCompareByte 0xee #define INSTR_deleteHandler 0xf1 #define INSTR_jump16 0xf7 #define INSTR_jump16false 0xf8 #define INSTR_setHandler16 0xf9 #define INSTR_constAddr8 0xfa // Legacy #define INSTR_stackSize16 0xfc #define INSTR_escape 0xfe #define INSTR_enterIntX86 0xff // Extended opcodes - preceded by escape #define EXTINSTR_stack_containerW 0x0b #define EXTINSTR_allocMutClosureW 0x0f #define EXTINSTR_indirectClosureW 0x10 #define EXTINSTR_indirectContainerW 0x11 #define EXTINSTR_indirect_w 0x14 #define EXTINSTR_moveToContainerW 0x15 #define EXTINSTR_moveToMutClosureW 0x16 #define EXTINSTR_set_stack_val_w 0x17 #define EXTINSTR_reset_w 0x18 #define EXTINSTR_reset_r_w 0x19 #define EXTINSTR_callFastRRtoR 0x1c #define EXTINSTR_callFastRGtoR 0x1d #define EXTINSTR_jump32True 0x48 #define EXTINSTR_floatAbs 0x56 #define EXTINSTR_floatNeg 0x57 #define EXTINSTR_fixedIntToFloat 0x58 #define EXTINSTR_floatToReal 0x59 #define EXTINSTR_realToFloat 0x5a #define EXTINSTR_floatEqual 0x5b #define EXTINSTR_floatLess 0x5c #define EXTINSTR_floatLessEq 0x5d #define EXTINSTR_floatGreater 0x5e #define EXTINSTR_floatGreaterEq 0x5f #define EXTINSTR_floatAdd 0x60 #define EXTINSTR_floatSub 0x61 #define EXTINSTR_floatMult 0x62 #define EXTINSTR_floatDiv 0x63 #define EXTINSTR_realToInt 0x6e #define EXTINSTR_tuple_w 0x67 #define EXTINSTR_floatToInt 0x6f #define EXTINSTR_callFastFtoF 0x70 #define EXTINSTR_callFastGtoF 0x71 #define EXTINSTR_callFastFFtoF 0x72 #define EXTINSTR_callFastFGtoF 0x73 #define EXTINSTR_realUnordered 0x79 #define EXTINSTR_floatUnordered 0x7a #define EXTINSTR_tail 0x7c #define EXTINSTR_callFastRtoR 0x8f #define EXTINSTR_callFastGtoR 0x90 +#define EXTINSTR_atomicExchAdd 0x96 #define EXTINSTR_atomicReset 0x99 #define EXTINSTR_longWToTagged 0x9a #define EXTINSTR_signedToLongW 0x9b #define EXTINSTR_unsignedToLongW 0x9c #define EXTINSTR_realAbs 0x9d #define EXTINSTR_realNeg 0x9e #define EXTINSTR_fixedIntToReal 0x9f #define EXTINSTR_fixedDiv 0xaf #define EXTINSTR_fixedMod 0xb0 #define EXTINSTR_wordShiftRArith 0xbc #define EXTINSTR_lgWordEqual 0xbe #define EXTINSTR_lgWordLess 0xc0 #define EXTINSTR_lgWordLessEq 0xc1 #define EXTINSTR_lgWordGreater 0xc2 #define EXTINSTR_lgWordGreaterEq 0xc3 #define 
EXTINSTR_lgWordAdd 0xc4 #define EXTINSTR_lgWordSub 0xc5 #define EXTINSTR_lgWordMult 0xc6 #define EXTINSTR_lgWordDiv 0xc7 #define EXTINSTR_lgWordMod 0xc8 #define EXTINSTR_lgWordAnd 0xc9 #define EXTINSTR_lgWordOr 0xca #define EXTINSTR_lgWordXor 0xcb #define EXTINSTR_lgWordShiftLeft 0xcc #define EXTINSTR_lgWordShiftRLog 0xcd #define EXTINSTR_lgWordShiftRArith 0xce #define EXTINSTR_realEqual 0xcf #define EXTINSTR_closureW 0xd0 #define EXTINSTR_realLess 0xd1 #define EXTINSTR_realLessEq 0xd2 #define EXTINSTR_realGreater 0xd3 #define EXTINSTR_realGreaterEq 0xd4 #define EXTINSTR_realAdd 0xd5 #define EXTINSTR_realSub 0xd6 #define EXTINSTR_realMult 0xd7 #define EXTINSTR_realDiv 0xd8 #define EXTINSTR_loadC8 0xdd #define EXTINSTR_loadC16 0xde #define EXTINSTR_loadC32 0xdf #define EXTINSTR_loadC64 0xe0 #define EXTINSTR_loadCFloat 0xe1 #define EXTINSTR_loadCDouble 0xe2 #define EXTINSTR_storeC8 0xe5 #define EXTINSTR_storeC16 0xe6 #define EXTINSTR_storeC32 0xe7 #define EXTINSTR_storeC64 0xe8 #define EXTINSTR_storeCFloat 0xe9 #define EXTINSTR_storeCDouble 0xea #define EXTINSTR_constAddr32_16 0xf0 #define EXTINSTR_jump32 0xf2 #define EXTINSTR_jump32False 0xf3 #define EXTINSTR_constAddr32 0xf4 // Legacy #define EXTINSTR_setHandler32 0xf5 #define EXTINSTR_case32 0xf6 #define EXTINSTR_allocCSpace 0xfd #define EXTINSTR_freeCSpace 0xfe diff --git a/libpolyml/interpreter.cpp b/libpolyml/interpreter.cpp index faad605c..9c9e8344 100644 --- a/libpolyml/interpreter.cpp +++ b/libpolyml/interpreter.cpp @@ -1,403 +1,392 @@ /* Architecture independent wrapper for the byte-code interpreter. - Copyright David C.J. Matthews 2020. + Copyright David C.J. Matthews 2020-21. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #ifdef HAVE_ASSERT_H #include <assert.h> #define ASSERT(x) assert(x) #else #define ASSERT(x) 0 #endif #ifdef HAVE_STRING_H #include <string.h> #endif #include "globals.h" #include "machine_dep.h" #include "sys.h" #include "profiling.h" #include "arb.h" #include "processes.h" #include "run_time.h" #include "gc.h" #include "diagnostics.h" #include "polystring.h" #include "save_vec.h" #include "memmgr.h" #include "scanaddrs.h" #include "rtsentry.h" #include "bytecode.h" /* the amount of ML stack space to reserve for registers, C exception handling etc. The compiler requires us to reserve 2 stack-frames worth (2 * 20 words) plus whatever we require for the register save area. We actually reserve slightly more than this.
SPF 3/3/97 */ #define OVERFLOW_STACK_SIZE 50 class IntTaskData: public TaskData, ByteCodeInterpreter { public: IntTaskData() : ByteCodeInterpreter(&taskSp, &sl) {} ~IntTaskData() {} virtual void GarbageCollect(ScanAddress *process); void ScanStackAddress(ScanAddress *process, stackItem& val, StackSpace *stack); virtual void EnterPolyCode(); // Start running ML virtual void SetException(poly_exn *exc) { exception_arg = exc; } virtual void InterruptCode(); // AddTimeProfileCount is used in time profiling. virtual bool AddTimeProfileCount(SIGNALCONTEXT *context); virtual void InitStackFrame(TaskData *newTask, Handle proc); - // Increment or decrement the first word of the object pointed to by the - // mutex argument and return the new value. - virtual POLYUNSIGNED AtomicDecrement(PolyObject* mutexp); + // Atomic exchange-and-add. + virtual POLYSIGNED AtomicExchAdd(PolyObject* mutexp, POLYSIGNED incr); // Set a mutex to zero. virtual void AtomicReset(PolyObject* mutexp); - virtual POLYUNSIGNED AtomicIncrement(PolyObject* mutexp); // Return the minimum space occupied by the stack. Used when setting a limit. virtual uintptr_t currentStackSpace(void) const { return ((stackItem*)this->stack->top - this->taskSp) + OVERFLOW_STACK_SIZE; } virtual void addProfileCount(POLYUNSIGNED words) { addSynchronousCount(interpreterPc, words); } virtual void CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length); PLock interruptLock; virtual void ClearExceptionPacket() { exception_arg = TAGGED(0); } virtual PolyWord GetExceptionPacket() { return exception_arg; } virtual stackItem* GetHandlerRegister() { return hr; } virtual void SetHandlerRegister(stackItem* newHr) { hr = newHr; } virtual void HandleStackOverflow(uintptr_t space); stackItem *taskSp; /* Stack pointer. */ stackItem *hr; PolyWord exception_arg; stackItem *sl; /* Stack limit register. */ }; class Interpreter : public MachineDependent { public: Interpreter() {} // Create a task data object. virtual TaskData *CreateTaskData(void) { return new IntTaskData(); } virtual Architectures MachineArchitecture(void) { return MA_Interpreted; } }; void IntTaskData::InitStackFrame(TaskData *parentTask, Handle proc) /* Initialise stack frame. */ { StackSpace *space = this->stack; StackObject *stack = (StackObject *)space->stack(); PolyObject *closure = DEREFWORDHANDLE(proc); uintptr_t stack_size = space->spaceSize() * sizeof(PolyWord) / sizeof(stackItem); this->taskSp = (stackItem*)stack + stack_size; this->hr = this->taskSp; *(--this->taskSp) = (PolyWord)closure; /* Closure address */ this->interpreterPc = *(POLYCODEPTR*)closure; this->exception_arg = TAGGED(0); /* Used for exception argument. */ } void IntTaskData::HandleStackOverflow(uintptr_t space) { uintptr_t min_size = (this->stack->top - (PolyWord*)taskSp) + OVERFLOW_STACK_SIZE + space; try { // The stack check has failed. This may either be because we really have // overflowed the stack or because the stack limit value has been adjusted // to result in a call here. CheckAndGrowStack(this, min_size); } catch (IOException&) { // We may get an exception while handling this if we run out of store } { PLocker l(&interruptLock); // Set the stack limit. This clears any interrupt and also sets the // correct value if we've grown the stack. sl = (stackItem*)stack->bottom + OVERFLOW_STACK_SIZE; } try { processes->ProcessAsynchRequests(this); // Release and re-acquire use of the ML memory to allow another thread // to GC.
processes->ThreadReleaseMLMemory(this); processes->ThreadUseMLMemory(this); } catch (IOException&) { } } void IntTaskData::GarbageCollect(ScanAddress *process) { TaskData::GarbageCollect(process); ByteCodeInterpreter::GarbageCollect(process); if (exception_arg.IsDataPtr()) { PolyObject* obj = exception_arg.AsObjPtr(); obj = process->ScanObjectAddress(obj); exception_arg = obj; } if (stack != 0) { stackItem*stackPtr = this->taskSp; // Now the values on the stack. for (stackItem* q = stackPtr; q < (stackItem*)stack->top; q++) ScanStackAddress(process, *q, stack); } } // Process a value within the stack. void IntTaskData::ScanStackAddress(ScanAddress *process, stackItem& stackItem, StackSpace *stack) { // We may have return addresses on the stack which could look like // tagged values. Check whether the value is in the code area before // checking whether it is untagged. #ifdef POLYML32IN64 // In 32-in-64 return addresses always have the top 32 bits non-zero. if (stackItem.argValue < ((uintptr_t)1 << 32)) { // It's either a tagged integer or an object pointer. if (stackItem.w().IsDataPtr()) { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } } else { // Could be a code address or a stack address. MemSpace* space = gMem.SpaceForAddress(stackItem.codeAddr - 1); if (space == 0 || space->spaceType != ST_CODE) return; PolyObject* obj = gMem.FindCodeObject(stackItem.codeAddr); ASSERT(obj != 0); // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } #else // The -1 here is because we may have a zero-sized cell in the last // word of a space. MemSpace* space = gMem.SpaceForAddress(stackItem.codeAddr - 1); if (space == 0) return; // In particular we may have one of the assembly code addresses. if (space->spaceType == ST_CODE) { PolyObject* obj = gMem.FindCodeObject(stackItem.codeAddr); // If it is actually an integer it might be outside a valid code object. if (obj == 0) { ASSERT(stackItem.w().IsTagged()); // It must be an integer } else // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } else if (space->spaceType == ST_LOCAL && stackItem.w().IsDataPtr()) // Local values must be word addresses. { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } #endif } // Copy a stack void IntTaskData::CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length) { #ifdef POLYML32IN64 old_length = old_length / 2; new_length = new_length / 2; #endif /* Moves a stack, updating all references within the stack */ stackItem*old_base = (stackItem*)old_stack; stackItem*new_base = (stackItem*)new_stack; stackItem*old_top = old_base + old_length; /* Calculate the offset of the new stack from the old. If the frame is being extended objects in the new frame will be further up the stack than in the old one. */ uintptr_t offset = new_base - old_base + new_length - old_length; stackItem *oldSp = this->taskSp; this->taskSp = oldSp + offset; this->hr = this->hr + offset; /* Skip the unused part of the stack. 
*/ uintptr_t i = oldSp - old_base; ASSERT(i <= old_length); i = old_length - i; stackItem *old = oldSp; stackItem *newp = this->taskSp; while (i--) { stackItem old_word = *old++; if (old_word.w().IsDataPtr() && old_word.stackAddr >= old_base && old_word.stackAddr <= old_top) old_word.stackAddr = old_word.stackAddr + offset; else if (old_word.w().IsDataPtr() && IsHeapAddress(old_word.stackAddr)) { stackItem* addr = (stackItem*)old_word.w().AsStackAddr(); if (addr >= old_base && addr <= old_top) { addr += offset; old_word = PolyWord::FromStackAddr((PolyWord*)addr); } } *newp++ = old_word; } ASSERT(old == ((stackItem*)old_stack) + old_length); ASSERT(newp == ((stackItem*)new_stack) + new_length); } void IntTaskData::EnterPolyCode() /* Called from "main" to enter the code. */ { sl = (stackItem*)((PolyWord*)this->stack->stack() + OVERFLOW_STACK_SIZE); // Run the interpreter. Currently this can never return. // Callbacks are not currently implemented which is the only case that could // occur in the pure interpreted version. (void)RunInterpreter(this); exitThread(this); // This thread is exiting. ASSERT(0); // We should never get here. } // This is called from a different thread so we have to be careful. void IntTaskData::InterruptCode() { PLocker l(&interruptLock); // Set the stack limit pointer to the top of the stack to cause // a trap when we next check for stack overflow. // We use a lock here to ensure that we always use the current value of the // stack. The thread we're interrupting could be growing the stack at this point. if (stack != 0) sl = (stackItem*)(stack->top - 1); } // As far as possible we want locking and unlocking an ML mutex to be fast so // we try to implement the code in the assembly code using appropriate // interlocked instructions. That does mean that if we need to lock and // unlock an ML mutex in this code we have to use the same, machine-dependent, // code to do it. These are defaults that are used where there is no // machine-specific code. // This lock is used to synchronise all atomic operations. // It is not needed in the X86 version because that can use a global // memory lock. static PLock mutexLock; -POLYUNSIGNED IntTaskData::AtomicIncrement(PolyObject* mutexp) +POLYSIGNED IntTaskData::AtomicExchAdd(PolyObject* mutexp, POLYSIGNED incr) { PLocker l(&mutexLock); - // A thread can only call this once so the values will be short - PolyWord newValue = TAGGED(UNTAGGED(mutexp->Get(0)) + 1); + POLYSIGNED oldValue = mutexp->Get(0).AsSigned(); + PolyWord newValue = PolyWord::FromSigned(oldValue + incr - TAGGED(0).AsSigned() /* Remove the tag */); mutexp->Set(0, newValue); - return newValue.AsUnsigned(); -} - -POLYUNSIGNED IntTaskData::AtomicDecrement(PolyObject* mutexp) -{ - PLocker l(&mutexLock); - // A thread can only call this once so the values will be short - PolyWord newValue = TAGGED(UNTAGGED(mutexp->Get(0)) - 1); - mutexp->Set(0, newValue); - return newValue.AsUnsigned(); + return oldValue; } // Release a mutex. We need to lock the mutex to ensure we don't // reset it in the time between one of atomic operations reading // and writing the mutex. void IntTaskData::AtomicReset(PolyObject* mutexp) { PLocker l(&mutexLock); mutexp->Set(0, TAGGED(0)); // Set this to released. } bool IntTaskData::AddTimeProfileCount(SIGNALCONTEXT *context) { if (interpreterPc != 0) { // See if the PC we've got is an ML code address. 
MemSpace *space = gMem.SpaceForAddress(interpreterPc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { incrementCountAsynch(interpreterPc); return true; } } return false; } extern "C" { POLYEXTERNALSYMBOL POLYUNSIGNED PolyInterpretedEnterIntMode(); POLYEXTERNALSYMBOL POLYUNSIGNED PolyEndBootstrapMode(FirstArgument threadId, PolyWord function); } // Do we require EnterInt instructions and if so for which architecture? // 0 => None; 1 => X86_32, 2 => X86_64. 3 => X86_32_in_64. POLYUNSIGNED PolyInterpretedEnterIntMode() { return TAGGED(0).AsUnsigned(); } // This has no effect in the interpreted version. POLYUNSIGNED PolyEndBootstrapMode(FirstArgument threadId, PolyWord function) { TaskData* taskData = TaskData::FindTaskForId(threadId); ASSERT(taskData != 0); taskData->PreRTSCall(); Handle pushedFunction = taskData->saveVec.push(function); taskData->InitStackFrame(taskData, pushedFunction); taskData->EnterPolyCode(); // Should never return. ASSERT(0); return TAGGED(0).AsUnsigned(); } static Interpreter interpreterObject; MachineDependent *machineDependent = &interpreterObject; // No machine-specific calls in the interpreter. struct _entrypts machineSpecificEPT[] = { { "PolyInterpretedEnterIntMode", (polyRTSFunction)&PolyInterpretedEnterIntMode }, { "PolyEndBootstrapMode", (polyRTSFunction)&PolyEndBootstrapMode }, { NULL, NULL} // End of list. }; diff --git a/libpolyml/processes.h b/libpolyml/processes.h index abe29a85..2f6ff218 100644 --- a/libpolyml/processes.h +++ b/libpolyml/processes.h @@ -1,353 +1,364 @@ /* Title: Lightweight process library Author: David C.J. Matthews - Copyright (c) 2007-8, 2012, 2015, 2017, 2019, 2020 David C.J. Matthews + Copyright (c) 2007-8, 2012, 2015, 2017, 2019-21 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _PROCESSES_H_ #define _PROCESSES_H_ #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #include "globals.h" #include "rts_module.h" #include "save_vec.h" #include "noreturn.h" #include "locking.h" class SaveVecEntry; typedef SaveVecEntry *Handle; class StackSpace; class PolyWord; class ScanAddress; class MDTaskData; class Exporter; class StackObject; #ifdef HAVE_WINDOWS_H typedef void *HANDLE; #endif #ifdef HAVE_SIGNAL_H #include <signal.h> #endif #ifdef HAVE_UCONTEXT_H #include <ucontext.h> #endif #ifdef HAVE_PTHREAD_H #include <pthread.h> #endif // SIGNALCONTEXT is the argument type that is passed to GetPCandSPFromContext // to get the actual PC and SP in a profiling trap. #if defined(HAVE_WINDOWS_H) // First because it's used in both native Windows and Cygwin. #include <windows.h> #define SIGNALCONTEXT CONTEXT // This is the thread context.
#elif defined(HAVE_UCONTEXT_T) #define SIGNALCONTEXT ucontext_t #elif defined(HAVE_STRUCT_SIGCONTEXT) #define SIGNALCONTEXT struct sigcontext #else #define SIGNALCONTEXT void #endif #define MIN_HEAP_SIZE 4096 // Minimum and initial heap segment size (words) // This is the ML "thread identifier" object. The fields // are read and set by the ML code. class ThreadObject: public PolyObject { public: PolyWord threadRef; // Weak ref containing the address of the thread data. Not used by ML PolyWord flags; // Tagged integer containing flags indicating how interrupts // are handled. Set by ML but only by the thread itself PolyWord threadLocal; // Head of a list of thread-local store items. // Handled entirely by ML but only by the thread. PolyWord requestCopy; // A tagged integer copy of the "requests" field. // This is provided so that ML can easily test if there // is an interrupt pending. PolyWord mlStackSize; // A tagged integer with the maximum ML stack size in bytes PolyWord debuggerSlots[4]; // These are used by the debugger. }; // Other threads may make requests to a thread. typedef enum { kRequestNone = 0, // Increasing severity kRequestInterrupt = 1, kRequestKill = 2 } ThreadRequests; // Per-thread data. This is subclassed for each architecture. class TaskData { public: TaskData(); virtual ~TaskData(); void FillUnusedSpace(void); virtual void GarbageCollect(ScanAddress *process); virtual void EnterPolyCode() = 0; // Start running ML virtual void InterruptCode() = 0; virtual bool AddTimeProfileCount(SIGNALCONTEXT *context) = 0; // Initialise the stack for a new thread. The parent task object is passed in because any // allocation that needs to be made must be made in the parent. virtual void InitStackFrame(TaskData *parentTask, Handle proc) = 0; virtual void SetException(poly_exn *exc) = 0; // The scheduler needs versions of atomic decrement and atomic reset that // work in exactly the same way as the code-generated versions (if any). - // Atomic decrement isn't needed since it only ever releases a mutex. - virtual POLYUNSIGNED AtomicDecrement(PolyObject *mutexp) = 0; + // Atomic increment isn't needed since the scheduler only ever releases a mutex. + virtual POLYUNSIGNED AtomicDecrement(PolyObject* mutexp) + { + POLYSIGNED prev = AtomicExchAdd(mutexp, TAGGED(-1).AsSigned()); + return (POLYUNSIGNED)(prev - TAGGED(1).AsSigned() + TAGGED(0).AsSigned()); + } // Reset a mutex to one. This needs to be atomic with respect to the // atomic increment and decrement instructions. virtual void AtomicReset(PolyObject* mutexp) = 0; // This is actually only used in the interpreter. - virtual POLYUNSIGNED AtomicIncrement(PolyObject* mutexp) = 0; + virtual POLYUNSIGNED AtomicIncrement(PolyObject* mutexp) + { + POLYSIGNED prev = AtomicExchAdd(mutexp, TAGGED(1).AsSigned()); + return (POLYUNSIGNED)(prev + TAGGED(1).AsSigned() - TAGGED(0).AsSigned()); + } + // This is now used in the interpreter in place of AtomicIncrement and AtomicDecrement. + // It may be simpler to use it everywhere. + virtual POLYSIGNED AtomicExchAdd(PolyObject* mutexp, POLYSIGNED incr) = 0; virtual void CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length) = 0; virtual uintptr_t currentStackSpace(void) const = 0; // Add a count to the local function if we are using store profiling. virtual void addProfileCount(POLYUNSIGNED words) = 0; // Functions called before and after an RTS call. 
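// --- Illustrative worked example, not part of this patch. -------------------
// Assuming the usual Poly/ML tagging convention TAGGED(n) == 2*n + 1, the
// AtomicExchAdd-based defaults above reduce to plain increment and decrement
// of the untagged value: AtomicExchAdd stores old + TAGGED(k) - TAGGED(0),
// i.e. TAGGED(n + k), and returns the old tagged value, so
//     AtomicDecrement: TAGGED(n) - TAGGED(1) + TAGGED(0) = (2n+1) - 3 + 1 = TAGGED(n-1)
//     AtomicIncrement: TAGGED(n) + TAGGED(1) - TAGGED(0) = (2n+1) + 3 - 1 = TAGGED(n+1)
// A hypothetical compile-time check of the arithmetic (not in the sources):
//     constexpr long tag(long n) { return 2 * n + 1; }
//     static_assert(tag(5) - tag(1) + tag(0) == tag(4), "decrement");
//     static_assert(tag(5) + tag(1) - tag(0) == tag(6), "increment");
// -----------------------------------------------------------------------------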
virtual void PreRTSCall(void) {} virtual void PostRTSCall(void) {} SaveVec saveVec; PolyWord *allocPointer; // Allocation pointer - decremented towards... PolyWord *allocLimit; // ... lower limit of allocation uintptr_t allocSize; // The preferred heap segment size unsigned allocCount; // The number of allocations since the last GC StackSpace *stack; ThreadObject *threadObject; // Pointer to the thread object. int lastError; // Last error from foreign code. void *signalStack; // Stack to handle interrupts (Unix only) // Get a TaskData pointer given the ML taskId. // This is called at the start of every RTS function that may allocate memory. // It can be called safely to get the thread's own TaskData object without // a lock but any call to get the TaskData for another thread must take the // schedLock first in case the thread is exiting. static TaskData *FindTaskForId(PolyWord taskId) { return *(TaskData**)(((ThreadObject*)taskId.AsObjPtr())->threadRef.AsObjPtr()); } private: // If a thread has to block it will block on this. PCondVar threadLock; // External requests made are stored here until they // can be actioned. ThreadRequests requests; // Pointer to the mutex when blocked. Set to NULL when it doesn't apply. PolyObject *blockMutex; // This is set to false when a thread blocks or enters foreign code. // While it is true the thread can manipulate ML memory so no other // thread can garbage collect. bool inMLHeap; // In Linux, at least, we need to run a separate timer in each thread bool runningProfileTimer; #ifdef HAVE_WINDOWS_H LONGLONG lastCPUTime; // Used for profiling #endif public: bool threadExited; private: #ifdef HAVE_PTHREAD_H pthread_t threadId; #endif #ifdef HAVE_WINDOWS_H public: // Because, on Cygwin, it's used in NewThreadFunction HANDLE threadHandle; private: #endif friend class Processes; }; NORETURNFN(extern Handle exitThread(TaskData *mdTaskData)); class ScanAddress; // Indicate what the main thread is doing if the profile // timer goes off. extern enum _mainThreadPhase { MTP_USER_CODE=0, MTP_GCPHASESHARING, MTP_GCPHASEMARK, MTP_GCPHASECOMPACT, MTP_GCPHASEUPDATE, MTP_GCQUICK, MTP_SHARING, MTP_EXPORTING, MTP_SAVESTATE, MTP_LOADSTATE, MTP_PROFILING, MTP_SIGHANDLER, MTP_CYGWINSPAWN, MTP_STOREMODULE, MTP_LOADMODULE, MTP_MAXENTRY } mainThreadPhase; // Data structure used for requests from a thread to the root // thread. These are GCs or similar. class MainThreadRequest { public: MainThreadRequest (enum _mainThreadPhase phase): mtp(phase), completed(false) {} virtual ~MainThreadRequest () {} // Suppress silly GCC warning const enum _mainThreadPhase mtp; bool completed; virtual void Perform() = 0; }; class PLock; // Class to wait for a given time or for an event, whichever comes first. // // A pointer to this class or a subclass is passed to ThreadPauseForIO. // Because a thread may be interrupted or killed by another ML thread we // don't allow any thread to block indefinitely. Instead whenever a // thread wants to do an operation that may block we have it enter a // loop that polls for the desired condition and if it is not ready it // calls ThreadPauseForIO. The default action is to block for a short // period and then return so that the caller can poll again. That can // limit performance when, for example, reading from a pipe so where possible // we use a sub-class that waits until either input is available or it times // out, whichever comes first, using "select" in Unix or MsgWaitForMultipleObjects // in Windows.
// During a call to Waiter::Wait the thread is set as "not using ML memory" // so a GC can happen while this thread is blocked. class Waiter { public: Waiter() {} virtual ~Waiter() {} virtual void Wait(unsigned maxMillisecs); static Waiter *defaultWaiter; }; #ifdef _WIN32 class WaitHandle: public Waiter { public: WaitHandle(HANDLE h, unsigned maxWait): m_Handle(h), m_maxWait(maxWait) {} virtual void Wait(unsigned maxMillisecs); private: HANDLE m_Handle; unsigned m_maxWait; }; #else // Unix: Wait until a file descriptor is available for input class WaitInputFD: public Waiter { public: WaitInputFD(int fd): m_waitFD(fd) {} virtual void Wait(unsigned maxMillisecs); private: int m_waitFD; }; #endif // External interface to the Process module. These functions are all implemented // by the Processes class. class ProcessExternal { public: virtual ~ProcessExternal() {} // Defined to suppress a warning from GCC virtual TaskData *GetTaskDataForThread(void) = 0; virtual TaskData *CreateNewTaskData() = 0; // Request all ML threads to exit and set the result code. Does not cause // the calling thread itself to exit since this may be called on the GUI thread. virtual void RequestProcessExit(int n) = 0; // Exit from this thread. virtual NORETURNFN(void ThreadExit(TaskData *taskData)) = 0; virtual void BroadcastInterrupt(void) = 0; virtual void BeginRootThread(PolyObject *rootFunction) = 0; // Called when a thread may block. Returns some time later when perhaps // the input is available. virtual void ThreadPauseForIO(TaskData *taskData, Waiter *pWait) = 0; // As ThreadPauseForIO but when there is no stream virtual void ThreadPause(TaskData *taskData) { ThreadPauseForIO(taskData, Waiter::defaultWaiter); } // If a thread is blocking for some time it should release its use // of the ML memory. That allows a GC. ThreadUseMLMemory returns true if // a GC was in progress. virtual void ThreadUseMLMemory(TaskData *taskData) = 0; virtual void ThreadReleaseMLMemory(TaskData *taskData) = 0; // Requests from the threads for actions that need to be performed by // the root thread. virtual void MakeRootRequest(TaskData *taskData, MainThreadRequest *request) = 0; // Deal with any interrupt or kill requests. virtual bool ProcessAsynchRequests(TaskData *taskData) = 0; // Process an interrupt request synchronously. virtual void TestSynchronousRequests(TaskData *taskData) = 0; // Process any events, synchronous or asynchronous. virtual void TestAnyEvents(TaskData *taskData) = 0; // Profiling control. virtual void StartProfiling(void) = 0; virtual void StopProfiling(void) = 0; // Find space for an object. Returns a pointer to the start. "words" must include // the length word and the result points at where the length word will go. // If the allocation succeeds it may update the allocation values in the taskData object. // If the heap is exhausted it may set this thread (or other threads) to raise an exception. virtual PolyWord *FindAllocationSpace(TaskData *taskData, POLYUNSIGNED words, bool alwaysInSeg) = 0; // Signal handling support. The ML signal handler thread blocks until it is // woken up by the signal detection thread. virtual bool WaitForSignal(TaskData *taskData, PLock *sigLock) = 0; virtual void SignalArrived(void) = 0; virtual poly_exn* GetInterrupt(void) = 0; }; // Return the number of processors. Used when configuring multi-threaded GC. 
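// --- Illustrative sketch, not part of this patch. ---------------------------
// The Waiter comment above describes the polling discipline: a potentially
// blocking operation is written as a loop that tests its condition and, while
// the condition does not hold, calls ThreadPauseForIO, so the thread can still
// be interrupted or killed and a GC is never blocked.  A hypothetical caller
// (inputIsReady is an invented test, not part of this interface) might read:
//
//     static void waitUntilReadable(TaskData *taskData, int fd)
//     {
//         WaitInputFD waiter(fd);                  // Unix sub-class declared above
//         while (!inputIsReady(fd))                // hypothetical readiness test
//         {
//             processes->ThreadPauseForIO(taskData, &waiter);
//             processes->TestAnyEvents(taskData);  // honour interrupt/kill requests
//         }
//     }
// -----------------------------------------------------------------------------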
extern unsigned NumberOfProcessors(void); extern unsigned NumberOfPhysicalProcessors(void); extern ProcessExternal *processes; extern struct _entrypts processesEPT[]; #endif diff --git a/libpolyml/sharedata.cpp b/libpolyml/sharedata.cpp index efa47528..d6e51dea 100644 --- a/libpolyml/sharedata.cpp +++ b/libpolyml/sharedata.cpp @@ -1,1123 +1,1125 @@ /* Title: Share common immutable data Copyright (c) 2000 Cambridge University Technical Services Limited and David C. J. Matthews 2006, 2010-13, 2016-17, 2019 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_ASSERT_H #include <assert.h> #define ASSERT(x) assert(x) #else #define ASSERT(x) #endif #ifdef HAVE_STRING_H #include <string.h> #endif #include <vector> #include "globals.h" #include "save_vec.h" #include "machine_dep.h" #include "scanaddrs.h" #include "run_time.h" #include "sys.h" #include "gc.h" #include "rtsentry.h" #include "memmgr.h" #include "processes.h" #include "gctaskfarm.h" #include "diagnostics.h" #include "sharedata.h" #include "gc_progress.h" /* This code was largely written by Simon Finn as a database improver for the memory-mapped persistent store version. The aim is that where two immutable objects (cells) contain the same data (i.e. where ML equality would say they were equal) they should be merged so that only a single object is retained. The basic algorithm works like this: 1. From the root, recursively process all objects and calculate a "depth" for each object. Mutable data and code segments have depth 0 and cannot be merged. Byte segments (e.g. strings and long-format arbitrary precision values) have depth 1. Other cells have depths of 1 or greater, the depth being the maximum recursion depth until a byte segment or an object with depth 0 is reached. Cycles of immutable data don't arise normally in ML but could be produced as a result of locking mutable objects. To avoid infinite recursion cycles are broken by setting the depth of an object to zero before processing it. The depth of each object is stored in the length word of the object. This ensures each object is processed once only. 2. Vectors are created containing objects of the same depth, from 1 to the maximum depth found. 3. We begin a loop starting at depth 1. 4. The length words are restored, replacing the depth count in the header. 5. The objects are sorted by their contents so bringing together objects with the same contents. The contents are considered simply as uninterpreted bits. 6. The sorted vector is processed to find those objects that are actually bitwise equal. One object is selected to be retained and other objects have their length words turned into tombstones pointing at the retained object. 7.
Objects at the next depth are first processed to find pointers to objects that moved in the previous step (or that step with a lower depth). The addresses are updated to point to the retained object. The effect of this step is to ensure that now two objects that are equal in ML terms have identical contents. e.g. If we have val a = ("abc", "def") and b = ("abc", "def") then we will have merged the two occurrences of "abc" and "def" in the previous pass of level 1 objects. This step ensures that the two cells containing the pairs both hold pointers to the same objects and so are bitwise equal. 8. Repeat with 4, 5 and 6 until all the levels have been processed. Each object is processed once and at the end most of the objects have been updated with the shared addresses. We have to scan all the mutable and code objects to update the addresses but also have to scan the immutables because of the possibility of missing an update as a result of breaking a loop (see SPF's comment below). DCJM 3/8/06 This has been substantially updated while retaining the basic algorithm. Sorting is now done in parallel by the GC task farm and the stack is now in dynamic memory. That avoids a possible segfault if the normal C stack overflows. A further problem is that the vectors can get very large and this can cause problems if there is insufficient contiguous space. The code has been modified to reduce the size of the vectors at the cost of increasing the total memory requirement. */ extern "C" { POLYEXTERNALSYMBOL POLYUNSIGNED PolyShareCommonData(FirstArgument threadId, PolyWord root); } // The depth is stored in the length field. If the Weak bit is set but the Mutable bit // is clear the value in the length word is a depth rather than a real length. // The tombstone bit is zero. // Previously "depth" values were encoded with the tombstone bit set but that isn't // possible in 32-in-64 because we need 31 bits in a forwarding pointer. inline bool OBJ_IS_DEPTH(POLYUNSIGNED L) { return (L & (_OBJ_WEAK_BIT| _OBJ_MUTABLE_BIT)) == _OBJ_WEAK_BIT; } inline POLYUNSIGNED OBJ_GET_DEPTH(POLYUNSIGNED L) { return OBJ_OBJECT_LENGTH(L); } inline POLYUNSIGNED OBJ_SET_DEPTH(POLYUNSIGNED n) { return n | _OBJ_WEAK_BIT; } // The DepthVector type contains all the items of a particular depth. // This is the abstract class. There are variants for the case where all // the cells have the same size and where they may vary. class DepthVector { public: DepthVector() : nitems(0), vsize(0), ptrVector(0) {} virtual ~DepthVector() { free(ptrVector); } virtual POLYUNSIGNED MergeSameItems(void); virtual void Sort(void); virtual POLYUNSIGNED ItemCount(void) { return nitems; } virtual void AddToVector(POLYUNSIGNED L, PolyObject *pt) = 0; void FixLengthAndAddresses(ScanAddress *scan); virtual void RestoreForwardingPointers() = 0; protected: POLYUNSIGNED nitems; POLYUNSIGNED vsize; PolyObject **ptrVector; // This must only be called BEFORE sorting. The pointer vector will be // modified by sorting but the length vector is not. virtual void RestoreLengthWords(void) = 0; static void SortRange(PolyObject * *first, PolyObject * *last); static int CompareItems(const PolyObject * const *a, const PolyObject * const *b); static int qsCompare(const void *a, const void *b) { return CompareItems((const PolyObject * const*)a, (const PolyObject *const *)b); } static void sortTask(GCTaskId*, void *s, void *l) { SortRange((PolyObject **)s, (PolyObject **)l); } }; // DepthVector where the size needs to be held for each item. 
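// --- Illustrative note, not part of this patch. -----------------------------
// The depth encoding above reuses the object header: a header with the weak
// bit set and the mutable bit clear is read as a depth rather than a length,
// so for any depth n that fits in the length field
//     OBJ_IS_DEPTH(OBJ_SET_DEPTH(n))        is true, and
//     OBJ_GET_DEPTH(OBJ_SET_DEPTH(n)) == n  since the length bits carry the depth,
// while the immutable cells this pass handles are assumed never to carry that
// bit combination in a genuine length word.  Forwarding pointers ("tombstones")
// are encoded separately, which is why the older tombstone-based depth encoding
// had to be abandoned for 32-in-64.
// -----------------------------------------------------------------------------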
class DepthVectorWithVariableLength: public DepthVector { public: DepthVectorWithVariableLength() : lengthVector(0) {} virtual ~DepthVectorWithVariableLength() { free(lengthVector); } virtual void RestoreLengthWords(void); virtual void AddToVector(POLYUNSIGNED L, PolyObject *pt); virtual void RestoreForwardingPointers(); protected: POLYUNSIGNED *lengthVector; // Same size as the ptrVector }; class DepthVectorWithFixedLength : public DepthVector { public: DepthVectorWithFixedLength(POLYUNSIGNED l) : length(l) {} virtual void RestoreLengthWords(void); virtual void AddToVector(POLYUNSIGNED L, PolyObject *pt); // It's safe to run this again for the fixed length vectors. virtual void RestoreForwardingPointers() { RestoreLengthWords(); } protected: POLYUNSIGNED length; }; // We have special vectors for the sizes from 1 to FIXEDLENGTHSIZE-1. // Zero-sized and large objects go in depthVectorArray[0]. #define FIXEDLENGTHSIZE 10 class ShareDataClass { public: ShareDataClass(); ~ShareDataClass(); bool RunShareData(PolyObject *root); void AddToVector(POLYUNSIGNED depth, POLYUNSIGNED length, PolyObject *pt); private: std::vector<DepthVector*> depthVectorArray[FIXEDLENGTHSIZE]; POLYUNSIGNED maxVectorSize; }; ShareDataClass::ShareDataClass() { maxVectorSize = 0; } ShareDataClass::~ShareDataClass() { // Free the bitmaps associated with the permanent spaces. for (std::vector<PermanentMemSpace*>::iterator i = gMem.pSpaces.begin(); i < gMem.pSpaces.end(); i++) (*i)->shareBitmap.Destroy(); // Free the depth vectors. for (unsigned i = 0; i < FIXEDLENGTHSIZE; i++) { for (std::vector<DepthVector*>::iterator j = depthVectorArray[i].begin(); j < depthVectorArray[i].end(); j++) delete(*j); } } // Grow the appropriate depth vector if necessary and add the item to it. void ShareDataClass::AddToVector(POLYUNSIGNED depth, POLYUNSIGNED length, PolyObject *pt) { // Select the appropriate vector. Element zero is the variable length vector and is // also used for the, rare, zero length objects. std::vector<DepthVector*> *vectorToUse = &(depthVectorArray[length < FIXEDLENGTHSIZE ? length : 0]); if (depth >= maxVectorSize) maxVectorSize = depth+1; while (vectorToUse->size() <= depth) { try { if (length != 0 && length < FIXEDLENGTHSIZE) vectorToUse->push_back(new DepthVectorWithFixedLength(length)); else vectorToUse->push_back(new DepthVectorWithVariableLength); } catch (std::bad_alloc&) { throw MemoryException(); } } (*vectorToUse)[depth]->AddToVector(length, pt); } // Add an object to a depth vector void DepthVectorWithVariableLength::AddToVector(POLYUNSIGNED L, PolyObject *pt) { ASSERT (this->nitems <= this->vsize); if (this->nitems == this->vsize) { // The vector is full or has not yet been allocated. Grow it by 50%. POLYUNSIGNED new_vsize = this->vsize + this->vsize / 2 + 1; if (new_vsize < 15) new_vsize = 15; // First the length vector. POLYUNSIGNED *newLength = (POLYUNSIGNED *)realloc(this->lengthVector, new_vsize * sizeof(POLYUNSIGNED)); if (newLength == 0) { // The vectors can get large and we may not be able to grow them // particularly if the address space is limited in 32-bit mode. // Try again with just a small increase. new_vsize = this->vsize + 15; newLength = (POLYUNSIGNED *)realloc(this->lengthVector, new_vsize * sizeof(POLYUNSIGNED)); // If that failed give up.
if (newLength == 0) throw MemoryException(); } PolyObject **newPtrVector = (PolyObject * *)realloc (this->ptrVector, new_vsize*sizeof(PolyObject *)); if (newPtrVector == 0) { new_vsize = this->vsize + 15; newPtrVector = (PolyObject **)realloc (this->ptrVector, new_vsize*sizeof(PolyObject *)); // If that failed give up. if (newPtrVector == 0) throw MemoryException(); } this->lengthVector = newLength; this->ptrVector = newPtrVector; this->vsize = new_vsize; } ASSERT (this->nitems < this->vsize); this->lengthVector[this->nitems] = L; this->ptrVector[this->nitems] = pt; this->nitems++; ASSERT (this->nitems <= this->vsize); } // Add an object to a depth vector void DepthVectorWithFixedLength::AddToVector(POLYUNSIGNED L, PolyObject *pt) { ASSERT(this->nitems <= this->vsize); ASSERT(L == length); if (this->nitems == this->vsize) { // The vector is full or has not yet been allocated. Grow it by 50%. POLYUNSIGNED new_vsize = this->vsize + this->vsize / 2 + 1; if (new_vsize < 15) new_vsize = 15; PolyObject **newPtrVector = (PolyObject * *)realloc(this->ptrVector, new_vsize * sizeof(PolyObject *)); if (newPtrVector == 0) { new_vsize = this->vsize + 15; newPtrVector = (PolyObject **)realloc(this->ptrVector, new_vsize * sizeof(PolyObject *)); // If that failed give up. if (newPtrVector == 0) throw MemoryException(); } this->ptrVector = newPtrVector; this->vsize = new_vsize; } ASSERT(this->nitems < this->vsize); this->ptrVector[this->nitems] = pt; this->nitems++; ASSERT(this->nitems <= this->vsize); } // Comparison function used for sorting and also to test whether // two cells can be merged. int DepthVector::CompareItems(const PolyObject *const *a, const PolyObject *const *b) { const PolyObject *x = *a; const PolyObject *y = *b; POLYUNSIGNED lX = x->LengthWord(); POLYUNSIGNED lY = y->LengthWord(); // ASSERT (OBJ_IS_LENGTH(lX)); // ASSERT (OBJ_IS_LENGTH(lY)); if (lX > lY) return 1; // These tests include the flag bits if (lX < lY) return -1; // Return simple bitwise equality. return memcmp(x, y, OBJ_OBJECT_LENGTH(lX)*sizeof(PolyWord)); } // Merge cells with the same contents. POLYUNSIGNED DepthVector::MergeSameItems() { POLYUNSIGNED N = this->nitems; POLYUNSIGNED n = 0; POLYUNSIGNED i = 0; while (i < N) { PolyObject *bestShare = 0; // Candidate to share. MemSpace *bestSpace = 0; POLYUNSIGNED j; for (j = i; j < N; j++) { ASSERT (OBJ_IS_LENGTH(ptrVector[i]->LengthWord())); // Search for identical objects. Don't bother to compare it with itself. if (i != j && CompareItems (&ptrVector[i], &ptrVector[j]) != 0) break; // The order of sharing is significant. // Choose an object in the permanent memory if that is available. // This is necessary to retain the invariant that no object in // the permanent memory points to an object in the temporary heap. // (There may well be pointers to this object elsewhere in the permanent // heap). // Choose the lowest hierarchy value for preference since that // may reduce the size of saved state when resaving already saved // data. // If we can't find a permanent space choose a space that isn't // an allocation space. Otherwise we could break the invariant // that immutable areas never point into the allocation area. 
MemSpace *space = gMem.SpaceForObjectAddress(ptrVector[j]); if (bestSpace == 0) { bestShare = ptrVector[j]; bestSpace = space; } else if (bestSpace->spaceType == ST_PERMANENT) { // Only update if the current space is also permanent and a lower hierarchy if (space->spaceType == ST_PERMANENT && ((PermanentMemSpace *)space)->hierarchy < ((PermanentMemSpace *)bestSpace)->hierarchy) { bestShare = ptrVector[j]; bestSpace = space; } } else if (bestSpace->spaceType == ST_LOCAL) { // Update if the current space is not an allocation space if (space->spaceType != ST_LOCAL || ! ((LocalMemSpace*)space)->allocationSpace) { bestShare = ptrVector[j]; bestSpace = space; } } } POLYUNSIGNED k = j; // Remember the first object that didn't match. // For each identical object set all but the one we want to point to // the shared object. for (j = i; j < k; j++) { ASSERT (OBJ_IS_LENGTH(ptrVector[j]->LengthWord())); if (ptrVector[j] != bestShare) { ptrVector[j]->SetForwardingPtr(bestShare); /* an indirection */ n++; } } i = k; } return n; } // Sort this vector void DepthVector::Sort() { if (nitems > 1) { SortRange(ptrVector, ptrVector + (nitems - 1)); gpTaskFarm->WaitForCompletion(); } // Check // for (POLYUNSIGNED i = 0; i < nitems-1; i++) // ASSERT(CompareItems(vector+i, vector+i+1) <= 0); } inline void swapItems(PolyObject * *i, PolyObject * *j) { PolyObject * t = *i; *i = *j; *j = t; } // Simple parallel quick-sort. "first" and "last" are the first // and last items (inclusive) in the vector. void DepthVector::SortRange(PolyObject * *first, PolyObject * *last) { while (first < last) { if (last-first <= 100) { // Use the standard library function for small ranges. qsort(first, last-first+1, sizeof(PolyObject *), qsCompare); return; } // Select the best pivot from the first, last and middle item // by sorting these three items. We use the middle item as // the pivot and since the first and last items are sorted // by this we can skip them when we start the partitioning. PolyObject * *middle = first + (last-first)/2; if (CompareItems(first, middle) > 0) swapItems(first, middle); if (CompareItems(middle, last) > 0) { swapItems(middle, last); if (CompareItems(first, middle) > 0) swapItems(first, middle); } // Partition the data about the pivot. This divides the // vector into two partitions with all items <= pivot to // the left and all items >= pivot to the right. // Note: items equal to the pivot could be in either partition. PolyObject * *f = first+1; PolyObject * *l = last-1; do { // Find an item we have to move. These loops will always // terminate because testing the middle with itself // will return == 0. while (CompareItems(f, middle/* pivot*/) < 0) f++; while (CompareItems(middle/* pivot*/, l) < 0) l--; // If we haven't finished we need to swap the items. if (f < l) { swapItems(f, l); // If one of these was the pivot item it will have moved to // the other position. if (middle == f) middle = l; else if (middle == l) middle = f; f++; l--; } else if (f == l) { f++; l--; break; } } while (f <= l); // Process the larger partition as a separate task or // by recursion and do the smaller partition by tail // recursion. if (l-first > last-f) { // Lower part is larger gpTaskFarm->AddWorkOrRunNow(sortTask, first, l); first = f; } else { // Upper part is larger gpTaskFarm->AddWorkOrRunNow(sortTask, f, last); last = l; } } } // Set the genuine length word. This overwrites both depth words and forwarding pointers. 
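// --- Illustrative sketch, not part of this patch. ---------------------------
// SortRange above is a parallel quicksort: it picks the pivot by sorting the
// first, middle and last items (median-of-three), partitions about the pivot,
// hands the larger partition to the GC task farm and loops on the smaller one.
// The pivot-selection step, written for plain ints rather than PolyObject
// pointers, is just a three-element sorting network:
static void sortThreeInts(int &a, int &b, int &c)
{
    // Afterwards a <= b <= c, so b is the median of the three inputs.
    int t;
    if (a > b) { t = a; a = b; b = t; }
    if (b > c)
    {
        t = b; b = c; c = t;
        if (a > b) { t = a; a = b; b = t; }
    }
}
// -----------------------------------------------------------------------------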
void DepthVectorWithVariableLength::RestoreLengthWords() { for (POLYUNSIGNED i = 0; i < this->nitems; i++) { PolyObject* obj = ptrVector[i]; obj = gMem.SpaceForObjectAddress(obj)->writeAble(obj); // This could be code. obj->SetLengthWord(lengthVector[i]); // restore genuine length word } } void DepthVectorWithFixedLength::RestoreLengthWords() { for (POLYUNSIGNED i = 0; i < this->nitems; i++) ptrVector[i]->SetLengthWord(length); // restore genuine length word } // Fix up the length word. Then update all addresses to their new location if // we have shared the original destination of the address with something else. void DepthVector::FixLengthAndAddresses(ScanAddress *scan) { RestoreLengthWords(); for (POLYUNSIGNED i = 0; i < this->nitems; i++) { // Fix up all addresses. scan->ScanAddressesInObject(ptrVector[i]); } } // Restore the original length words on forwarding pointers. // After sorting the pointer vector and length vector are no longer // matched so we have to follow the pointers. void DepthVectorWithVariableLength::RestoreForwardingPointers() { for (POLYUNSIGNED i = 0; i < this->nitems; i++) { PolyObject *obj = ptrVector[i]; if (obj->ContainsForwardingPtr()) obj->SetLengthWord(obj->GetForwardingPtr()->LengthWord()); } } // This class is used in two places and is called to ensure that all // object length words have been restored. // Before we actually try to share the immutable objects at a particular depth it // is called to update addresses in those objects to take account of // sharing at lower depths. // When all sharing is complete it is called to update the addresses in // level zero objects, i.e. mutables and code. class ProcessFixupAddress: public ScanAddress { protected: virtual POLYUNSIGNED ScanAddressAt(PolyWord *pt); virtual POLYUNSIGNED ScanCodeAddressAt(PolyObject **pt); virtual PolyObject *ScanObjectAddress(PolyObject *base) { return GetNewAddress(base).AsObjPtr(); } PolyWord GetNewAddress(PolyWord old); }; POLYUNSIGNED ProcessFixupAddress::ScanAddressAt(PolyWord *pt) { *pt = GetNewAddress(*pt); return 0; } // Don't have to do anything for code since it isn't moved. POLYUNSIGNED ProcessFixupAddress::ScanCodeAddressAt(PolyObject **pt) { return 0; } // Returns the new address if the argument is the address of an object that // has moved, otherwise returns the original. PolyWord ProcessFixupAddress::GetNewAddress(PolyWord old) { if (old.IsTagged() || old == PolyWord::FromUnsigned(0)) return old; // Nothing to do. ASSERT(old.IsDataPtr()); PolyObject *obj = old.AsObjPtr(); POLYUNSIGNED L = obj->LengthWord(); if (obj->ContainsForwardingPtr()) // tombstone is a pointer to a shared object { PolyObject *newp = obj->GetForwardingPtr(); // ASSERT (newp->ContainsNormalLengthWord()); return newp; } // Generally each address will point to an object processed at a lower depth. // The exception is if we have a cycle and have assigned the rest of the // structure to a higher depth. // N.B. We return the original address here but this could actually share // with something else and not be retained. if (OBJ_IS_DEPTH(L)) return old; ASSERT (obj->ContainsNormalLengthWord()); // object is not shared return old; } // This class is used to set up the depth vectors for sorting. It subclasses ScanAddress // in order to be able to use that for code objects since they are complicated but it // handles all the other object types itself. It scans them depth-first using an explicit stack. 
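// --- Illustrative sketch, not part of this patch. ---------------------------
// ProcessAddToVector below avoids deep C++ recursion by keeping its own stack
// of partially processed objects.  The general shape of such a scan, with all
// names invented for illustration, is:
//
//     void scanAll(Node *root)
//     {
//         std::vector<Node*> pending;              // explicit stack
//         pending.push_back(root);
//         while (!pending.empty())
//         {
//             Node *n = pending.back(); pending.pop_back();
//             for (Node *child : childrenOf(n))    // hypothetical accessor
//                 if (!alreadyVisited(child))      // hypothetical mark test
//                     pending.push_back(child);
//         }
//     }
//
// The real code is more involved because an object is only popped, and given
// its depth, once the depths of everything it references are known.
// -----------------------------------------------------------------------------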
class ProcessAddToVector: public ScanAddress { public: ProcessAddToVector(ShareDataClass *p): m_parent(p), addStack(0), stackSize(0), asp(0) {} ~ProcessAddToVector(); // These are used when scanning code areas. They return either // a length or a possibly updated address. virtual POLYUNSIGNED ScanAddressAt(PolyWord *pt) { (void)AddPolyWordToDepthVectors(*pt); return 0; } virtual PolyObject *ScanObjectAddress(PolyObject *base) { (void)AddObjectToDepthVector(base); return base; } + virtual POLYUNSIGNED ScanCodeAddressAt(PolyObject** pt) + { *pt = ScanObjectAddress(*pt); return 0; } void ProcessRoot(PolyObject *root); protected: // Process an address and return the "depth". POLYUNSIGNED AddPolyWordToDepthVectors(PolyWord old); POLYUNSIGNED AddObjectToDepthVector(PolyObject *obj); void PushToStack(PolyObject *obj); ShareDataClass *m_parent; PolyObject **addStack; unsigned stackSize; unsigned asp; }; ProcessAddToVector::~ProcessAddToVector() { // Normally the stack will be empty. However if we have run out of // memory and thrown an exception we may well have items left. // We have to remove the mark bits otherwise it will mess up any // subsequent GC. for (unsigned i = 0; i < asp; i++) { PolyObject *obj = addStack[i]; if (obj->LengthWord() & _OBJ_GC_MARK) obj->SetLengthWord(obj->LengthWord() & (~_OBJ_GC_MARK)); } free(addStack); // Now free the stack } POLYUNSIGNED ProcessAddToVector::AddPolyWordToDepthVectors(PolyWord old) { // If this is a tagged integer or an IO pointer that's simply a constant. if (old.IsTagged() || old == PolyWord::FromUnsigned(0)) return 0; return AddObjectToDepthVector(old.AsObjPtr()); } // Either adds an object to the stack or, if its depth is known, adds it // to the depth vector and returns the depth. // We use _OBJ_GC_MARK to detect when we have visited a cell but not yet // computed the depth. We have to be careful that this bit is removed // before we finish in the case that we run out of memory and throw an // exception. PushToStack may throw the exception if the stack needs to // grow. POLYUNSIGNED ProcessAddToVector::AddObjectToDepthVector(PolyObject *obj) { MemSpace *space = gMem.SpaceForObjectAddress(obj); if (space == 0) return 0; POLYUNSIGNED L = obj->LengthWord(); if (OBJ_IS_DEPTH(L)) // tombstone contains genuine depth or 0. return OBJ_GET_DEPTH(L); if (obj->LengthWord() & _OBJ_GC_MARK) return 0; // Marked but not yet scanned. Circular structure. ASSERT (OBJ_IS_LENGTH(L)); if (obj->IsMutable()) { // Mutable data in the local or permanent areas. Ignore byte objects or // word objects containing only ints. if (obj->IsWordObject()) { bool containsAddress = false; for (POLYUNSIGNED j = 0; j < OBJ_OBJECT_LENGTH(L) && !containsAddress; j++) containsAddress = ! obj->Get(j).IsTagged(); if (containsAddress) { // Add it to the vector so we will update any addresses it contains. m_parent->AddToVector(0, L, obj); // and follow any addresses to try to merge those. PushToStack(obj); obj->SetLengthWord(L | _OBJ_GC_MARK); // To prevent rescan } // If we don't add it to the vector we mustn't set _OBJ_GC_MARK. } return 0; // Level is zero } if (space->spaceType == ST_PERMANENT && ((PermanentMemSpace*)space)->hierarchy == 0) { // Immutable data in the permanent area can't be merged // because it's read only. We need to follow the addresses // because they may point to mutable areas containing data // that can be. A typical case is the root function pointing // at the global name table containing new declarations. Bitmap *bm = &((PermanentMemSpace*)space)->shareBitmap; if (! 
bm->TestBit((PolyWord*)obj - space->bottom)) { bm->SetBit((PolyWord*)obj - space->bottom); if (! obj->IsByteObject()) PushToStack(obj); } return 0; } /* There's a problem sharing code objects if they have relative calls/jumps in them to other code. The code of two functions may be identical (e.g. they both call functions 100 bytes ahead) and so they will appear the same but if the functions they jump to are different they are actually different. For that reason we don't share code segments. DCJM 4/1/01 */ if (obj->IsCodeObject()) { // We want to update addresses in the code segment. m_parent->AddToVector(0, L, obj); PushToStack(obj); gMem.SpaceForObjectAddress(obj)->writeAble(obj)->SetLengthWord(L | _OBJ_GC_MARK); // To prevent rescan return 0; } // Byte objects always have depth 1 and can't contain addresses. if (obj->IsByteObject()) { m_parent->AddToVector (1, L, obj);// add to vector at correct depth obj->SetLengthWord(OBJ_SET_DEPTH(1)); return 1; } ASSERT(OBJ_IS_WORD_OBJECT(L) || OBJ_IS_CLOSURE_OBJECT(L)); // That leaves immutable data objects. PushToStack(obj); obj->SetLengthWord(L | _OBJ_GC_MARK); // To prevent rescan return 0; } // Adds an object to the stack. void ProcessAddToVector::PushToStack(PolyObject *obj) { if (asp == stackSize) { if (addStack == 0) { addStack = (PolyObject**)malloc(sizeof(PolyObject*) * 100); if (addStack == 0) throw MemoryException(); stackSize = 100; } else { unsigned newSize = stackSize+100; PolyObject** newStack = (PolyObject**)realloc(addStack, sizeof(PolyObject*) * newSize); if (newStack == 0) throw MemoryException(); stackSize = newSize; addStack = newStack; } } ASSERT(asp < stackSize); addStack[asp++] = obj; } // Processes the root and anything reachable from it. Addresses are added to the // explicit stack if an object has not yet been processed. Most of this function // is about processing the stack. void ProcessAddToVector::ProcessRoot(PolyObject *root) { // Mark the initial object AddObjectToDepthVector(root); // Process the stack until it's empty. while (asp != 0) { // Pop it from the stack. PolyObject *obj = addStack[asp-1]; if (obj->IsCodeObject()) { // Code cells are now only found in the code area. /* There's a problem sharing code objects if they have relative calls/jumps in them to other code. The code of two functions may be identical (e.g. they both call functions 100 bytes ahead) and so they will appear the same but if the functions they jump to are different they are actually different. For that reason we don't share code segments. DCJM 4/1/01 */ asp--; // Pop it because we'll process it completely ScanAddressesInObject(obj); // If it's local set the depth with the value zero. It has already been // added to the zero depth vector. if (obj->LengthWord() & _OBJ_GC_MARK) gMem.SpaceForObjectAddress(obj)->writeAble(obj)->SetLengthWord(OBJ_SET_DEPTH(0)); // Now scanned } else { POLYUNSIGNED length = obj->Length(); PolyWord *pt = (PolyWord*)obj; unsigned osp = asp; if (obj->IsClosureObject()) { // The first word of a closure is a code pointer. We don't share code but // we do want to share anything reachable from the constants. AddObjectToDepthVector(*(PolyObject**)pt); pt += sizeof(PolyObject*) / sizeof(PolyWord); length -= sizeof(PolyObject*) / sizeof(PolyWord); } if (((obj->LengthWord() & _OBJ_GC_MARK) && !obj->IsMutable())) { // Immutable local objects. These can be shared. We need to compute the // depth by computing the maximum of the depth of all the addresses in it. 
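The depth computation that follows (together with the explicit stack machinery above) is the iterative form of a simple recursion over the immutable heap: a byte (leaf) cell has depth 1, any other immutable cell has depth one greater than the maximum depth of the cells it points at, and cells that end up at the same depth become candidates for merging once their contents compare equal. A minimal standalone sketch of that recursion, using ordinary C++ containers in place of the heap structures above and with hypothetical names (cycles, which the real code cuts off with _OBJ_GC_MARK, are ignored here):

#include <algorithm>
#include <map>
#include <vector>

// Hypothetical stand-in for an immutable heap cell: a byte (leaf) cell simply has no fields.
struct Cell { std::vector<const Cell*> fields; };

// depth(leaf) = 1; depth(cell) = 1 + max(depth of each address field).
static unsigned Depth(const Cell* c, std::map<const Cell*, unsigned>& memo)
{
    auto found = memo.find(c);
    if (found != memo.end()) return found->second;       // already computed
    unsigned d = 0;
    for (const Cell* f : c->fields) d = std::max(d, Depth(f, memo));
    return memo[c] = d + 1;                              // one more for this cell
}

Because the depth vectors are then processed from depth 1 upwards, by the time a vector is sorted every address inside its cells already refers to the chosen representative at the lower depths, so comparing the raw contents is enough to find the shareable cells at that level.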
POLYUNSIGNED depth = 0; while (length != 0 && osp == asp) { POLYUNSIGNED d = AddPolyWordToDepthVectors(*pt); if (d > depth) depth = d; pt++; length--; } if (osp == asp) { // We've finished it asp--; // Pop this item. depth++; // One more for this object obj->SetLengthWord(obj->LengthWord() & (~_OBJ_GC_MARK)); m_parent->AddToVector(depth, obj->LengthWord() & (~_OBJ_GC_MARK), obj); obj->SetLengthWord(OBJ_SET_DEPTH(depth)); } } else { // Mutable or non-local objects. These have depth zero. Local objects have // _OBJ_GC_MARK in their header. Immutable permanent objects cannot be // modified so we don't set the depth. Mutable objects are added to the // depth vectors even though they aren't shared so that they will be // updated if they point to immutables that have been shared. while (length != 0) { if (!(*pt).IsTagged()) { // If we've already pushed an address break now if (osp != asp) break; // Process the address and possibly push it AddPolyWordToDepthVectors(*pt); } pt++; length--; } if (length == 0) { // We've finished it if (osp != asp) { ASSERT(osp == asp - 1); addStack[osp - 1] = addStack[osp]; } asp--; // Pop this item. if (obj->LengthWord() & _OBJ_GC_MARK) obj->SetLengthWord(OBJ_SET_DEPTH(0)); } } } } } // This is called by the root thread to do the work. bool ShareDataClass::RunShareData(PolyObject *root) { // We use a bitmap to indicate when we've visited an object to avoid // infinite recursion in cycles in the data. for (std::vector::iterator i = gMem.pSpaces.begin(); i < gMem.pSpaces.end(); i++) { PermanentMemSpace *space = *i; if (!space->isMutable && space->hierarchy == 0) { if (! space->shareBitmap.Create(space->spaceSize())) return false; } } POLYUNSIGNED totalObjects = 0; POLYUNSIGNED totalShared = 0; // Build the vectors from the immutable objects. bool success = true; try { ProcessAddToVector addToVector(this); addToVector.ProcessRoot(root); } catch (MemoryException &) { // If we ran out of memory we may still be able to process what we have. // That will also do any clean-up. success = false; } ProcessFixupAddress fixup; for (POLYUNSIGNED depth = 1; depth < maxVectorSize; depth++) { for (unsigned j = 0; j < FIXEDLENGTHSIZE; j++) { if (depth < depthVectorArray[j].size()) { DepthVector *vec = depthVectorArray[j][depth]; // Set the length word and update all addresses. vec->FixLengthAndAddresses(&fixup); vec->Sort(); POLYUNSIGNED n = vec->MergeSameItems(); if ((debugOptions & DEBUG_SHARING) && n > 0) Log("Sharing: Level %4" POLYUFMT ", size %3u, Objects %6" POLYUFMT ", Shared %6" POLYUFMT " (%1.0f%%)\n", depth, j, vec->ItemCount(), n, (float)n / (float)vec->ItemCount() * 100.0); totalObjects += vec->ItemCount(); totalShared += n; } } } if (debugOptions & DEBUG_SHARING) Log("Sharing: Maximum level %4" POLYUFMT ",\n", maxVectorSize); /* At this stage, we have fixed up most but not all of the forwarding pointers. The ones that we haven't fixed up arise from situations such as the following: X -> Y <-> Z i.e. Y and Z form a loop, and X is isomorphic to Z. When we assigned the depths, we have to arbitrarily break the loop between Y and Z. Suppose Y is assigned to level 1, and Z is assigned to level 2. When we process level 1 and fixup Y, there's nothing to do, since Z is still an ordinary object. However when we process level 2, we find that X and Z are isomorphic so we arbitrarily choose one of them and turn it into a "tombstone" pointing at the other. If we change Z into the tombstone, then Y now contains a pointer that needs fixing up. 
That's why we need the second fixup pass. Note also that if we had broken the loop the other way, we would have assigned Z to level 1, Y to level 2 and X to level 3, so we would have missed the chance to share Z and X. Perhaps that's why running the program repeatedly sometimes finds extra things to share? SPF 26/1/95 */ /* We have updated the addresses in objects with non-zero level so they point to the single occurrence but we need to do the same with level 0 objects (mutables and code). */ for (unsigned j = 0; j < FIXEDLENGTHSIZE; j++) { if (! depthVectorArray[j].empty()) { DepthVector *v = depthVectorArray[j][0]; // Log this because it could be very large. if (debugOptions & DEBUG_SHARING) Log("Sharing: Level %4" POLYUFMT ", size %3u, Objects %6" POLYUFMT "\n", 0ul, j, v->ItemCount()); v->FixLengthAndAddresses(&fixup); } } /* Previously we made a complete scan over the memory updating any addresses so that if we have shared two substructures within our root we would also share any external pointers. This has been removed but we have to reinstate the length words we've overwritten with forwarding pointers because there may be references to unshared objects from outside. */ for (POLYUNSIGNED d = 1; d < maxVectorSize; d++) { for (unsigned j = 0; j < FIXEDLENGTHSIZE; j++) { if (d < depthVectorArray[j].size()) { DepthVector *v = depthVectorArray[j][d]; v->RestoreForwardingPointers(); } } } if (debugOptions & DEBUG_SHARING) Log ("Sharing: Total Objects %6" POLYUFMT ", Total Shared %6" POLYUFMT " (%1.0f%%)\n", totalObjects, totalShared, (float)totalShared / (float)totalObjects * 100.0); return success; // Succeeded. } class ShareRequest: public MainThreadRequest { public: ShareRequest(Handle root): MainThreadRequest(MTP_SHARING), shareRoot(root), result(false) {} virtual void Perform() { ShareDataClass s; // Do a full GC. If we have a large heap the allocation of the vectors // can cause paging. Doing this now reduces the heap and discards the // allocation spaces. It may be overkill if we are applying the sharing // to a small root but generally it seems to be applied to the whole heap. FullGCForShareCommonData(); gcProgressBeginOtherGC(); // Set the phase to "other" now the GC is complete. // Now do the sharing. result = s.RunShareData(shareRoot->WordP()); } Handle shareRoot; bool result; }; // ShareData. This is the main entry point. // Because this can recurse deeply it needs to be run by the main thread. // Also it manipulates the heap in ways that could mess up other threads // so we need to stop them before executing this. void ShareData(TaskData *taskData, Handle root) { if (! root->Word().IsDataPtr()) return; // Nothing to do. We could do handle a code pointer but it shouldn't occur. // Request the main thread to do the sharing. ShareRequest request(root); processes->MakeRootRequest(taskData, &request); // Raise an exception if it failed. if (! request.result) raise_exception_string(taskData, EXC_Fail, "Insufficient memory"); } // RTS call entry. POLYUNSIGNED PolyShareCommonData(FirstArgument threadId, PolyWord root) { TaskData *taskData = TaskData::FindTaskForId(threadId); ASSERT(taskData != 0); taskData->PreRTSCall(); Handle reset = taskData->saveVec.mark(); try { if (! root.IsDataPtr()) return TAGGED(0).AsUnsigned(); // Nothing to do. // Request the main thread to do the sharing. ShareRequest request(taskData->saveVec.push(root)); processes->MakeRootRequest(taskData, &request); // Raise an exception if it failed. if (! 
request.result) raise_exception_string(taskData, EXC_Fail, "Insufficient memory"); } catch (...) { } // If an ML exception is raised taskData->saveVec.reset(reset); taskData->PostRTSCall(); return TAGGED(0).AsUnsigned(); } struct _entrypts shareDataEPT[] = { { "PolyShareCommonData", (polyRTSFunction)&PolyShareCommonData}, { NULL, NULL} // End of list. }; diff --git a/libpolyml/x86_dep.cpp b/libpolyml/x86_dep.cpp index 4d2f7dff..7565c20e 100644 --- a/libpolyml/x86_dep.cpp +++ b/libpolyml/x86_dep.cpp @@ -1,1492 +1,1483 @@ /* Title: Machine dependent code for i386 and X64 under Windows and Unix Copyright (c) 2000-7 Cambridge University Technical Services Limited Further work copyright David C. J. Matthews 2011-20 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #ifdef HAVE_STDLIB_H #include #endif #include #ifdef HAVE_SIGNAL_H #include #endif #ifdef HAVE_ASSERT_H #include #define ASSERT(x) assert(x) #else #define ASSERT(x) #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_ERRNO_H #include #endif #if (defined(_WIN32)) #include #include #endif #include "globals.h" #include "run_time.h" #include "diagnostics.h" #include "processes.h" #include "profiling.h" #include "machine_dep.h" #include "scanaddrs.h" #include "memmgr.h" #include "rtsentry.h" #include "bytecode.h" #include "sys.h" // Temporary /********************************************************************** * * Register usage: * * %Reax: First argument to function. Result of function call. * %Rebx: Second argument to function. * %Recx: General register * %Redx: Closure pointer in call. * %Rebp: Points to memory used for extra registers * %Resi: General register. * %Redi: General register. * %Resp: Stack pointer. * The following apply only on the X64 * %R8: Third argument to function * %R9: Fourth argument to function * %R10: Fifth argument to function * %R11: General register * %R12: General register * %R13: General register * %R14: General register * %R15: Memory allocation pointer * **********************************************************************/ #ifdef HOSTARCHITECTURE_X86_64 struct fpSaveArea { double fpregister[7]; // Save area for xmm0-6 }; #else // Structure of floating point save area. // This is dictated by the hardware. typedef byte fpregister[10]; struct fpSaveArea { unsigned short cw; unsigned short _unused0; unsigned short sw; unsigned short _unused1; unsigned short tw; unsigned short _unused2; unsigned fip; unsigned short fcs0; unsigned short _unused3; unsigned foo; unsigned short fcs1; unsigned short _unused4; fpregister registers[8]; }; #endif /* the amount of ML stack space to reserve for registers, C exception handling etc. The compiler requires us to reserve 2 stack-frames worth (2 * 20 words). We actually reserve slightly more than this. 
*/ #if (!defined(_WIN32) && !defined(HAVE_SIGALTSTACK)) // If we can't handle signals on a separate stack make sure there's space // on the Poly stack. #define OVERFLOW_STACK_SIZE (50+1024) #else #define OVERFLOW_STACK_SIZE 50 #endif class X86TaskData; // This is passed as the argument vector to X86AsmSwitchToPoly. // The offsets are built into the assembly code and the code-generator. // localMpointer and stackPtr are updated before control returns to C. typedef struct _AssemblyArgs { public: PolyWord *localMpointer; // Allocation ptr + 1 word stackItem *handlerRegister; // Current exception handler PolyWord *localMbottom; // Base of memory + 1 word stackItem *stackLimit; // Lower limit of stack stackItem exceptionPacket; // Set if there is an exception byte unusedRequestCode; // No longer used. byte unusedFlag; // No longer used byte returnReason; // Reason for returning from ML. byte unusedRestore; // No longer used. uintptr_t saveCStack; // Saved C stack frame. PolyWord threadId; // My thread id. Saves having to call into RTS for it. stackItem *stackPtr; // Current stack pointer byte *enterInterpreter; // These are filled in with the functions. byte *heapOverFlowCall; byte *stackOverFlowCall; byte *stackOverFlowCallEx; byte *trapHandlerEntry; // Saved registers, where applicable. stackItem p_rax; stackItem p_rbx; stackItem p_rcx; stackItem p_rdx; stackItem p_rsi; stackItem p_rdi; #ifdef HOSTARCHITECTURE_X86_64 stackItem p_r8; stackItem p_r9; stackItem p_r10; stackItem p_r11; stackItem p_r12; stackItem p_r13; stackItem p_r14; #endif struct fpSaveArea p_fp; } AssemblyArgs; // These next few are temporarily added for the interpreter // This duplicates some code in reals.cpp but is now updated. #define DOUBLESIZE (sizeof(double)/sizeof(POLYUNSIGNED)) union realdb { double dble; POLYUNSIGNED puns[DOUBLESIZE]; }; #define LGWORDSIZE (sizeof(uintptr_t) / sizeof(PolyWord)) class X86TaskData: public TaskData, ByteCodeInterpreter { public: X86TaskData(); unsigned allocReg; // The register to take the allocated space. POLYUNSIGNED allocWords; // The words to allocate. AssemblyArgs assemblyInterface; int saveRegisterMask; // Registers that need to be updated by a GC. virtual void GarbageCollect(ScanAddress *process); void ScanStackAddress(ScanAddress *process, stackItem &val, StackSpace *stack); virtual void EnterPolyCode(); // Start running ML virtual void InterruptCode(); virtual bool AddTimeProfileCount(SIGNALCONTEXT *context); virtual void InitStackFrame(TaskData *parentTask, Handle proc); virtual void SetException(poly_exn *exc); - // Release a mutex in exactly the same way as compiler code - virtual POLYUNSIGNED AtomicDecrement(PolyObject* mutexp); - // Reset a mutex to one. This needs to be atomic with respect to the - // atomic increment and decrement instructions. + // Atomic exchange-and-add. Used in the process module to release a mutex and in the + // interpreter. It needs to use the same instruction that compiled code uses. + virtual POLYSIGNED AtomicExchAdd(PolyObject* mutexp, POLYSIGNED incr); + // Reset a mutex to zero. This needs to be atomic with respect to AtomicExchAdd. virtual void AtomicReset(PolyObject* mutexp); - // This is actually only used in the interpreter. - virtual POLYUNSIGNED AtomicIncrement(PolyObject* mutexp); // Return the minimum space occupied by the stack. Used when setting a limit. // N.B. This is PolyWords not native words. 
virtual uintptr_t currentStackSpace(void) const { return (this->stack->top - (PolyWord*)assemblyInterface.stackPtr) + OVERFLOW_STACK_SIZE*sizeof(uintptr_t)/sizeof(PolyWord); } // Increment the profile count for an allocation. Also now used for mutex contention. virtual void addProfileCount(POLYUNSIGNED words) { addSynchronousCount(assemblyInterface.stackPtr[0].codeAddr, words); } // PreRTSCall: After calling from ML to the RTS we need to save the current heap pointer virtual void PreRTSCall(void) { TaskData::PreRTSCall(); SaveMemRegisters(); } // PostRTSCall: Before returning we need to restore the heap pointer. // If there has been a GC in the RTS call we need to create a new heap area. virtual void PostRTSCall(void) { SetMemRegisters(); TaskData::PostRTSCall(); } virtual void CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length); void HeapOverflowTrap(byte *pcPtr); void StackOverflowTrap(uintptr_t space); void SetMemRegisters(); void SaveMemRegisters(); void SetRegisterMask(); void HandleTrap(); // ByteCode overrides. The interpreter and native code states need to be in sync. // The interpreter is only used during the initial bootstrap. virtual void ClearExceptionPacket() { assemblyInterface.exceptionPacket = TAGGED(0); } virtual PolyWord GetExceptionPacket() { return assemblyInterface.exceptionPacket; } virtual stackItem* GetHandlerRegister() { return assemblyInterface.handlerRegister; } virtual void SetHandlerRegister(stackItem* hr) { assemblyInterface.handlerRegister = hr; } // Check and grow the stack if necessary. Process any interupts. virtual void HandleStackOverflow(uintptr_t space) { StackOverflowTrap(space); } void Interpret(); void EndBootStrap() { mixedCode = true; } PLock interruptLock; stackItem *get_reg(int n); stackItem *®SP() { return assemblyInterface.stackPtr; } stackItem ®AX() { return assemblyInterface.p_rax; } stackItem ®BX() { return assemblyInterface.p_rbx; } stackItem ®CX() { return assemblyInterface.p_rcx; } stackItem ®DX() { return assemblyInterface.p_rdx; } stackItem ®SI() { return assemblyInterface.p_rsi; } stackItem ®DI() { return assemblyInterface.p_rdi; } #ifdef HOSTARCHITECTURE_X86_64 stackItem ®8() { return assemblyInterface.p_r8; } stackItem ®9() { return assemblyInterface.p_r9; } stackItem ®10() { return assemblyInterface.p_r10; } stackItem ®11() { return assemblyInterface.p_r11; } stackItem ®12() { return assemblyInterface.p_r12; } stackItem ®13() { return assemblyInterface.p_r13; } stackItem ®14() { return assemblyInterface.p_r14; } #endif #if (defined(_WIN32)) DWORD savedErrno; #else int savedErrno; #endif }; class X86Dependent: public MachineDependent { public: X86Dependent(): mustInterpret(false) {} // Create a task data object. virtual TaskData *CreateTaskData(void) { return new X86TaskData(); } // Initial size of stack in PolyWords virtual unsigned InitialStackSize(void) { return (128+OVERFLOW_STACK_SIZE) * sizeof(uintptr_t) / sizeof(PolyWord); } virtual void ScanConstantsWithinCode(PolyObject *addr, PolyObject *oldAddr, POLYUNSIGNED length, ScanAddress *process); virtual void SetBootArchitecture(char arch, unsigned wordLength); virtual Architectures MachineArchitecture(void); // During the first bootstrap phase this is interpreted. 
bool mustInterpret; }; static X86Dependent x86Dependent; MachineDependent* machineDependent = &x86Dependent; Architectures X86Dependent::MachineArchitecture(void) { if (mustInterpret) return MA_Interpreted; #ifndef HOSTARCHITECTURE_X86_64 return MA_I386; #elif defined(POLYML32IN64) return MA_X86_64_32; #else return MA_X86_64; #endif } void X86Dependent::SetBootArchitecture(char arch, unsigned wordLength) { if (arch == 'I') mustInterpret = true; else if (arch != 'X') Crash("Boot file has unexpected architecture code: %c", arch); } // Values for the returnReason byte enum RETURN_REASON { RETURN_HEAP_OVERFLOW = 1, RETURN_STACK_OVERFLOW = 2, RETURN_STACK_OVERFLOWEX = 3, RETURN_ENTER_INTERPRETER = 4 }; extern "C" { // These are declared in the assembly code segment. void X86AsmSwitchToPoly(void *); int X86AsmCallExtraRETURN_ENTER_INTERPRETER(void); int X86AsmCallExtraRETURN_HEAP_OVERFLOW(void); int X86AsmCallExtraRETURN_STACK_OVERFLOW(void); int X86AsmCallExtraRETURN_STACK_OVERFLOWEX(void); void X86TrapHandler(PolyWord threadId); }; X86TaskData::X86TaskData(): ByteCodeInterpreter(&assemblyInterface.stackPtr, &assemblyInterface.stackLimit), allocReg(0), allocWords(0), saveRegisterMask(0) { assemblyInterface.enterInterpreter = (byte*)X86AsmCallExtraRETURN_ENTER_INTERPRETER; assemblyInterface.heapOverFlowCall = (byte*)X86AsmCallExtraRETURN_HEAP_OVERFLOW; assemblyInterface.stackOverFlowCall = (byte*)X86AsmCallExtraRETURN_STACK_OVERFLOW; assemblyInterface.stackOverFlowCallEx = (byte*)X86AsmCallExtraRETURN_STACK_OVERFLOWEX; assemblyInterface.trapHandlerEntry = (byte*)X86TrapHandler; savedErrno = 0; interpreterPc = 0; mixedCode = !x86Dependent.mustInterpret; } void X86TaskData::GarbageCollect(ScanAddress *process) { TaskData::GarbageCollect(process); // Process the parent first ByteCodeInterpreter::GarbageCollect(process); assemblyInterface.threadId = threadObject; if (stack != 0) { ASSERT(assemblyInterface.stackPtr >= (stackItem*)stack->bottom && assemblyInterface.stackPtr <= (stackItem*)stack->top); // Now the values on the stack. for (stackItem *q = assemblyInterface.stackPtr; q < (stackItem*)stack->top; q++) ScanStackAddress(process, *q, stack); } // Register mask for (int i = 0; i < 16; i++) { if (saveRegisterMask & (1 << i)) ScanStackAddress(process, *get_reg(i), stack); } } // Process a value within the stack. void X86TaskData::ScanStackAddress(ScanAddress *process, stackItem &stackItem, StackSpace *stack) { // We may have return addresses on the stack which could look like // tagged values. Check whether the value is in the code area before // checking whether it is untagged. #ifdef POLYML32IN64 // In 32-in-64 return addresses always have the top 32 bits non-zero. if (stackItem.argValue < ((uintptr_t)1 << 32)) { // It's either a tagged integer or an object pointer. if (stackItem.w().IsDataPtr()) { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } } else { // Could be a code address or a stack address. MemSpace *space = gMem.SpaceForAddress(stackItem.codeAddr - 1); if (space == 0 || space->spaceType != ST_CODE) return; PolyObject *obj = gMem.FindCodeObject(stackItem.codeAddr); ASSERT(obj != 0); // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } #else // The -1 here is because we may have a zero-sized cell in the last // word of a space. MemSpace *space = gMem.SpaceForAddress(stackItem.codeAddr-1); if (space == 0) return; // In particular we may have one of the assembly code addresses. 
if (space->spaceType == ST_CODE) { PolyObject *obj = gMem.FindCodeObject(stackItem.codeAddr); // If it is actually an integer it might be outside a valid code object. if (obj == 0) { ASSERT(stackItem.w().IsTagged()); // It must be an integer } else // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } else if (space->spaceType == ST_LOCAL && stackItem.w().IsDataPtr()) // Local values must be word addresses. { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } #endif } // Copy a stack void X86TaskData::CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length) { /* Moves a stack, updating all references within the stack */ #ifdef POLYML32IN64 old_length = old_length / 2; new_length = new_length / 2; #endif stackItem *old_base = (stackItem *)old_stack; stackItem *new_base = (stackItem*)new_stack; stackItem *old_top = old_base + old_length; /* Calculate the offset of the new stack from the old. If the frame is being extended objects in the new frame will be further up the stack than in the old one. */ uintptr_t offset = new_base - old_base + new_length - old_length; stackItem *oldStackPtr = assemblyInterface.stackPtr; // Adjust the stack pointer and handler pointer since these point into the stack. assemblyInterface.stackPtr = assemblyInterface.stackPtr + offset; assemblyInterface.handlerRegister = assemblyInterface.handlerRegister + offset; // We need to adjust any values on the stack that are pointers within the stack. // Skip the unused part of the stack. size_t i = oldStackPtr - old_base; ASSERT (i <= old_length); i = old_length - i; stackItem *old = oldStackPtr; stackItem *newp = assemblyInterface.stackPtr; while (i--) { stackItem old_word = *old++; if (old_word.w().IsDataPtr() && old_word.stackAddr >= old_base && old_word.stackAddr <= old_top) old_word.stackAddr = old_word.stackAddr + offset; else if (old_word.w().IsDataPtr() && IsHeapAddress(old_word.stackAddr)) { stackItem *addr = (stackItem*)old_word.w().AsStackAddr(); if (addr >= old_base && addr <= old_top) { addr += offset; old_word = PolyWord::FromStackAddr((PolyWord*)addr); } } *newp++ = old_word; } ASSERT(old == ((stackItem*)old_stack)+old_length); ASSERT(newp == ((stackItem*)new_stack)+new_length); // And change any registers that pointed into the old stack for (int j = 0; j < 16; j++) { if (saveRegisterMask & (1 << j)) { stackItem *regAddr = get_reg(j); stackItem old_word = *regAddr; if (old_word.w().IsDataPtr() && old_word.stackAddr >= old_base && old_word.stackAddr <= old_top) old_word.stackAddr = old_word.stackAddr + offset; else if (old_word.w().IsDataPtr() && IsHeapAddress(old_word.stackAddr)) { stackItem *addr = (stackItem*)old_word.w().AsStackAddr(); if (addr >= old_base && addr <= old_top) { addr += offset; old_word = PolyWord::FromStackAddr((PolyWord*)addr); } } *regAddr = old_word; } } } void X86TaskData::EnterPolyCode() /* Called from "main" to enter the code. */ { if (x86Dependent.mustInterpret) { PolyWord closure = assemblyInterface.p_rdx; *(--assemblyInterface.stackPtr) = closure; /* Closure address */ interpreterPc = *(POLYCODEPTR*)closure.AsObjPtr(); Interpret(); ASSERT(0); // Should never return } SetMemRegisters(); // Enter the ML code. 
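A note on the byte pattern tested for in Interpret below and again in ScanConstantsWithinCode: interpreterPc[0] == 0xff && interpreterPc[1] == 0x55 && (interpreterPc[2] == 0x48 || interpreterPc[2] == 0x24) recognises the enter-int sequence at the start of an interpreted code object. FF 55 disp8 decodes as call *disp8(%rbp) on 64-bit targets, or call *disp8(%ebp) on 32-bit ones, presumably reaching the enterInterpreter slot of the AssemblyArgs block. A hypothetical helper capturing the same test:

// Illustrative only: true if the code at p begins with the enter-int sequence.
static inline bool IsEnterIntSequence(const unsigned char* p)
{
    return p[0] == 0xff && p[1] == 0x55 && (p[2] == 0x48 || p[2] == 0x24);
}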
X86AsmSwitchToPoly(&this->assemblyInterface); // This should never return ASSERT(0); } void X86TaskData::Interpret() { while (true) { switch (RunInterpreter(this)) { case ReturnCall: // After the call there will be an enter-int instruction so that when this // returns we will re-enter the interpreter. The number of arguments for // this call is after that. ASSERT(interpreterPc[0] == 0xff); numTailArguments = interpreterPc[3]; case ReturnTailCall: { ClearExceptionPacket(); // Pop the closure. PolyWord closureWord = *assemblyInterface.stackPtr++; PolyObject* closure = closureWord.AsObjPtr(); interpreterPc = *(POLYCODEPTR*)closure; if (interpreterPc[0] == 0xff && interpreterPc[1] == 0x55 && (interpreterPc[2] == 0x48 || interpreterPc[2] == 0x24)) { // If the code we're going to is interpreted push back the closure and // continue. assemblyInterface.stackPtr--; continue; } assemblyInterface.p_rdx = closureWord; // Put closure in the closure reg. // Pop the return address. POLYCODEPTR originalReturn = (assemblyInterface.stackPtr++)->codeAddr; // Because of the way the build process works we only ever call functions with a single argument. ASSERT(numTailArguments == 1); assemblyInterface.p_rax = *(assemblyInterface.stackPtr++); (*(--assemblyInterface.stackPtr)).codeAddr = originalReturn; // Push return address to caller (*(--assemblyInterface.stackPtr)).codeAddr = *(POLYCODEPTR*)closure; // Entry point to callee interpreterPc = 0; // No longer in the interpreter (See SaveMemRegs) return; } case ReturnReturn: { ClearExceptionPacket(); if (interpreterPc[0] == 0xff && interpreterPc[1] == 0x55 && (interpreterPc[2] == 0x48 || interpreterPc[2] == 0x24)) continue; // Get the return value from the stack and replace it by the // address we're going to. assemblyInterface.p_rax = assemblyInterface.stackPtr[0]; assemblyInterface.stackPtr[0].codeAddr = interpreterPc; interpreterPc = 0; // No longer in the interpreter (See SaveMemRegs) return; } } } } // Called from the assembly code as a result of a trap i.e. a request for // a GC or to extend the stack. void X86TrapHandler(PolyWord threadId) { X86TaskData* taskData = (X86TaskData*)TaskData::FindTaskForId(threadId); taskData->HandleTrap(); } void X86TaskData::HandleTrap() { SaveMemRegisters(); // Update globals from the memory registers. switch (this->assemblyInterface.returnReason) { case RETURN_HEAP_OVERFLOW: // The heap has overflowed. SetRegisterMask(); this->HeapOverflowTrap(assemblyInterface.stackPtr[0].codeAddr); // Computes a value for allocWords only break; case RETURN_STACK_OVERFLOW: case RETURN_STACK_OVERFLOWEX: { SetRegisterMask(); uintptr_t min_size; // Size in PolyWords if (assemblyInterface.returnReason == RETURN_STACK_OVERFLOW) { min_size = (this->stack->top - (PolyWord*)assemblyInterface.stackPtr) + OVERFLOW_STACK_SIZE * sizeof(uintptr_t) / sizeof(PolyWord); } else { // Stack limit overflow. If the required stack space is larger than // the fixed overflow size the code will calculate the limit in %EDI. stackItem* stackP = regDI().stackAddr; min_size = (this->stack->top - (PolyWord*)stackP) + OVERFLOW_STACK_SIZE * sizeof(uintptr_t) / sizeof(PolyWord); } StackOverflowTrap(min_size); break; } case RETURN_ENTER_INTERPRETER: { interpreterPc = assemblyInterface.stackPtr[0].codeAddr; assemblyInterface.stackPtr++; // Pop return address. byte reasonCode = *interpreterPc++; // Sort out arguments. assemblyInterface.exceptionPacket = TAGGED(0); if (reasonCode == 0xff) { // Exception handler. 
ASSERT(0); // Not used assemblyInterface.exceptionPacket = assemblyInterface.p_rax; // Get the exception packet // We're already in the exception handler but we still have to // adjust the stack pointer and pop the current exception handler. assemblyInterface.stackPtr = assemblyInterface.handlerRegister; assemblyInterface.stackPtr++; assemblyInterface.handlerRegister = (assemblyInterface.stackPtr++)[0].stackAddr; } else if (reasonCode >= 128) { // Start of function. unsigned numArgs = reasonCode - 128; // We need the stack to contain: // The closure, the return address, the arguments. // First pop the original return address. POLYCODEPTR returnAddr = (assemblyInterface.stackPtr++)[0].codeAddr; // Push the register args. ASSERT(numArgs == 1); // We only ever call functions with one argument. #ifdef HOSTARCHITECTURE_X86_64 ASSERT(numArgs <= 5); if (numArgs >= 1) *(--assemblyInterface.stackPtr) = assemblyInterface.p_rax; #ifdef POLYML32IN64 if (numArgs >= 2) *(--assemblyInterface.stackPtr) = assemblyInterface.p_rsi; #else if (numArgs >= 2) *(--assemblyInterface.stackPtr) = assemblyInterface.p_rbx; #endif if (numArgs >= 3) *(--assemblyInterface.stackPtr) = assemblyInterface.p_r8; if (numArgs >= 4) *(--assemblyInterface.stackPtr) = assemblyInterface.p_r9; if (numArgs >= 5) *(--assemblyInterface.stackPtr) = assemblyInterface.p_r10; #else ASSERT(numArgs <= 2); if (numArgs >= 1) *(--assemblyInterface.stackPtr) = assemblyInterface.p_rax; if (numArgs >= 2) *(--assemblyInterface.stackPtr) = assemblyInterface.p_rbx; #endif (--assemblyInterface.stackPtr)[0].codeAddr = returnAddr; *(--assemblyInterface.stackPtr) = assemblyInterface.p_rdx; // Closure } else { // Return from call. Push RAX *(--assemblyInterface.stackPtr) = assemblyInterface.p_rax; } Interpret(); break; } default: Crash("Unknown return reason code %u", this->assemblyInterface.returnReason); } SetMemRegisters(); } void X86TaskData::StackOverflowTrap(uintptr_t space) { uintptr_t min_size = (this->stack->top - (PolyWord*)assemblyInterface.stackPtr) + OVERFLOW_STACK_SIZE + space; try { // The stack check has failed. This may either be because we really have // overflowed the stack or because the stack limit value has been adjusted // to result in a call here. CheckAndGrowStack(this, min_size); } catch (IOException&) { // We may get an exception while handling this if we run out of store } { PLocker l(&interruptLock); // Set the stack limit. This clears any interrupt and also sets the // correct value if we've grown the stack. assemblyInterface.stackLimit = (stackItem*)this->stack->bottom + OVERFLOW_STACK_SIZE; } try { processes->ProcessAsynchRequests(this); // Release and re-acquire use of the ML memory to allow another thread // to GC. processes->ThreadReleaseMLMemory(this); processes->ThreadUseMLMemory(this); } catch (IOException&) { } } void X86TaskData::InitStackFrame(TaskData *parentTaskData, Handle proc) /* Initialise stack frame. */ { StackSpace *space = this->stack; StackObject * newStack = space->stack(); uintptr_t stack_size = space->spaceSize() * sizeof(PolyWord) / sizeof(stackItem); // Set the top of the stack inside the stack rather than at the end. This wastes // a word but if sp is actually at the end OpenBSD segfaults because it isn't in // a MAP_STACK area. uintptr_t topStack = stack_size - 1; stackItem* stackTop = (stackItem*)newStack + topStack; *stackTop = TAGGED(0); // Set it to non-zero. 
assemblyInterface.stackPtr = stackTop; assemblyInterface.stackLimit = (stackItem*)space->bottom + OVERFLOW_STACK_SIZE; assemblyInterface.handlerRegister = stackTop; // Floating point save area. memset(&assemblyInterface.p_fp, 0, sizeof(struct fpSaveArea)); #ifndef HOSTARCHITECTURE_X86_64 // Set the control word for 64-bit precision otherwise we get inconsistent results. assemblyInterface.p_fp.cw = 0x027f ; // Control word assemblyInterface.p_fp.tw = 0xffff; // Tag registers - all unused #endif // Store the argument and the closure. assemblyInterface.p_rdx = proc->Word(); // Closure assemblyInterface.p_rax = TAGGED(0); // Argument // Have to set the register mask in case we get a GC before the thread starts. saveRegisterMask = (1 << 2) | 1; // Rdx and rax #ifdef POLYML32IN64 // In 32-in-64 RBX always contains the heap base address. assemblyInterface.p_rbx.stackAddr = (stackItem*)globalHeapBase; #endif } // In Solaris-x86 the registers are named EIP and ESP. #if (!defined(REG_EIP) && defined(EIP)) #define REG_EIP EIP #endif #if (!defined(REG_ESP) && defined(ESP)) #define REG_ESP ESP #endif // Get the PC and SP(stack) from a signal context. This is needed for profiling. // This version gets the actual sp and pc if we are in ML. // N.B. This must not call malloc since we're in a signal handler. bool X86TaskData::AddTimeProfileCount(SIGNALCONTEXT *context) { stackItem * sp = 0; POLYCODEPTR pc = 0; if (context != 0) { // The tests for HAVE_UCONTEXT_T, HAVE_STRUCT_SIGCONTEXT and HAVE_WINDOWS_H need // to follow the tests in processes.h. #if defined(HAVE_WINDOWS_H) #ifdef _WIN64 sp = (stackItem *)context->Rsp; pc = (POLYCODEPTR)context->Rip; #else // Windows 32 including cygwin. sp = (stackItem *)context->Esp; pc = (POLYCODEPTR)context->Eip; #endif #elif defined(HAVE_UCONTEXT_T) #ifdef HAVE_MCONTEXT_T_GREGS // Linux #ifndef HOSTARCHITECTURE_X86_64 pc = (byte*)context->uc_mcontext.gregs[REG_EIP]; sp = (stackItem*)context->uc_mcontext.gregs[REG_ESP]; #else /* HOSTARCHITECTURE_X86_64 */ pc = (byte*)context->uc_mcontext.gregs[REG_RIP]; sp = (stackItem*)context->uc_mcontext.gregs[REG_RSP]; #endif /* HOSTARCHITECTURE_X86_64 */ #elif defined(HAVE_MCONTEXT_T_MC_ESP) // FreeBSD #ifndef HOSTARCHITECTURE_X86_64 pc = (byte*)context->uc_mcontext.mc_eip; sp = (stackItem*)context->uc_mcontext.mc_esp; #else /* HOSTARCHITECTURE_X86_64 */ pc = (byte*)context->uc_mcontext.mc_rip; sp = (stackItem*)context->uc_mcontext.mc_rsp; #endif /* HOSTARCHITECTURE_X86_64 */ #else // Mac OS X #ifndef HOSTARCHITECTURE_X86_64 #if(defined(HAVE_STRUCT_MCONTEXT_SS)||defined(HAVE_STRUCT___DARWIN_MCONTEXT32_SS)) pc = (byte*)context->uc_mcontext->ss.eip; sp = (stackItem*)context->uc_mcontext->ss.esp; #elif(defined(HAVE_STRUCT___DARWIN_MCONTEXT32___SS)) pc = (byte*)context->uc_mcontext->__ss.__eip; sp = (stackItem*)context->uc_mcontext->__ss.__esp; #endif #else /* HOSTARCHITECTURE_X86_64 */ #if(defined(HAVE_STRUCT_MCONTEXT_SS)||defined(HAVE_STRUCT___DARWIN_MCONTEXT64_SS)) pc = (byte*)context->uc_mcontext->ss.rip; sp = (stackItem*)context->uc_mcontext->ss.rsp; #elif(defined(HAVE_STRUCT___DARWIN_MCONTEXT64___SS)) pc = (byte*)context->uc_mcontext->__ss.__rip; sp = (stackItem*)context->uc_mcontext->__ss.__rsp; #endif #endif /* HOSTARCHITECTURE_X86_64 */ #endif #elif defined(HAVE_STRUCT_SIGCONTEXT) #if defined(HOSTARCHITECTURE_X86_64) && defined(__OpenBSD__) // CPP defines missing in amd64/signal.h in OpenBSD pc = (byte*)context->sc_rip; sp = (stackItem*)context->sc_rsp; #else // !HOSTARCHITEXTURE_X86_64 || !defined(__OpenBSD__) pc = 
(byte*)context->sc_pc; sp = (stackItem*)context->sc_sp; #endif #endif } if (pc != 0) { // See if the PC we've got is an ML code address. MemSpace *space = gMem.SpaceForAddress(pc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { incrementCountAsynch(pc); return true; } } // See if the sp value is in the current stack. if (sp >= (stackItem*)this->stack->bottom && sp < (stackItem*)this->stack->top) { // We may be in the assembly code. The top of the stack will be a return address. pc = sp[0].w().AsCodePtr(); MemSpace *space = gMem.SpaceForAddress(pc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { incrementCountAsynch(pc); return true; } } // See if the value of regSP is a valid stack pointer. // This works if we happen to be in an RTS call using a "Full" call. // It doesn't work if we've used a "Fast" call because that doesn't save the SP. sp = assemblyInterface.stackPtr; if (sp >= (stackItem*)this->stack->bottom && sp < (stackItem*)this->stack->top) { // We may be in the run-time system. pc = sp[0].w().AsCodePtr(); MemSpace *space = gMem.SpaceForAddress(pc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { incrementCountAsynch(pc); return true; } } // None of those worked return false; } // This is called from a different thread so we have to be careful. void X86TaskData::InterruptCode() { PLocker l(&interruptLock); // Set the stack limit pointer to the top of the stack to cause // a trap when we next check for stack overflow. // We use a lock here to ensure that we always use the current value of the // stack. The thread we're interrupting could be growing the stack at this point. if (this->stack != 0) this->assemblyInterface.stackLimit = (stackItem*)(this->stack->top-1); } // This is called from SwitchToPoly before we enter the ML code. void X86TaskData::SetMemRegisters() { // Copy the current store limits into variables before we go into the assembly code. // If we haven't yet set the allocation area or we don't have enough we need // to create one (or a new one). if (this->allocPointer <= this->allocLimit + this->allocWords) { if (this->allocPointer < this->allocLimit) Crash ("Bad length in heap overflow trap"); // Find some space to allocate in. Updates taskData->allocPointer and // returns a pointer to the newly allocated space (if allocWords != 0) PolyWord *space = processes->FindAllocationSpace(this, this->allocWords, true); if (space == 0) { // We will now raise an exception instead of returning. // Set allocWords to zero so we don't set the allocation register // since that could be holding the exception packet. this->allocWords = 0; } // Undo the allocation just now. this->allocPointer += this->allocWords; } if (this->allocWords != 0) { // If we have had a heap trap we actually do the allocation here. // We will have already garbage collected and recovered sufficient space. // This also happens if we have just trapped because of store profiling. this->allocPointer -= this->allocWords; // Now allocate // Set the allocation register to this area. N.B. This is an absolute address. if (this->allocReg < 15) get_reg(this->allocReg)[0].codeAddr = (POLYCODEPTR)(this->allocPointer + 1); /* remember: it's off-by-one */ this->allocWords = 0; } // If we have run out of store, either just above or while allocating in the RTS, // allocPointer and allocLimit will have been set to zero as part of the GC. 
We will // now be raising an exception which may free some store but we need to come back here // before we allocate anything. The compiled code uses unsigned arithmetic to check for // heap overflow but only after subtracting the space required. We need to make sure // that the values are still non-negative after substracting any object size. if (this->allocPointer == 0) this->allocPointer += MAX_OBJECT_SIZE; if (this->allocLimit == 0) this->allocLimit += MAX_OBJECT_SIZE; this->assemblyInterface.localMbottom = this->allocLimit + 1; this->assemblyInterface.localMpointer = this->allocPointer + 1; // If we are profiling store allocation we set mem_hl so that a trap // will be generated. if (profileMode == kProfileStoreAllocation) this->assemblyInterface.localMbottom = this->assemblyInterface.localMpointer; this->assemblyInterface.threadId = this->threadObject; } // This is called whenever we have returned from ML to C. void X86TaskData::SaveMemRegisters() { if (interpreterPc == 0) // Not if we're already in the interpreter this->allocPointer = this->assemblyInterface.localMpointer - 1; this->allocWords = 0; this->assemblyInterface.exceptionPacket = TAGGED(0); this->saveRegisterMask = 0; } // Called on a GC or stack overflow trap. The register mask // is in the bytes after the trap call. void X86TaskData::SetRegisterMask() { byte *pc = assemblyInterface.stackPtr[0].codeAddr; if (*pc == 0xcd) // CD - INT n is used for a single byte { pc++; saveRegisterMask = *pc++; } else if (*pc == 0xca) // CA - FAR RETURN is used for a two byte mask { pc++; saveRegisterMask = pc[0] | (pc[1] << 8); pc += 2; } assemblyInterface.stackPtr[0].codeAddr = pc; } stackItem *X86TaskData::get_reg(int n) /* Returns a pointer to the register given by n. */ { switch (n) { case 0: return &assemblyInterface.p_rax; case 1: return &assemblyInterface.p_rcx; case 2: return &assemblyInterface.p_rdx; case 3: return &assemblyInterface.p_rbx; // Should not have rsp or rbp. case 6: return &assemblyInterface.p_rsi; case 7: return &assemblyInterface.p_rdi; #ifdef HOSTARCHITECTURE_X86_64 case 8: return &assemblyInterface.p_r8; case 9: return &assemblyInterface.p_r9; case 10: return &assemblyInterface.p_r10; case 11: return &assemblyInterface.p_r11; case 12: return &assemblyInterface.p_r12; case 13: return &assemblyInterface.p_r13; case 14: return &assemblyInterface.p_r14; // R15 is the heap pointer so shouldn't occur here. #endif /* HOSTARCHITECTURE_X86_64 */ default: Crash("Unknown register %d\n", n); } } // Called as a result of a heap overflow trap void X86TaskData::HeapOverflowTrap(byte *pcPtr) { X86TaskData *mdTask = this; POLYUNSIGNED wordsNeeded = 0; // The next instruction, after any branches round forwarding pointers or pop // instructions, will be a store of register containing the adjusted heap pointer. // We need to find that register and the value in it in order to find out how big // the area we actually wanted is. N.B. The code-generator and assembly code // must generate the correct instruction sequence. // byte *pcPtr = assemblyInterface.programCtr; while (true) { if (pcPtr[0] == 0xeb) { // Forwarding pointer if (pcPtr[1] >= 128) pcPtr += 256 - pcPtr[1] + 2; else pcPtr += pcPtr[1] + 2; } else if ((pcPtr[0] & 0xf8) == 0x58) // Pop instruction. pcPtr++; else if (pcPtr[0] == 0x41 && ((pcPtr[1] & 0xf8) == 0x58)) // Pop with Rex prefix pcPtr += 2; else break; } #ifndef HOSTARCHITECTURE_X86_64 // This should be movl REG,0[%ebp]. 
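Both branches of this #ifndef end by computing wordsNeeded from the register found here, and the "+1" in that formula is easiest to see by writing the allocation out: compiled code allocates by subtracting the requested size n from localMpointer, which SetMemRegisters above sets to allocPointer + 1, so the register ends up holding allocPointer + 1 - n, and (allocPointer - reg) + 1 recovers n. A hypothetical check of that identity with illustrative numbers:

#include <cassert>
#include <cstdint>

int main()
{
    intptr_t allocPointer = 1000, n = 24;      // word addresses and size, illustrative values only
    intptr_t reg = (allocPointer + 1) - n;     // what the compiled code left in the register
    assert((allocPointer - reg) + 1 == n);     // the recovery used for wordsNeeded below
    return 0;
}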
ASSERT(pcPtr[0] == 0x89); mdTask->allocReg = (pcPtr[1] >> 3) & 7; // Remember this until we allocate the memory stackItem *reg = get_reg(mdTask->allocReg); stackItem reg_val = *reg; // The space we need is the difference between this register // and the current value of newptr. // The +1 here is because assemblyInterface.localMpointer is A.M.pointer +1. The reason // is that after the allocation we have the register pointing at the address we will // actually use. wordsNeeded = (this->allocPointer - (PolyWord*)reg_val.stackAddr) + 1; *reg = TAGGED(0); // Clear this - it's not a valid address. /* length in words, including length word */ ASSERT (wordsNeeded <= (1<<24)); /* Max object size including length/flag word is 2^24 words. */ #else /* HOSTARCHITECTURE_X86_64 */ ASSERT(pcPtr[1] == 0x89 || pcPtr[1] == 0x8b); if (pcPtr[1] == 0x89) { // New (5.4) format. This should be movq REG,%r15 ASSERT(pcPtr[0] == 0x49 || pcPtr[0] == 0x4d); mdTask->allocReg = (pcPtr[2] >> 3) & 7; // Remember this until we allocate the memory if (pcPtr[0] & 0x4) mdTask->allocReg += 8; } else { // Alternative form of movq REG,%r15 ASSERT(pcPtr[0] == 0x4c || pcPtr[0] == 0x4d); mdTask->allocReg = pcPtr[2] & 7; // Remember this until we allocate the memory if (pcPtr[0] & 0x1) mdTask->allocReg += 8; } stackItem *reg = get_reg(this->allocReg); stackItem reg_val = *reg; wordsNeeded = (POLYUNSIGNED)((this->allocPointer - (PolyWord*)reg_val.stackAddr) + 1); *reg = TAGGED(0); // Clear this - it's not a valid address. #endif /* HOSTARCHITECTURE_X86_64 */ if (profileMode == kProfileStoreAllocation) addProfileCount(wordsNeeded); mdTask->allocWords = wordsNeeded; // The actual allocation is done in SetMemRegisters. } void X86TaskData::SetException(poly_exn *exc) // The RTS wants to raise an exception packet. Normally this is as the // result of an RTS call in which case the caller will check this. It can // also happen in a trap. { assemblyInterface.exceptionPacket = (PolyWord)exc; // Set for direct calls. } // Decode and process an effective address. There may // be a constant address in here but in any case we need // to decode it to work out where the next instruction starts. // If this is an lea instruction any addresses are just constants // so must not be treated as addresses. static void skipea(PolyObject *base, byte **pt, ScanAddress *process, bool lea) { unsigned int modrm = *((*pt)++); unsigned int md = modrm >> 6; unsigned int rm = modrm & 7; if (md == 3) { } /* Register. */ else if (rm == 4) { /* s-i-b present. */ unsigned int sib = *((*pt)++); if (md == 0) { if ((sib & 7) == 5) { if (! lea) { #ifndef HOSTARCHITECTURE_X86_64 process->ScanConstant(base, *pt, PROCESS_RELOC_DIRECT); #endif /* HOSTARCHITECTURE_X86_64 */ } (*pt) += 4; } } else if (md == 1) (*pt)++; else if (md == 2) (*pt) += 4; } else if (md == 0 && rm == 5) { if (!lea) { #ifndef HOSTARCHITECTURE_X86_64 /* Absolute address. */ process->ScanConstant(base, *pt, PROCESS_RELOC_DIRECT); #endif /* HOSTARCHITECTURE_X86_64 */ } *pt += 4; } else { if (md == 1) *pt += 1; else if (md == 2) *pt += 4; } } /* Added to deal with constants within the code rather than in the constant area. The constant area is still needed for the function name. 
DCJM 2/1/2001 */ void X86Dependent::ScanConstantsWithinCode(PolyObject *addr, PolyObject *old, POLYUNSIGNED length, ScanAddress *process) { byte *pt = (byte*)addr; PolyWord *end = addr->Offset(length - 1); // If this begins with enter-int it's interpreted code - ignore if (pt[0] == 0xff && pt[1] == 0x55 && (pt[2] == 0x48 || pt[2] == 0x24)) return; while (true) { // Escape prefixes come before any Rex byte if (*pt == 0xf2 || *pt == 0xf3 || *pt == 0x66) pt++; #ifdef HOSTARCHITECTURE_X86_64 // REX prefixes. Set this first. byte lastRex; if (*pt >= 0x40 && *pt <= 0x4f) lastRex = *pt++; else lastRex = 0; //printf("pt=%p *pt=%x\n", pt, *pt); #endif /* HOSTARCHITECTURE_X86_64 */ switch (*pt) { case 0x00: return; // This is actually the first byte of the old "marker" word. case 0xf4: return; // Halt - now used as a marker. case 0x50: case 0x51: case 0x52: case 0x53: case 0x54: case 0x55: case 0x56: case 0x57: /* Push */ case 0x58: case 0x59: case 0x5a: case 0x5b: case 0x5c: case 0x5d: case 0x5e: case 0x5f: /* Pop */ case 0x90: /* nop */ case 0xc3: /* ret */ case 0xf9: /* stc */ case 0xce: /* into */ case 0xf0: /* lock. */ case 0xf3: /* rep/repe */ case 0xa4: case 0xa5: case 0xaa: case 0xab: /* movs/stos */ case 0xa6: /* cmpsb */ case 0x9e: /* sahf */ case 0x99: /* cqo/cdq */ pt++; break; case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: case 0x78: case 0x79: case 0x7a: case 0x7b: case 0x7c: case 0x7d: case 0x7e: case 0x7f: case 0xeb: /* short jumps. */ case 0xcd: /* INT - now used for a register mask */ case 0xa8: /* TEST_ACC8 */ case 0x6a: /* PUSH_8 */ pt += 2; break; case 0xc2: /* RET_16 */ case 0xca: /* FAR RET 16 - used for a register mask */ pt += 3; break; case 0x8d: /* leal. */ pt++; skipea(addr, &pt, process, true); break; case 0x03: case 0x0b: case 0x13: case 0x1b: case 0x23: case 0x2b: case 0x33: case 0x3b: /* Add r,ea etc. */ case 0x88: /* MOVB_R_A */ case 0x89: /* MOVL_R_A */ case 0x8b: /* MOVL_A_R */ case 0x62: /* BOUNDL */ case 0xff: /* Group5 */ case 0xd1: /* Group2_1_A */ case 0x8f: /* POP_A */ case 0xd3: /* Group2_CL_A */ case 0x87: // XCHNG case 0x63: // MOVSXD pt++; skipea(addr, &pt, process, false); break; case 0xf6: /* Group3_a */ { int isTest = 0; pt++; /* The test instruction has an immediate operand. */ if ((*pt & 0x38) == 0) isTest = 1; skipea(addr, &pt, process, false); if (isTest) pt++; break; } case 0xf7: /* Group3_A */ { int isTest = 0; pt++; /* The test instruction has an immediate operand. */ if ((*pt & 0x38) == 0) isTest = 1; skipea(addr, &pt, process, false); if (isTest) pt += 4; break; } case 0xc1: /* Group2_8_A */ case 0xc6: /* MOVB_8_A */ case 0x83: /* Group1_8_A */ case 0x80: /* Group1_8_a */ case 0x6b: // IMUL Ev,Ib pt++; skipea(addr, &pt, process, false); pt++; break; case 0x69: // IMUL Ev,Iv pt++; skipea(addr, &pt, process, false); pt += 4; break; case 0x81: /* Group1_32_A */ { pt ++; #ifndef HOSTARCHITECTURE_X86_64 unsigned opCode = *pt; #endif skipea(addr, &pt, process, false); // Only check the 32 bit constant if this is a comparison. // For other operations this may be untagged and shouldn't be an address. #ifndef HOSTARCHITECTURE_X86_64 if ((opCode & 0x38) == 0x38) process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); #endif pt += 4; break; } case 0xe8: case 0xe9: // Long jump and call. These are used to call constant (known) functions // and also long jumps within the function. { pt++; POLYSIGNED disp = (pt[3] & 0x80) ? -1 : 0; // Set the sign just in case. 
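The loop that follows assembles the four bytes at pt into a sign-extended, little-endian 32-bit displacement; the branch target is relative to the byte after the displacement, which is why absAddr is pt + disp + 4. A minimal standalone decode of the same form (hypothetical helper, not part of this file):

#include <cstdint>

// Decode the rel32 operand of an E8 (call) or E9 (jmp) instruction. 'operand'
// points at the first displacement byte; the target is relative to the byte
// immediately after the four-byte displacement.
static const uint8_t* RelativeTarget(const uint8_t* operand)
{
    int32_t disp = (int32_t)((uint32_t)operand[0]
                 | ((uint32_t)operand[1] << 8)
                 | ((uint32_t)operand[2] << 16)
                 | ((uint32_t)operand[3] << 24));
    return operand + 4 + disp;                  // same as absAddr = pt + disp + 4 below
}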
for(unsigned i = 4; i > 0; i--) disp = (disp << 8) | pt[i-1]; byte *absAddr = pt + disp + 4; // The address is relative to AFTER the constant // If the new address is within the current piece of code we don't do anything if (absAddr >= (byte*)addr && absAddr < (byte*)end) {} else { #ifdef HOSTARCHITECTURE_X86_64 ASSERT(sizeof(PolyWord) == 4); // Should only be used internally on x64 #endif /* HOSTARCHITECTURE_X86_64 */ if (addr != old) { // The old value of the displacement was relative to the old address before // we copied this code segment. // We have to correct it back to the original address. absAddr = absAddr - (byte*)addr + (byte*)old; // We have to correct the displacement for the new location and store // that away before we call ScanConstant. size_t newDisp = absAddr - pt - 4; byte* wr = gMem.SpaceForAddress(pt)->writeAble(pt); for (unsigned i = 0; i < 4; i++) { wr[i] = (byte)(newDisp & 0xff); newDisp >>= 8; } } process->ScanConstant(addr, pt, PROCESS_RELOC_I386RELATIVE); } pt += 4; break; } case 0xc7:/* MOVL_32_A */ { pt++; if ((*pt & 0xc0) == 0x40 /* Byte offset or sib present */ && ((*pt & 7) != 4) /* But not sib present */ && pt[1] == 256-sizeof(PolyWord)) { /* We may use a move instruction to set the length word on a new segment. We mustn't try to treat this as a constant. */ pt += 6; /* Skip the modrm byte, the offset and the constant. */ } else { skipea(addr, &pt, process, false); #ifndef HOSTARCHITECTURE_X86_64 // This isn't used for addresses even in 32-in-64 process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); #endif /* HOSTARCHITECTURE_X86_64 */ pt += 4; } break; } case 0xb8: case 0xb9: case 0xba: case 0xbb: case 0xbc: case 0xbd: case 0xbe: case 0xbf: /* MOVL_32_64_R */ pt ++; #ifdef HOSTARCHITECTURE_X86_64 if ((lastRex & 8) == 0) pt += 4; // 32-bit mode on 64-bits else #endif /* HOSTARCHITECTURE_X86_64 */ { // This is used in native 32-bit for constants and in // 32-in-64 for the special case of an absolute address. process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); pt += sizeof(uintptr_t); } break; case 0x68: /* PUSH_32 */ pt ++; #if (!defined(HOSTARCHITECTURE_X86_64)) process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); #endif pt += 4; break; case 0x0f: /* ESCAPE */ { pt++; switch (*pt) { case 0xb6: /* movzl */ case 0xb7: // movzw case 0xbe: // movsx case 0xbf: // movsx case 0xc1: /* xaddl */ case 0xae: // ldmxcsr/stmxcsr case 0xaf: // imul case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47: case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f: // cmov pt++; skipea(addr, &pt, process, false); break; case 0x80: case 0x81: case 0x82: case 0x83: case 0x84: case 0x85: case 0x86: case 0x87: case 0x88: case 0x89: case 0x8a: case 0x8b: case 0x8c: case 0x8d: case 0x8e: case 0x8f: /* Conditional branches with 32-bit displacement. */ pt += 5; break; case 0x90: case 0x91: case 0x92: case 0x93: case 0x94: case 0x95: case 0x96: case 0x97: case 0x98: case 0x99: case 0x9a: case 0x9b: case 0x9c: case 0x9d: case 0x9e: case 0x9f: /* SetCC. 
*/ pt++; skipea(addr, &pt, process, false); break; // These are SSE2 instructions case 0x10: case 0x11: case 0x58: case 0x5c: case 0x59: case 0x5e: case 0x2e: case 0x2a: case 0x54: case 0x57: case 0x5a: case 0x6e: case 0x7e: case 0x2c: case 0x2d: pt++; skipea(addr, &pt, process, false); break; case 0x73: // PSRLDQ - EA,imm pt++; skipea(addr, &pt, process, false); pt++; break; default: Crash("Unknown opcode %d at %p\n", *pt, pt); } break; } case 0xd8: case 0xd9: case 0xda: case 0xdb: case 0xdc: case 0xdd: case 0xde: case 0xdf: // Floating point escape instructions { pt++; if ((*pt & 0xe0) == 0xe0) pt++; else skipea(addr, &pt, process, false); break; } default: Crash("Unknown opcode %d at %p\n", *pt, pt); } } } #if defined(_MSC_VER) // This saves having to define it in the MASM assembly code. -static POLYUNSIGNED X86AsmAtomicExchangeAndAdd(PolyObject* mutexp, POLYSIGNED addend) +static POLYSIGNED X86AsmAtomicExchangeAndAdd(PolyObject* mutexp, POLYSIGNED addend) { # if (SIZEOF_POLYWORD == 8) return InterlockedExchangeAdd64((LONG64*)mutexp, addend); # else return InterlockedExchangeAdd((LONG*)mutexp, addend); # endif } #else extern "C" { // This is only defined in the GAS assembly code - POLYUNSIGNED X86AsmAtomicExchangeAndAdd(PolyObject*, POLYSIGNED); + POLYSIGNED X86AsmAtomicExchangeAndAdd(PolyObject*, POLYSIGNED); } #endif -// Decrement the value contained in the first word of the mutex. -// The addend is TAGGED(+/-1) - 1 -POLYUNSIGNED X86TaskData::AtomicDecrement(PolyObject *mutexp) +// Do the exchange-and-add +POLYSIGNED X86TaskData::AtomicExchAdd(PolyObject *mutexp, POLYSIGNED incr) { - return X86AsmAtomicExchangeAndAdd(mutexp, -2) - 2; -} - -// Increment the value contained in the first word of the mutex. -POLYUNSIGNED X86TaskData::AtomicIncrement(PolyObject* mutexp) -{ - return X86AsmAtomicExchangeAndAdd(mutexp, 2) + 2; + return X86AsmAtomicExchangeAndAdd(mutexp, incr - TAGGED(0).AsSigned()/* Remove the tag */); } // Release a mutex. Because the atomic increment and decrement // use the hardware LOCK prefix we can simply set this to zero. void X86TaskData::AtomicReset(PolyObject* mutexp) { mutexp->Set(0, TAGGED(0)); } extern "C" { POLYEXTERNALSYMBOL void *PolyX86GetThreadData(); POLYEXTERNALSYMBOL POLYUNSIGNED PolyInterpretedEnterIntMode(); POLYEXTERNALSYMBOL POLYUNSIGNED PolyEndBootstrapMode(FirstArgument threadId, PolyWord function); } // Return the address of assembly data for the current thread. This is normally in // RBP except if we are in a callback. void *PolyX86GetThreadData() { // We should get the task data for the thread that is running this code. // If this thread has been created by the foreign code we will have to // create a new one here. TaskData* taskData = processes->GetTaskDataForThread(); if (taskData == 0) { try { taskData = processes->CreateNewTaskData(); } catch (std::bad_alloc&) { ::Exit("Unable to create thread data - insufficient memory"); } catch (MemoryException&) { ::Exit("Unable to create thread data - insufficient memory"); } } return &((X86TaskData*)taskData)->assemblyInterface; } // Do we require EnterInt instructions and if so for which architecture? // 0 = > None; 1 => X86_32, 2 => X86_64. 3 => X86_32_in_64. POLYUNSIGNED PolyInterpretedEnterIntMode() { #ifdef POLYML32IN64 return TAGGED(3).AsUnsigned(); #elif defined(HOSTARCHITECTURE_X86_64) return TAGGED(2).AsUnsigned(); #else return TAGGED(1).AsUnsigned(); #endif } // End bootstrap mode and run a new function. 
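The tag adjustment in AtomicExchAdd above relies on Poly/ML's tagged-integer representation: a tagged integer n is stored as 2n+1, so TAGGED(0) is 1 and adding TAGGED(i) - TAGGED(0) (that is, 2i) to a tagged word produces the correctly tagged sum, while the exchange-and-add itself returns the previous tagged contents. A small self-contained illustration of the same arithmetic, using std::atomic in place of the LOCK-prefixed assembly and the Interlocked intrinsics (all names here are illustrative only):

#include <atomic>
#include <cassert>
#include <cstdint>

static intptr_t Tagged(intptr_t n) { return (n << 1) | 1; }   // tagged n = 2n+1
static intptr_t Untag(intptr_t w)  { return w >> 1; }

// Add a tagged increment to a tagged cell and return the old tagged value,
// mirroring AtomicExchAdd's "incr - TAGGED(0)" adjustment.
static intptr_t ExchAddTagged(std::atomic<intptr_t>& cell, intptr_t taggedIncr)
{
    return cell.fetch_add(taggedIncr - Tagged(0));   // fetch_add returns the previous value
}

int main()
{
    std::atomic<intptr_t> word(Tagged(0));           // starts as tagged 0
    intptr_t old = ExchAddTagged(word, Tagged(1));   // atomically add tagged 1
    assert(Untag(old) == 0 && Untag(word.load()) == 1);
    return 0;
}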
POLYUNSIGNED PolyEndBootstrapMode(FirstArgument threadId, PolyWord function) { TaskData* taskData = TaskData::FindTaskForId(threadId); ASSERT(taskData != 0); taskData->PreRTSCall(); Handle pushedFunction = taskData->saveVec.push(function); x86Dependent.mustInterpret = false; ((X86TaskData*)taskData)->EndBootStrap(); taskData->InitStackFrame(taskData, pushedFunction); taskData->EnterPolyCode(); // Should never return. ASSERT(0); return TAGGED(0).AsUnsigned(); } struct _entrypts machineSpecificEPT[] = { { "PolyX86GetThreadData", (polyRTSFunction)&PolyX86GetThreadData }, { "PolyInterpretedEnterIntMode", (polyRTSFunction)&PolyInterpretedEnterIntMode }, { "PolyEndBootstrapMode", (polyRTSFunction)&PolyEndBootstrapMode }, { NULL, NULL} // End of list. }; diff --git a/mlsource/MLCompiler/BUILTINS.sml b/mlsource/MLCompiler/BUILTINS.sml index 6e074f19..e3bd1f82 100644 --- a/mlsource/MLCompiler/BUILTINS.sml +++ b/mlsource/MLCompiler/BUILTINS.sml @@ -1,115 +1,115 @@ (* Signature for built-in functions - Copyright David C. J. Matthews 2016, 2018-20 + Copyright David C. J. Matthews 2016, 2018-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) signature BUILTINS = sig datatype testConditions = TestEqual (* No TestNotEqual because that is always generated with "not" *) | TestLess | TestLessEqual | TestGreater | TestGreaterEqual | TestUnordered (* Reals only. *) datatype arithmeticOperations = ArithAdd | ArithSub | ArithMult | ArithQuot | ArithRem | ArithDiv | ArithMod datatype logicalOperations = LogicalAnd | LogicalOr | LogicalXor datatype shiftOperations = ShiftLeft | ShiftRightLogical (* Logical shift - zero added bits. *) | ShiftRightArithmetic (* Arithmetic shift - add the sign bit. *) datatype unaryOps = NotBoolean (* true => false; false => true - XOR *) | IsTaggedValue (* Test the tag bit. *) | MemoryCellLength (* Return the length of a memory cell (heap object) *) | MemoryCellFlags (* Return the flags byte of a memory cell (heap object) *) | ClearMutableFlag (* Remove the mutable flag from the flags byte *) - | AtomicIncrement - | AtomicDecrement | AtomicReset (* Set a value to (tagged) zero atomically. *) | LongWordToTagged (* Convert a LargeWord.word to a Word.word or FixedInt.int. *) | SignedToLongWord (* Convert a tagged value to a LargeWord with sign extension. *) | UnsignedToLongWord (* Convert a tagged value to a LargeWord without sign extension. *) | RealAbs of precision (* Set the sign bit of a real to positive. *) | RealNeg of precision (* Invert the sign bit of a real. *) | RealFixedInt of precision (* Convert an integer value into a real value. *) | FloatToDouble (* Convert a single precision floating point value to double precision. *) | DoubleToFloat of IEEEReal.rounding_mode option (* Convert a double precision floating point value to single precision. *) | RealToInt of precision * IEEEReal.rounding_mode (* Convert a double or float to a fixed precision int. 
*) | TouchAddress (* Ensures that the cell is reachable. *) | AllocCStack (* Allocate space on the C stack. *) and precision = PrecSingle | PrecDouble (* Single or double precision floating pt. *) and binaryOps = (* Compare two words and return the result. This is used for both word values (isSigned=false) and fixed precision integer (isSigned=true). Values must be tagged and not pointers. *) WordComparison of { test: testConditions, isSigned: bool } (* Fixed precision int operations. These may raise Overflow. *) | FixedPrecisionArith of arithmeticOperations (* Arithmetic operations on word values. These do not raise Overflow. *) | WordArith of arithmeticOperations (* Load a word at a specific offset in a heap object. If this is immutable and the arguments are constants it can be folded at compile time since the result will never change. *) | WordLogical of logicalOperations (* Logical operations on words. *) | WordShift of shiftOperations (* Shift operations on words. *) (* Allocate a heap cell for byte data. The first argument is the number of words (not bytes) needed. The second argument is the "flags" byte which must include F_bytes and F_mutable. The new cell is not initialised. *) | AllocateByteMemory (* Operations on LargeWords. These are 32/64 bit values that are "boxed". *) | LargeWordComparison of testConditions | LargeWordArith of arithmeticOperations | LargeWordLogical of logicalOperations | LargeWordShift of shiftOperations | RealComparison of testConditions * precision | RealArith of arithmeticOperations * precision (* Equality of values which could be pointers or tagged values. At the lowest level this is the same as WordComparison but if we try to use an indexed case there must be a check that the values are tagged. *) | PointerEq | FreeCStack (* Free space on the C stack. *) + | AtomicExchangeAdd (* Add a value to a ref atomically and return the old value. *) and nullaryOps = (* Get the current thread id *) GetCurrentThreadId (* Check whether the last RTS call set the exception status and raise it if it had. *) | CheckRTSException + | CPUPause (* Pause a CPU while waiting for a spinlock. *) val unaryRepr: unaryOps -> string and binaryRepr: binaryOps -> string and testRepr: testConditions -> string and arithRepr: arithmeticOperations -> string and nullaryRepr: nullaryOps -> string end; diff --git a/mlsource/MLCompiler/CODETREESIG.ML b/mlsource/MLCompiler/CODETREESIG.ML index e21f54c2..95cd68d1 100644 --- a/mlsource/MLCompiler/CODETREESIG.ML +++ b/mlsource/MLCompiler/CODETREESIG.ML @@ -1,161 +1,163 @@ (* - Copyright (c) 2012,13,15-20 David C.J. Matthews + Copyright (c) 2012,13,15-21 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) signature CODETREESIG = sig type machineWord type codetree type pretty type codeBinding type level datatype argumentType = GeneralType | DoubleFloatType | SingleFloatType and loadStoreKind = LoadStoreMLWord of {isImmutable: bool} | LoadStoreMLByte of {isImmutable: bool} | LoadStoreC8 | LoadStoreC16 | LoadStoreC32 | LoadStoreC64 | LoadStoreCFloat | LoadStoreCDouble | LoadStoreUntaggedUnsigned and blockOpKind = BlockOpMove of {isByteMove: bool} | BlockOpEqualByte | BlockOpCompareByte structure BuiltIns: BUILTINS datatype arbPrecisionOps = ArbCompare of BuiltIns.testConditions | ArbArith of BuiltIns.arithmeticOperations val CodeTrue: codetree (* code for "true" *) val CodeFalse: codetree (* code for "false" *) val CodeZero: codetree (* code for 0, nil etc. *) val mkFunction: { body: codetree, argTypes:argumentType list, resultType: argumentType, name: string, closure: codetree list, numLocals: int } -> codetree val mkInlineFunction: { body: codetree, argTypes:argumentType list, resultType: argumentType, name: string, closure: codetree list, numLocals: int } -> codetree val mkCall: codetree * (codetree * argumentType) list * argumentType -> codetree val mkLoadLocal: int -> codetree and mkLoadArgument: int -> codetree and mkLoadClosure: int -> codetree val mkConst: machineWord -> codetree val mkInd: int * codetree -> codetree val mkVarField: int * codetree -> codetree val mkProc: codetree * int * string * codetree list * int -> codetree val mkInlproc: codetree * int * string * codetree list * int -> codetree val mkMacroProc: codetree * int * string * codetree list * int -> codetree val mkIf: codetree * codetree * codetree -> codetree val mkWhile: codetree * codetree -> codetree val mkEnv: codeBinding list * codetree -> codetree val mkStr: string -> codetree val mkTuple: codetree list -> codetree val mkDatatype: codetree list -> codetree val mkRaise: codetree -> codetree val mkCor: codetree * codetree -> codetree val mkCand: codetree * codetree -> codetree val mkHandle: codetree * codetree * int -> codetree val mkEval: codetree * codetree list -> codetree val identityFunction: string -> codetree val mkSetContainer: codetree * codetree * int -> codetree val mkTupleFromContainer: int * int -> codetree val mkTagTest: codetree * word * word -> codetree val mkBeginLoop: codetree * (int * codetree) list -> codetree val mkLoop: codetree list -> codetree val mkDec: int * codetree -> codeBinding val mkMutualDecs: (int * codetree) list -> codeBinding val mkNullDec: codetree -> codeBinding val mkContainer: int * int * codetree -> codeBinding val mkNot: codetree -> codetree val mkIsShort: codetree -> codetree val mkEqualTaggedWord: codetree * codetree -> codetree val mkEqualPointerOrWord: codetree * codetree -> codetree val equalTaggedWordFn: codetree val equalPointerOrWordFn: codetree val decSequenceWithFinalExp: codeBinding list -> codetree val pretty: codetree -> pretty val evalue: codetree -> machineWord option val genCode: codetree * Universal.universal list * int -> (unit -> codetree) (* Helper functions to build closure. 
*) val mkLoad: int * level * level -> codetree and mkLoadParam: int * level * level -> codetree val baseLevel: level val newLevel: level -> level val getClosure: level -> codetree list val multipleUses: codetree * (unit -> int) * level -> {load: level -> codetree, dec: codeBinding list} val mkUnary: BuiltIns.unaryOps * codetree -> codetree and mkBinary: BuiltIns.binaryOps * codetree * codetree -> codetree val mkUnaryFn: BuiltIns.unaryOps -> codetree and mkBinaryFn: BuiltIns.binaryOps -> codetree and mkArbitraryFn: arbPrecisionOps -> codetree val getCurrentThreadId: codetree and getCurrentThreadIdFn: codetree and checkRTSException: codetree + and cpuPauseFn: codetree + val mkAllocateWordMemory: codetree * codetree * codetree -> codetree and mkAllocateWordMemoryFn: codetree (* Load and store operations. At this level the first operand is the base address and the second is an index. *) val mkLoadOperation: loadStoreKind * codetree * codetree -> codetree val mkLoadOperationFn: loadStoreKind -> codetree val mkStoreOperation: loadStoreKind * codetree * codetree * codetree -> codetree val mkStoreOperationFn: loadStoreKind -> codetree val mkBlockOperation: {kind:blockOpKind, leftBase: codetree, rightBase: codetree, leftIndex: codetree, rightIndex: codetree, length: codetree} -> codetree val mkBlockOperationFn: blockOpKind -> codetree structure Foreign: FOREIGNCALLSIG structure Sharing: sig type machineWord = machineWord type codetree = codetree type pretty = pretty type argumentType=argumentType type codeBinding = codeBinding type level = level end end; diff --git a/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml b/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml index e94719c1..e9b13284 100644 --- a/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml +++ b/mlsource/MLCompiler/CodeTree/BackendIntermediateCode.sml @@ -1,739 +1,739 @@ (* - Copyright (c) 2012, 2016-20 David C.J. Matthews + Copyright (c) 2012, 2016-21 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Intermediate code tree for the back end of the compiler. *) structure BackendIntermediateCode: BackendIntermediateCodeSig = struct open Address structure BuiltIns = struct datatype testConditions = TestEqual | TestLess | TestLessEqual | TestGreater | TestGreaterEqual | TestUnordered (* Reals only. 
*) datatype arithmeticOperations = ArithAdd | ArithSub | ArithMult | ArithQuot | ArithRem | ArithDiv | ArithMod datatype logicalOperations = LogicalAnd | LogicalOr | LogicalXor datatype shiftOperations = ShiftLeft | ShiftRightLogical | ShiftRightArithmetic datatype unaryOps = NotBoolean | IsTaggedValue | MemoryCellLength | MemoryCellFlags | ClearMutableFlag - | AtomicIncrement - | AtomicDecrement | AtomicReset | LongWordToTagged | SignedToLongWord | UnsignedToLongWord | RealAbs of precision | RealNeg of precision | RealFixedInt of precision | FloatToDouble | DoubleToFloat of IEEEReal.rounding_mode option | RealToInt of precision * IEEEReal.rounding_mode | TouchAddress | AllocCStack and precision = PrecSingle | PrecDouble and binaryOps = WordComparison of { test: testConditions, isSigned: bool } | FixedPrecisionArith of arithmeticOperations | WordArith of arithmeticOperations | WordLogical of logicalOperations | WordShift of shiftOperations | AllocateByteMemory | LargeWordComparison of testConditions | LargeWordArith of arithmeticOperations | LargeWordLogical of logicalOperations | LargeWordShift of shiftOperations | RealComparison of testConditions * precision | RealArith of arithmeticOperations * precision | PointerEq | FreeCStack + | AtomicExchangeAdd and nullaryOps = GetCurrentThreadId | CheckRTSException + | CPUPause fun unaryRepr NotBoolean = "NotBoolean" | unaryRepr IsTaggedValue = "IsTaggedValue" | unaryRepr MemoryCellLength = "MemoryCellLength" | unaryRepr MemoryCellFlags = "MemoryCellFlags" | unaryRepr ClearMutableFlag = "ClearMutableFlag" - | unaryRepr AtomicIncrement = "AtomicIncrement" - | unaryRepr AtomicDecrement = "AtomicDecrement" | unaryRepr AtomicReset = "AtomicReset" | unaryRepr LongWordToTagged = "LongWordToTagged" | unaryRepr SignedToLongWord = "SignedToLongWord" | unaryRepr UnsignedToLongWord = "UnsignedToLongWord" | unaryRepr (RealAbs prec) = "RealAbs" ^ precRepr prec | unaryRepr (RealNeg prec) = "RealNeg" ^ precRepr prec | unaryRepr (RealFixedInt prec) = "RealFixedInt" ^ precRepr prec | unaryRepr FloatToDouble = "FloatToDouble" | unaryRepr (DoubleToFloat NONE) = "DoubleToFloat" | unaryRepr (DoubleToFloat (SOME mode)) = "DoubleToFloat" ^ rndModeRepr mode | unaryRepr (RealToInt (prec, mode)) = "RealToInt" ^ precRepr prec ^ rndModeRepr mode | unaryRepr TouchAddress = "TouchAddress" | unaryRepr AllocCStack = "AllocCStack" and binaryRepr (WordComparison{test, isSigned}) = "Test" ^ (testRepr test) ^ (if isSigned then "Signed" else "Unsigned") | binaryRepr (FixedPrecisionArith arithOp) = (arithRepr arithOp) ^ "Fixed" | binaryRepr (WordArith arithOp) = (arithRepr arithOp) ^ "Word" | binaryRepr (WordLogical logOp) = (logicRepr logOp) ^ "Word" | binaryRepr (WordShift shiftOp) = (shiftRepr shiftOp) ^ "Word" | binaryRepr AllocateByteMemory = "AllocateByteMemory" | binaryRepr (LargeWordComparison test) = "Test" ^ (testRepr test) ^ "LargeWord" | binaryRepr (LargeWordArith arithOp) = (arithRepr arithOp) ^ "LargeWord" | binaryRepr (LargeWordLogical logOp) = (logicRepr logOp) ^ "LargeWord" | binaryRepr (LargeWordShift shiftOp) = (shiftRepr shiftOp) ^ "LargeWord" | binaryRepr (RealComparison (test, prec)) = "Test" ^ testRepr test ^ precRepr prec | binaryRepr (RealArith (arithOp, prec)) = arithRepr arithOp ^ precRepr prec | binaryRepr PointerEq = "PointerEq" | binaryRepr FreeCStack = "FreeCStack" + | binaryRepr AtomicExchangeAdd = "AtomicExchangeAdd" and nullaryRepr GetCurrentThreadId = "GetCurrentThreadId" | nullaryRepr CheckRTSException = "CheckRTSException" + | nullaryRepr CPUPause 
= "CPUPause" and testRepr TestEqual = "Equal" | testRepr TestLess = "Less" | testRepr TestLessEqual = "LessEqual" | testRepr TestGreater = "Greater" | testRepr TestGreaterEqual = "GreaterEqual" | testRepr TestUnordered = "Unordered" and arithRepr ArithAdd = "Add" | arithRepr ArithSub = "Sub" | arithRepr ArithMult = "Mult" | arithRepr ArithQuot = "Quot" | arithRepr ArithRem = "Rem" | arithRepr ArithDiv = "Div" | arithRepr ArithMod = "Mod" and logicRepr LogicalAnd = "And" | logicRepr LogicalOr = "Or" | logicRepr LogicalXor = "Xor" and shiftRepr ShiftLeft = "Left" | shiftRepr ShiftRightLogical = "RightLogical" | shiftRepr ShiftRightArithmetic = "RightArithmetic" and precRepr PrecSingle = "Single" | precRepr PrecDouble = "Double" and rndModeRepr IEEEReal.TO_NEAREST = "Round" | rndModeRepr IEEEReal.TO_NEGINF = "Down" | rndModeRepr IEEEReal.TO_POSINF = "Up" | rndModeRepr IEEEReal.TO_ZERO = "Trunc" end datatype argumentType = GeneralType | DoubleFloatType | SingleFloatType datatype backendIC = BICNewenv of bicCodeBinding list * backendIC (* Set of bindings with an expression. *) | BICConstnt of machineWord * Universal.universal list (* Load a constant *) | BICExtract of bicLoadForm (* Get a local variable, an argument or a closure value *) | BICField of {base: backendIC, offset: int } (* Load a field from a tuple or record *) | BICEval of (* Evaluate a function with an argument list. *) { function: backendIC, argList: (backendIC * argumentType) list, resultType: argumentType } (* Built-in functions. *) | BICNullary of {oper: BuiltIns.nullaryOps} | BICUnary of {oper: BuiltIns.unaryOps, arg1: backendIC} | BICBinary of {oper: BuiltIns.binaryOps, arg1: backendIC, arg2: backendIC} | BICArbitrary of {oper: BuiltIns.arithmeticOperations, shortCond: backendIC, arg1: backendIC, arg2: backendIC, longCall: backendIC} | BICLambda of bicLambdaForm (* Lambda expressions. *) | BICCond of backendIC * backendIC * backendIC (* If-then-else expression *) | BICCase of (* Case expressions *) { cases : backendIC option list, (* NONE means "jump to the default". *) test : backendIC, default : backendIC, isExhaustive: bool, firstIndex: word } | BICBeginLoop of (* Start of tail-recursive inline function. *) { loop: backendIC, arguments: (bicSimpleBinding * argumentType) list } | BICLoop of (backendIC * argumentType) list (* Jump back to start of tail-recursive function. *) | BICRaise of backendIC (* Raise an exception *) | BICHandle of (* Exception handler. *) { exp: backendIC, handler: backendIC, exPacketAddr: int } | BICTuple of backendIC list (* Tuple *) | BICSetContainer of (* Copy a tuple to a container. *) { container: backendIC, tuple: backendIC, filter: BoolVector.vector } | BICLoadContainer of {base: backendIC, offset: int } | BICTagTest of { test: backendIC, tag: word, maxTag: word } | BICLoadOperation of { kind: loadStoreKind, address: bicAddress } | BICStoreOperation of { kind: loadStoreKind, address: bicAddress, value: backendIC } | BICBlockOperation of { kind: blockOpKind, sourceLeft: bicAddress, destRight: bicAddress, length: backendIC } | BICAllocateWordMemory of {numWords: backendIC, flags: backendIC, initial: backendIC} and bicCodeBinding = BICDeclar of bicSimpleBinding (* Make a local declaration or push an argument *) | BICRecDecs of { addr: int, lambda: bicLambdaForm } list (* Set of mutually recursive declarations. *) | BICNullBinding of backendIC (* Just evaluate the expression and discard the result. *) | BICDecContainer of { addr: int, size: int } (* Create a container for a tuple on the stack. 
*) and caseType = CaseWord (* Word or fixed-precision integer. *) | CaseTag of word and bicLoadForm = BICLoadLocal of int (* Local binding *) | BICLoadArgument of int (* Argument - 0 is first arg etc.*) | BICLoadClosure of int (* Closure - 0 is first closure item etc *) | BICLoadRecursive (* Recursive call *) and loadStoreKind = LoadStoreMLWord of {isImmutable: bool} (* Load/Store an ML word in an ML word cell. *) | LoadStoreMLByte of {isImmutable: bool} (* Load/Store a byte, tagging and untagging as appropriate, in an ML byte cell. *) | LoadStoreC8 (* Load/Store C values - The base address is a boxed SysWord.word value. *) | LoadStoreC16 | LoadStoreC32 | LoadStoreC64 | LoadStoreCFloat | LoadStoreCDouble | LoadStoreUntaggedUnsigned and blockOpKind = BlockOpMove of {isByteMove: bool} | BlockOpEqualByte | BlockOpCompareByte withtype bicSimpleBinding = { (* Declare a value or push an argument. *) value: backendIC, addr: int } and bicLambdaForm = { (* Lambda expressions. *) body : backendIC, name : string, closure : bicLoadForm list, argTypes : argumentType list, resultType : argumentType, localCount : int, heapClosure : bool } and bicAddress = (* Address form used in loads, store and block operations. The base is an ML address if this is to/from ML memory or a (boxed) SysWord.word if it is to/from C memory. The index is a value in units of the size of the item being loaded/stored and the offset is always in bytes. *) {base: backendIC, index: backendIC option, offset: int} structure CodeTags = struct open Universal val tupleTag: universal list list tag = tag() fun splitProps _ [] = (NONE, []) | splitProps tag (hd::tl) = if Universal.tagIs tag hd then (SOME hd, tl) else let val (p, l) = splitProps tag tl in (p, hd :: l) end fun mergeTupleProps(p, []) = p | mergeTupleProps([], p) = p | mergeTupleProps(m, n) = ( case (splitProps tupleTag m, splitProps tupleTag n) of ((SOME mp, ml), (SOME np, nl)) => let val mpl = Universal.tagProject tupleTag mp and npl = Universal.tagProject tupleTag np val merge = ListPair.mapEq mergeTupleProps (mpl, npl) in Universal.tagInject tupleTag merge :: (ml @ nl) end | _ => m @ n ) end fun loadStoreKindRepr(LoadStoreMLWord {isImmutable=true}) = "MLWordImmutable" | loadStoreKindRepr(LoadStoreMLWord {isImmutable=false}) = "MLWord" | loadStoreKindRepr(LoadStoreMLByte {isImmutable=true}) = "MLByteImmutable" | loadStoreKindRepr(LoadStoreMLByte {isImmutable=false}) = "MLByte" | loadStoreKindRepr LoadStoreC8 = "C8Bit" | loadStoreKindRepr LoadStoreC16 = "C16Bit" | loadStoreKindRepr LoadStoreC32 = "C32Bit" | loadStoreKindRepr LoadStoreC64 = "C64Bit" | loadStoreKindRepr LoadStoreCFloat = "CFloat" | loadStoreKindRepr LoadStoreCDouble = "CDouble" | loadStoreKindRepr LoadStoreUntaggedUnsigned = "MLWordUntagged" fun blockOpKindRepr (BlockOpMove{isByteMove=false}) = "MoveWord" | blockOpKindRepr (BlockOpMove{isByteMove=true}) = "MoveByte" | blockOpKindRepr BlockOpEqualByte = "EqualByte" | blockOpKindRepr BlockOpCompareByte = "CompareByte" open Pretty fun pList ([]: 'b list, _: string, _: 'b->pretty) = [] | pList ([h], _, disp) = [disp h] | pList (h::t, sep, disp) = PrettyBlock (0, false, [], [ disp h, PrettyBreak (0, 0), PrettyString sep ] ) :: PrettyBreak (1, 0) :: pList (t, sep, disp) fun pretty (pt : backendIC) : pretty = let fun printList(start, lst, sep) : pretty = PrettyBlock (1, true, [], PrettyString (start ^ "(") :: pList(lst, sep, pretty) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) fun prettyArgType GeneralType = PrettyString "G" | prettyArgType DoubleFloatType = 
PrettyString "D" | prettyArgType SingleFloatType = PrettyString "F" fun prettyArg (c, t) = PrettyBlock(1, false, [], [pretty c, PrettyBreak (1, 0), prettyArgType t]) fun prettyArgs(start, lst, sep) : pretty = PrettyBlock (1, true, [], PrettyString (start ^ "(") :: pList(lst, sep, prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) fun prettyAddress({base, index, offset}: bicAddress): pretty = let in PrettyBlock (1, true, [], [ PrettyString "[", PrettyBreak (0, 3), pretty base, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), case index of NONE => PrettyString "-" | SOME i => pretty i, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), PrettyString(Int.toString offset), PrettyBreak (0, 0), PrettyString "]" ]) end in case pt of BICEval {function, argList, resultType} => let val prettyArgs = PrettyBlock (1, true, [], PrettyString ("$(") :: pList(argList, ",", prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) in PrettyBlock (3, false, [], [ pretty function, PrettyBreak(1, 0), prettyArgType resultType, PrettyBreak(1, 0), prettyArgs ] ) end | BICUnary { oper, arg1 } => PrettyBlock (3, false, [], [ PrettyString(BuiltIns.unaryRepr oper), PrettyBreak(1, 0), printList("", [arg1], ",") ] ) | BICBinary { oper, arg1, arg2 } => PrettyBlock (3, false, [], [ PrettyString(BuiltIns.binaryRepr oper), PrettyBreak(1, 0), printList("", [arg1, arg2], ",") ] ) | BICNullary { oper } => PrettyString(BuiltIns.nullaryRepr oper) | BICArbitrary { oper, shortCond, arg1, arg2, longCall } => PrettyBlock (3, false, [], [ PrettyString(BuiltIns.arithRepr oper), PrettyBreak(1, 0), printList("", [shortCond, arg1, arg2, longCall], ",") ] ) | BICAllocateWordMemory { numWords, flags, initial } => PrettyBlock (3, false, [], [ PrettyString "AllocateWordMemory", PrettyBreak(1, 0), printList("", [numWords, flags, initial], ",") ] ) | BICExtract (BICLoadLocal addr) => let val str : string = concat ["LOCAL(", Int.toString addr, ")"] in PrettyString str end | BICExtract (BICLoadArgument addr) => let val str : string = concat ["PARAM(", Int.toString addr, ")"] in PrettyString str end | BICExtract (BICLoadClosure addr) => let val str : string = concat ["CLOS(", Int.toString addr, ")"] in PrettyString str end | BICExtract (BICLoadRecursive) => let val str : string = concat ["RECURSIVE(", ")"] in PrettyString str end | BICField {base, offset} => let val str = "INDIRECT(" ^ Int.toString offset ^ ", "; in PrettyBlock(0, false, [], [ PrettyString str, pretty base, PrettyString ")" ] ) end | BICLambda {body, name, closure, argTypes, heapClosure, resultType, localCount} => let fun prettyArgTypes [] = [] | prettyArgTypes [last] = [prettyArgType last] | prettyArgTypes (hd::tl) = prettyArgType hd :: PrettyBreak(1, 0) :: prettyArgTypes tl in PrettyBlock (1, true, [], [ PrettyString ("LAMBDA("), PrettyBreak (1, 0), PrettyString name, PrettyBreak (1, 0), PrettyString ( "CL=" ^ Bool.toString heapClosure), PrettyString (" LOCALS=" ^ Int.toString localCount), PrettyBreak(1, 0), PrettyBlock (1, false, [], PrettyString "ARGS=" :: prettyArgTypes argTypes), PrettyBreak(1, 0), PrettyBlock (1, false, [], [PrettyString "RES=", prettyArgType resultType]), printList (" CLOS=", map BICExtract closure, ","), PrettyBreak (1, 0), pretty body, PrettyString "){LAMBDA}" ] ) end | BICConstnt (w, _) => PrettyString (stringOfWord w) | BICCond (f, s, t) => PrettyBlock (1, true, [], [ PrettyString "IF(", pretty f, PrettyString ", ", PrettyBreak (0, 0), pretty s, PrettyString ", ", PrettyBreak (0, 0), pretty t, PrettyBreak (0, 0), PrettyString 
(")") ] ) | BICNewenv(decs, final) => PrettyBlock (1, true, [], PrettyString ("BLOCK" ^ "(") :: pList(decs, ";", prettyBinding) @ [ PrettyBreak (1, 0), pretty final, PrettyBreak (0, 0), PrettyString (")") ] ) | BICBeginLoop{loop=loopExp, arguments=args } => let fun prettyArg (c, t) = PrettyBlock(1, false, [], [prettySimpleBinding c, PrettyBreak (1, 0), prettyArgType t]) in PrettyBlock (3, false, [], [ PrettyBlock (1, true, [], PrettyString ("BEGINLOOP(") :: pList(args, ",", prettyArg) @ [ PrettyBreak (0, 0), PrettyString (")") ] ), PrettyBreak (0, 0), PrettyString "(", PrettyBreak (0, 0), pretty loopExp, PrettyBreak (0, 0), PrettyString ")" ] ) end | BICLoop ptl => prettyArgs("LOOP", ptl, ",") | BICRaise c => PrettyBlock (1, true, [], [ PrettyString "RAISE(", pretty c, PrettyBreak (0, 0), PrettyString (")") ] ) | BICHandle {exp, handler, exPacketAddr} => PrettyBlock (3, false, [], [ PrettyString "HANDLE(", pretty exp, PrettyString ("WITH exid=" ^ Int.toString exPacketAddr), PrettyBreak (1, 0), pretty handler, PrettyString ")" ] ) | BICCase {cases, test, default, isExhaustive, firstIndex, ...} => PrettyBlock (1, true, [], PrettyString "CASE (" :: pretty test :: PrettyBreak (1, 0) :: PrettyString ("( from " ^ Word.toString firstIndex ^ (if isExhaustive then " exhaustive" else "")) :: PrettyBreak (1, 0) :: pList(cases, ",", fn (SOME exp) => PrettyBlock (1, true, [], [ PrettyString "=>", PrettyBreak (1, 0), pretty exp ]) | NONE => PrettyString "=> default" ) @ [ PrettyBreak (1, 0), PrettyBlock (1, false, [], [ PrettyString "ELSE:", PrettyBreak (1, 0), pretty default ] ), PrettyBreak (1, 0), PrettyString (") {"^"CASE"^"}") ] ) | BICTuple ptl => printList("RECCONSTR", ptl, ",") | BICSetContainer{container, tuple, filter} => let val source = BoolVector.length filter val dest = BoolVector.foldl(fn (true, n) => n+1 | (false, n) => n) 0 filter in PrettyBlock (3, false, [], [ PrettyString (concat["SETCONTAINER(", Int.toString dest, "/", Int.toString source, ", "]), pretty container, PrettyBreak (0, 0), PrettyString ",", PrettyBreak (1, 0), pretty tuple, PrettyBreak (0, 0), PrettyString ")" ] ) end | BICLoadContainer {base, offset} => let val str = "INDIRECTCONTAINER(" ^ Int.toString offset ^ ", "; in PrettyBlock(0, false, [], [ PrettyString str, pretty base, PrettyString ")" ] ) end | BICTagTest { test, tag, maxTag } => PrettyBlock (3, false, [], [ PrettyString (concat["TAGTEST(", Word.toString tag, ", ", Word.toString maxTag, ","]), PrettyBreak (1, 0), pretty test, PrettyBreak (0, 0), PrettyString ")" ] ) | BICLoadOperation{ kind, address } => PrettyBlock (3, false, [], [ PrettyString("Load" ^ loadStoreKindRepr kind), PrettyBreak (1, 0), prettyAddress address ] ) | BICStoreOperation{ kind, address, value } => PrettyBlock (3, false, [], [ PrettyString("Store" ^ loadStoreKindRepr kind), PrettyBreak (1, 0), prettyAddress address, PrettyBreak (1, 0), PrettyString "<=", PrettyBreak (1, 0), pretty value ] ) | BICBlockOperation{ kind, sourceLeft, destRight, length } => PrettyBlock (3, false, [], [ PrettyString(blockOpKindRepr kind ^ "("), PrettyBreak (1, 0), prettyAddress sourceLeft, PrettyBreak (1, 0), PrettyString ",", prettyAddress destRight, PrettyBreak (1, 0), PrettyString ",", pretty length, PrettyBreak (1, 0), PrettyString ")" ] ) (* That list should be exhaustive! 
*) end (* pretty *) and prettyBinding(BICDeclar dec) = prettySimpleBinding dec | prettyBinding(BICRecDecs ptl) = let fun prettyRDec {lambda, addr} = PrettyBlock (1, false, [], [ PrettyString (concat ["DECL #", Int.toString addr, "="]), PrettyBreak (1, 0), pretty(BICLambda lambda) ] ) in PrettyBlock (1, true, [], PrettyString ("MUTUAL" ^ "(") :: pList(ptl, " AND ", prettyRDec) @ [ PrettyBreak (0, 0), PrettyString (")") ] ) end | prettyBinding(BICNullBinding c) = pretty c | prettyBinding(BICDecContainer{addr, size}) = PrettyString (concat ["CONTAINER #", Int.toString addr, "=", Int.toString size]) and prettySimpleBinding{value, addr} = PrettyBlock (1, false, [], [ PrettyString (concat ["DECL #", Int.toString addr, "="]), PrettyBreak (1, 0), pretty value ] ) structure Sharing = struct type backendIC = backendIC and bicLoadForm = bicLoadForm and caseType = caseType and pretty = pretty and argumentType = argumentType and bicCodeBinding = bicCodeBinding and bicSimpleBinding = bicSimpleBinding and loadStoreKind = loadStoreKind and blockOpKind = blockOpKind and unaryOps = BuiltIns.unaryOps and binaryOps = BuiltIns.binaryOps and nullaryOps = BuiltIns.nullaryOps and testConditions = BuiltIns.testConditions and arithmeticOperations = BuiltIns.arithmeticOperations end end; diff --git a/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONS.ML b/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONS.ML index e1e0edc3..8b30abef 100644 --- a/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONS.ML +++ b/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONS.ML @@ -1,1781 +1,1803 @@ (* - Copyright (c) 2015-18, 2020 David C.J. Matthews + Copyright (c) 2015-18, 2020-21 David C.J. Matthews Copyright (c) 2000 Cambridge University Technical Services Limited This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor INTCODECONS ( structure DEBUG: DEBUG structure PRETTY: PRETTYSIG ) : INTCODECONSSIG = struct open CODE_ARRAY open DEBUG open Address open Misc infix 5 << <<+ <<- >> >>+ >>- ~>> ~>>+ ~>>- (* Shift operators *) infix 3 andb orb xorb andbL orbL xorbL andb8 orb8 xorb8 val op << = Word.<< and op >> = Word.>> and op ~>> = Word.~>> val wordToWord8 = Word8.fromLargeWord o Word.toLargeWord and word8ToWord = Word.fromLargeWord o Word8.toLargeWord (* Typically the compiler is built on a little-endian machine but it could be run on a machine with either endian-ness. We have to find out the endian-ness when we run. There are separate versions of the compiler for 32-bit and 64-bit so that can be a constant. *) local val isBigEndian: unit -> bool = RunCall.rtsCallFast1 "PolyIsBigEndian" in val isBigEndian = isBigEndian() end val opcode_jump = 0wx02 (* 8-bit unsigned jump forward. *) and opcode_jumpFalse = 0wx03 (* Test top of stack. Take 8-bit unsigned jump if false. 
*) and opcode_loadMLWord = 0wx04 and opcode_storeMLWord = 0wx05 and opcode_alloc_ref = 0wx06 and opcode_blockMoveWord = 0wx07 and opcode_loadUntagged = 0wx08 and opcode_storeUntagged = 0wx09 and opcode_case16 = 0wx0a and opcode_callClosure = 0wx0c and opcode_returnW = 0wx0d and opcode_containerB = 0wx0e and opcode_raiseEx = 0wx10 (*and opcode_callConstAddr16 = 0wx11 and opcode_callConstAddr8 = 0wx12*) and opcode_localW = 0wx13 and opcode_constAddr16_8 = 0wx14 and opcode_constAddr8_8 = 0wx15 and opcode_callLocalB = 0wx16 and opcode_callConstAddr8_8 = 0wx17 and opcode_callConstAddr16_8 = 0wx18 (*and opcode_constAddr16 = 0wx1a *) and opcode_constIntW = 0wx1b and opcode_jumpBack8 = 0wx1e (* 8-bit unsigned jump backwards - relative to end of instr. *) and opcode_returnB = 0wx1f and opcode_jumpBack16 = 0wx20 (* 16-bit unsigned jump backwards - relative to end of instr. *) and opcode_indirectLocalBB = 0wx21 and opcode_localB = 0wx22 and opcode_indirectB = 0wx23 and opcode_moveToContainerB = 0wx24 and opcode_setStackValB = 0wx25 and opcode_resetB = 0wx26 and opcode_resetRB = 0wx27 and opcode_constIntB = 0wx28 and opcode_local_0 = 0wx29 and opcode_local_1 = 0wx2a and opcode_local_2 = 0wx2b and opcode_local_3 = 0wx2c and opcode_local_4 = 0wx2d and opcode_local_5 = 0wx2e and opcode_local_6 = 0wx2f and opcode_local_7 = 0wx30 and opcode_local_8 = 0wx31 and opcode_local_9 = 0wx32 and opcode_local_10 = 0wx33 and opcode_local_11 = 0wx34 and opcode_indirect_0 = 0wx35 and opcode_indirect_1 = 0wx36 and opcode_indirect_2 = 0wx37 and opcode_indirect_3 = 0wx38 and opcode_indirect_4 = 0wx39 and opcode_indirect_5 = 0wx3a and opcode_const_0 = 0wx3b and opcode_const_1 = 0wx3c and opcode_const_2 = 0wx3d and opcode_const_3 = 0wx3e and opcode_const_4 = 0wx3f and opcode_const_10 = 0wx40 and opcode_return_1 = 0wx42 and opcode_return_2 = 0wx43 and opcode_return_3 = 0wx44 and opcode_local_12 = 0wx45 and opcode_jumpTrue = 0wx46 and opcode_jump16True = 0wx47 and opcode_local_13 = 0wx49 and opcode_local_14 = 0wx4a and opcode_local_15 = 0wx4b and opcode_arbAdd = 0wx4c and opcode_arbSubtract = 0wx4d and opcode_arbMultiply = 0wx4e + (*and opcode_enterIntARM64 = 0wx4f*) (* Reserved - the low order byte of a load *) and opcode_reset_1 = 0wx50 and opcode_reset_2 = 0wx51 + and opcode_noop = 0wx52 (* Used for alignment in ARM64 enter-int. 
*) and opcode_indirectClosureBB = 0wx54 and opcode_constAddr8_0 = 0wx55 and opcode_constAddr8_1 = 0wx56 and opcode_callConstAddr8_0 = 0wx57 and opcode_callConstAddr8_1 = 0wx58 and opcode_resetR_1 = 0wx64 and opcode_resetR_2 = 0wx65 and opcode_resetR_3 = 0wx66 and opcode_tupleB = 0wx68 and opcode_tuple_2 = 0wx69 and opcode_tuple_3 = 0wx6a and opcode_tuple_4 = 0wx6b and opcode_lock = 0wx6c and opcode_ldexc = 0wx6d and opcode_indirectContainerB= 0wx74 and opcode_moveToMutClosureB = 0wx75 and opcode_allocMutClosureB = 0wx76 and opcode_indirectClosureB0 = 0wx77 and opcode_pushHandler = 0wx78 and opcode_indirectClosureB1 = 0wx7a and opcode_tailbb = 0wx7b and opcode_indirectClosureB2 = 0wx7c and opcode_setHandler = 0wx81 and opcode_callFastRTS0 = 0wx83 and opcode_callFastRTS1 = 0wx84 and opcode_callFastRTS2 = 0wx85 and opcode_callFastRTS3 = 0wx86 and opcode_callFastRTS4 = 0wx87 and opcode_callFastRTS5 = 0wx88 and opcode_notBoolean = 0wx91 and opcode_isTagged = 0wx92 and opcode_cellLength = 0wx93 and opcode_cellFlags = 0wx94 and opcode_clearMutable = 0wx95 - and opcode_atomicIncr = 0wx97 - and opcode_atomicDecr = 0wx98 and opcode_equalWord = 0wxa0 and opcode_lessSigned = 0wxa2 and opcode_lessUnsigned = 0wxa3 and opcode_lessEqSigned = 0wxa4 and opcode_lessEqUnsigned = 0wxa5 and opcode_greaterSigned = 0wxa6 and opcode_greaterUnsigned = 0wxa7 and opcode_greaterEqSigned = 0wxa8 and opcode_greaterEqUnsigned = 0wxa9 and opcode_fixedAdd = 0wxaa and opcode_fixedSub = 0wxab and opcode_fixedMult = 0wxac and opcode_fixedQuot = 0wxad and opcode_fixedRem = 0wxae and opcode_wordAdd = 0wxb1 and opcode_wordSub = 0wxb2 and opcode_wordMult = 0wxb3 and opcode_wordDiv = 0wxb4 and opcode_wordMod = 0wxb5 and opcode_wordAnd = 0wxb7 and opcode_wordOr = 0wxb8 and opcode_wordXor = 0wxb9 and opcode_wordShiftLeft = 0wxba and opcode_wordShiftRLog = 0wxbb and opcode_allocByteMem = 0wxbd and opcode_indirectLocalB1 = 0wxc1 and opcode_isTaggedLocalB = 0wxc2 and opcode_jumpNEqLocalInd = 0wxc3 and opcode_jumpTaggedLocal = 0wxc4 and opcode_jumpNEqLocal = 0wxc5 and opcode_indirect0Local0 = 0wxc6 and opcode_indirectLocalB0 = 0wxc7 and opcode_closureB = 0wxd0 and opcode_getThreadId = 0wxd9 and opcode_allocWordMemory = 0wxda and opcode_loadMLByte = 0wxdc and opcode_storeMLByte = 0wxe4 and opcode_blockMoveByte = 0wxec and opcode_blockEqualByte = 0wxed and opcode_blockCompareByte = 0wxee and opcode_deleteHandler = 0wxf1 (* Just deletes the handler - no jump. *) and opcode_jump16 = 0wxf7 and opcode_jump16False = 0wxf8 and opcode_setHandler16 = 0wxf9 (*and opcode_constAddr8 = 0wxfa*) (*and opcode_stackSize8 = 0wxfb*) and opcode_stackSize16 = 0wxfc and opcode_escape = 0wxfe (* For two-byte opcodes. 
*) (*and opcode_enterIntX86 = 0wxff*) (* Reserved - this is the first byte of a call *) (* Extended opcodes - preceded by 0xfe escape *) val ext_opcode_containerW = 0wx0b and ext_opcode_allocMutClosureW = 0wx0f (* Allocate a mutable closure for mutual recursion *) and ext_opcode_indirectClosureW = 0wx10 and ext_opcode_indirectContainerW= 0wx11 and ext_opcode_indirectW = 0wx14 and ext_opcode_moveToContainerW = 0wx15 and ext_opcode_moveToMutClosureW = 0wx16 and ext_opcode_setStackValW = 0wx17 and ext_opcode_resetW = 0wx18 and ext_opcode_resetR_w = 0wx19 and ext_opcode_callFastRTSRRtoR = 0wx1c and ext_opcode_callFastRTSRGtoR = 0wx1d and ext_opcode_jump32True = 0wx48 and ext_opcode_floatAbs = 0wx56 and ext_opcode_floatNeg = 0wx57 and ext_opcode_fixedIntToFloat = 0wx58 and ext_opcode_floatToReal = 0wx59 and ext_opcode_realToFloat = 0wx5a and ext_opcode_floatEqual = 0wx5b and ext_opcode_floatLess = 0wx5c and ext_opcode_floatLessEq = 0wx5d and ext_opcode_floatGreater = 0wx5e and ext_opcode_floatGreaterEq = 0wx5f and ext_opcode_floatAdd = 0wx60 and ext_opcode_floatSub = 0wx61 and ext_opcode_floatMult = 0wx62 and ext_opcode_floatDiv = 0wx63 and ext_opcode_tupleW = 0wx67 and ext_opcode_realToInt = 0wx6e and ext_opcode_floatToInt = 0wx6f and ext_opcode_callFastRTSFtoF = 0wx70 and ext_opcode_callFastRTSGtoF = 0wx71 and ext_opcode_callFastRTSFFtoF = 0wx72 and ext_opcode_callFastRTSFGtoF = 0wx73 and ext_opcode_realUnordered = 0wx79 and ext_opcode_floatUnordered = 0wx7a and ext_opcode_tail = 0wx7c and ext_opcode_callFastRTSRtoR = 0wx8f and ext_opcode_callFastRTSGtoR = 0wx90 + and ext_opcode_atomicExchAdd = 0wx96 and ext_opcode_atomicReset = 0wx99 and ext_opcode_longWToTagged = 0wx9a and ext_opcode_signedToLongW = 0wx9b and ext_opcode_unsignedToLongW = 0wx9c and ext_opcode_realAbs = 0wx9d and ext_opcode_realNeg = 0wx9e and ext_opcode_fixedIntToReal = 0wx9f and ext_opcode_fixedDiv = 0wxaf and ext_opcode_fixedMod = 0wxb0 and ext_opcode_wordShiftRArith = 0wxbc and ext_opcode_lgWordEqual = 0wxbe and ext_opcode_lgWordLess = 0wxc0 and ext_opcode_lgWordLessEq = 0wxc1 and ext_opcode_lgWordGreater = 0wxc2 and ext_opcode_lgWordGreaterEq = 0wxc3 and ext_opcode_lgWordAdd = 0wxc4 and ext_opcode_lgWordSub = 0wxc5 and ext_opcode_lgWordMult = 0wxc6 and ext_opcode_lgWordDiv = 0wxc7 and ext_opcode_lgWordMod = 0wxc8 and ext_opcode_lgWordAnd = 0wxc9 and ext_opcode_lgWordOr = 0wxca and ext_opcode_lgWordXor = 0wxcb and ext_opcode_lgWordShiftLeft = 0wxcc and ext_opcode_lgWordShiftRLog = 0wxcd and ext_opcode_lgWordShiftRArith = 0wxce and ext_opcode_realEqual = 0wxcf and ext_opcode_closureW = 0wxd0 and ext_opcode_realLess = 0wxd1 and ext_opcode_realLessEq = 0wxd2 and ext_opcode_realGreater = 0wxd3 and ext_opcode_realGreaterEq = 0wxd4 and ext_opcode_realAdd = 0wxd5 and ext_opcode_realSub = 0wxd6 and ext_opcode_realMult = 0wxd7 and ext_opcode_realDiv = 0wxd8 and ext_opcode_loadC8 = 0wxdd and ext_opcode_loadC16 = 0wxde and ext_opcode_loadC32 = 0wxdf and ext_opcode_loadC64 = 0wxe0 and ext_opcode_loadCFloat = 0wxe1 and ext_opcode_loadCDouble = 0wxe2 and ext_opcode_storeC8 = 0wxe5 and ext_opcode_storeC16 = 0wxe6 and ext_opcode_storeC32 = 0wxe7 and ext_opcode_storeC64 = 0wxe8 and ext_opcode_storeCFloat = 0wxe9 and ext_opcode_storeCDouble = 0wxea and ext_opcode_constAddr32_16 = 0wxf0 (* Followed by a 32-bit offset and a 16-bit constant number. *) and ext_opcode_jump32 = 0wxf2 (* 32-bit signed jump, forwards or backwards. *) and ext_opcode_jump32False = 0wxf3 (* Test top item. Take 32-bit signed jump if false. 
*) (*and ext_opcode_constAddr32 = 0wxf4 *) and ext_opcode_setHandler32 = 0wxf5 (* Setup a handler whose address is given by the 32-bit signed offset. *) and ext_opcode_case32 = 0wxf6 (* Indexed case with 32-bit offsets *) and ext_opcode_allocCSpace = 0wxfd and ext_opcode_freeCSpace = 0wxfe (* A Label is a ref that is later set to the location. Several labels can be linked together so that they are only set at a single point. Only forward jumps are linked so when we come to finally set the label we will have the full list. *) type labels = Word.word ref list ref (* Used for jump, jumpFalse, setHandler and delHandler. *) datatype jumpTypes = Jump | JumpBack | JumpFalse | JumpTrue | SetHandler datatype opcode = SimpleCode of Word8.word list (* Bytes that don't need any special treatment *) | LabelCode of labels (* A label - forwards or backwards. *) | JumpInstruction of { label: labels, jumpType: jumpTypes, size: jumpSize ref } (* Jumps or SetHandler. *) | PushConstant of { constNum: int, size : jumpSize ref, isCall: bool } | PushShort of Word.word | IndexedCase of { labels: labels list, size : jumpSize ref } | LoadLocal of Word8.word (* Locals - simplifies peephole optimisation. *) | IndirectLocal of { localAddr: Word8.word, indirect: Word8.word } (* Ditto *) | UncondTransfer of Word8.word list (* Raisex, return and tail. *) | IsTaggedLocalB of Word8.word | JumpOnIsTaggedLocalB of { label: labels, size: jumpSize ref, localAddr: Word8.word } | JumpNotEqualLocalInd0BB of { label: labels, size: jumpSize ref, localAddr: Word8.word, const: Word8.word } | JumpNotEqualLocalConstBB of { label: labels, size: jumpSize ref, localAddr: Word8.word, const: Word8.word } + | EnterIntArm64 of Word8.word (* Special case because it has to be 32-bit aligned. *) and jumpSize = Size8 | Size16 | Size32 and code = Code of { constVec: machineWord list ref, (* Vector of words to be put at end *) procName: string, (* Name of the procedure. *) printAssemblyCode:bool, (* Whether to print the code when we finish. *) printStream: string->unit, (* The stream to use *) stage1Code: opcode list ref, enterIntMode: int (* 0 => None, 1 => X86. *) } val getEnterIntMode: unit -> int = RunCall.rtsCallFast0 "PolyInterpretedEnterIntMode" (* create and initialise a code segment *) fun codeCreate (name : string, parameters) = let val printStream = PRETTY.getSimplePrinter(parameters, []) in Code { constVec = ref [], procName = name, printAssemblyCode = DEBUG.getParameter DEBUG.assemblyCodeTag parameters, printStream = printStream, stage1Code = ref [], enterIntMode = getEnterIntMode() } end (* Find the offset in the constant area of a constant. *) (* The first has offset 0. *) fun addConstToVec (valu, Code{constVec, ...}) = let (* Search the list to see if the constant is already there. *) fun findConst valu [] num = (* Add to the list *) ( constVec := ! constVec @ [valu]; num ) | findConst valu (h :: t) num = if wordEq (valu, h) then num else findConst valu t (num + 1) (* Not equal *) in findConst valu (! constVec) 0 end fun printCode (seg: codeVec, procName: string, endcode, printStream) = let val () = printStream "\n"; val () = if procName = "" (* No name *) then printStream "?" else printStream procName; val () = printStream ":\n"; (* prints a string representation of a number *) fun printHex (v) = printStream(Word.fmt StringCvt.HEX v); val ptr = ref 0w0; (* Gets "length" bytes from locations "addr", "addr"+1... Returns an unsigned number. 
*) fun getB (0, _, _) = 0w0 | getB (length, addr, seg) = (getB (length - 1, addr + 0w1, seg) << 0w8) + word8ToWord (codeVecGet (seg, addr)) (* Prints a relative address. *) fun printDisp (len, spacer: string) = let val ad = getB(len, !ptr, seg) + !ptr + Word.fromInt len val () = printStream spacer; val () = printHex ad; in ptr := !ptr + Word.fromInt len end (* Prints an operand of an instruction *) fun printOp (len, spacer : string) = let val () = printStream spacer; val () = printHex (getB (len, !ptr, seg)) in ptr := !ptr + Word.fromInt len end; in while !ptr < endcode do let val addr = !ptr in printHex addr; (* The address. *) let (* It's an instruction. *) val () = printStream "\t" val opc = codeVecGet (seg, !ptr) (* opcode *) val () = ptr := !ptr + 0w1 in case opc of 0wx02 => (printStream "jump"; printDisp (1, "\t\t")) | 0wx03 => (printStream "jumpFalse"; printDisp (1, "\t")) | 0wx04 => printStream "loadMLWord" | 0wx05 => printStream "storeMLWord" | 0wx06 => printStream "alloc_ref" | 0wx07 => printStream "blockMoveWord" | 0wx08 => printStream "loadUntagged" | 0wx09 => printStream "storeUntagged" | 0wx0a => let (* Have to find out how many items there are. *) val limit = getB (2, !ptr, seg); val () = printOp (2, "case16\t"); val base = !ptr; fun printEntry _ = (printStream "\n\t"; printHex(base + getB(2, !ptr, seg)); ptr := !ptr + 0w2) fun forLoop f i n = if i >= n then () else (f i; forLoop f (i + 0w1) n) in forLoop printEntry 0w0 limit end | 0wx0c => printStream "callClosure" | 0wx0d => printOp(2, "returnW\t") | 0wx0e => printStream "containerB" | 0wx0f => printOp(2, "allocMutClosure") | 0wx10 => printStream "raiseEx" | 0wx11 => printDisp (2, "callConstAddr16\t") | 0wx12 => printDisp (1, "callConstAddr8\t") | 0wx13 => printOp(2, "localW\t") | 0wx14 => (printDisp (2, "constAddr16_8\t"); printOp(1, ",")) | 0wx15 => (printDisp (1, "constAddr8_8\t"); printOp(1, ",")) | 0wx16 => printOp(1, "callLocalB\t") | 0wx17 => (printDisp (1, "callConstAddr8_8\t"); printOp(1, ",")) | 0wx18 => (printDisp (2, "callConstAddr16_8\t"); printOp(1, ",")) | 0wx1a => (printStream "constAddr16"; printDisp (2, "\t")) | 0wx1b => printOp(2, "constIntW\t") | 0wx1e => ((* Should be negative *) printStream "jumpBack8\t"; printHex((!ptr - 0w1) - getB(1, !ptr, seg)); ptr := !ptr + 0w1 ) | 0wx1f => printOp(1, "returnB\t") | 0wx20 => ( printStream "jumpBack16\t"; printHex((!ptr - 0w1) - getB(2, !ptr, seg)); ptr := !ptr + 0w2 ) | 0wx21 => (printOp(1, "indirectLocalBB\t"); printOp(1, ",")) | 0wx22 => printOp(1, "localB\t") | 0wx23 => printOp(1, "indirectB\t") | 0wx24 => printOp(1, "moveToContainerB\t") | 0wx25 => printOp(1, "setStackValB\t") | 0wx26 => printOp(1, "resetB\t") | 0wx27 => printOp(1, "resetRB\t") | 0wx28 => printOp(1, "constIntB\t") | 0wx29 => printStream "local_0" | 0wx2a => printStream "local_1" | 0wx2b => printStream "local_2" | 0wx2c => printStream "local_3" | 0wx2d => printStream "local_4" | 0wx2e => printStream "local_5" | 0wx2f => printStream "local_6" | 0wx30 => printStream "local_7" | 0wx31 => printStream "local_8" | 0wx32 => printStream "local_9" | 0wx33 => printStream "local_10" | 0wx34 => printStream "local_11" | 0wx35 => printStream "indirect_0" | 0wx36 => printStream "indirect_1" | 0wx37 => printStream "indirect_2" | 0wx38 => printStream "indirect_3" | 0wx39 => printStream "indirect_4" | 0wx3a => printStream "indirect_5" | 0wx3b => printStream "const_0" | 0wx3c => printStream "const_1" | 0wx3d => printStream "const_2" | 0wx3e => printStream "const_3" | 0wx3f => printStream "const_4" | 0wx40 => 
printStream "const_10" | 0wx41 => printStream "return_0" | 0wx42 => printStream "return_1" | 0wx43 => printStream "return_2" | 0wx44 => printStream "return_3" | 0wx45 => printStream "local_12" | 0wx46 => (printStream "jumpTrue"; printDisp (1, "\t")) | 0wx47 => (printStream "jumpTrue"; printDisp (2, "\t")) | 0wx49 => printStream "local_13" | 0wx4a => printStream "local_14" | 0wx4b => printStream "local_15" | 0wx4c => printStream "arbAdd" | 0wx4d => printStream "arbSubtract" | 0wx4e => printStream "arbMultiply" + | 0wx4f => (printStream "enterIntARM64"; ptr := !ptr + 0w8) | 0wx50 => printStream "reset_1" | 0wx51 => printStream "reset_2" + | 0wx52 => printStream "noop" | 0wx54 => (printOp(1, "indirectClosureBB\t"); printOp(1, ", ")) | 0wx55 => printDisp (1, "constAddr8_0\t") | 0wx56 => printDisp (1, "constAddr8_1\t") | 0wx57 => printDisp (1, "callConstAddr8_0\t") | 0wx58 => printDisp (1, "callConstAddr8_1\t") | 0wx64 => printStream "resetR_1" | 0wx65 => printStream "resetR_2" | 0wx66 => printStream "resetR_3" | 0wx68 => printOp(1, "tupleB\t") | 0wx69 => printStream "tuple_2" | 0wx6a => printStream "tuple_3" | 0wx6b => printStream "tuple_4" | 0wx6c => printStream "lock" | 0wx6d => printStream "ldexc" | 0wx74 => printOp(1, "indirectContainerB\t") | 0wx75 => printOp(1, "moveToMutClosureB\t") | 0wx76 => printOp(1, "allocMutClosureB\t") | 0wx77 => printOp(1, "indirectClosureB0\t") | 0wx78 => printStream "pushHandler" | 0wx7a => printOp(1, "indirectClosureB1\t") | 0wx7b => (printOp (1, "tailbb\t"); printOp (1, ",")) | 0wx7c => printOp(1, "indirectClosureB2\t") | 0wx7d => printOp(1, "tail3b\t") | 0wx7e => printOp(1, "tail4b\t") | 0wx7f => printStream "tail3_2" | 0wx80 => printStream "tail3_3" | 0wx81 => (printStream "setHandler"; printDisp (1, "\t")) | 0wx83 => printStream "callFastRTS0" | 0wx84 => printStream "callFastRTS1" | 0wx85 => printStream "callFastRTS2" | 0wx86 => printStream "callFastRTS3" | 0wx87 => printStream "callFastRTS4" | 0wx88 => printStream "callFastRTS5" | 0wx91 => printStream "notBoolean" | 0wx92 => printStream "isTagged" | 0wx93 => printStream "cellLength" | 0wx94 => printStream "cellFlags" | 0wx95 => printStream "clearMutable" - | 0wx97 => printStream "atomicIncr" - | 0wx98 => printStream "atomicDecr" | 0wxa0 => printStream "equalWord" | 0wxa1 => printOp(1, "equalWordConstB\t") | 0wxa2 => printStream "lessSigned" | 0wxa3 => printStream "lessUnsigned" | 0wxa4 => printStream "lessEqSigned" | 0wxa5 => printStream "lessEqUnsigned" | 0wxa6 => printStream "greaterSigned" | 0wxa7 => printStream "greaterUnsigned" | 0wxa8 => printStream "greaterEqSigned" | 0wxa9 => printStream "greaterEqUnsigned" | 0wxaa => printStream "fixedAdd" | 0wxab => printStream "fixedSub" | 0wxac => printStream "fixedMult" | 0wxad => printStream "fixedQuot" | 0wxae => printStream "fixedRem" | 0wxb1 => printStream "wordAdd" | 0wxb2 => printStream "wordSub" | 0wxb3 => printStream "wordMult" | 0wxb4 => printStream "wordDiv" | 0wxb5 => printStream "wordMod" | 0wxb7 => printStream "wordAnd" | 0wxb8 => printStream "wordOr" | 0wxb9 => printStream "wordXor" | 0wxba => printStream "wordShiftLeft" | 0wxbb => printStream "wordShiftRLog" | 0wxbd => printStream "allocByteMem" | 0wxc1 => printOp(1, "indirectLocalB1\t") | 0wxc2 => printOp(1, "isTaggedLocalB\t") | 0wxc3 => (printOp(1, "jumpNEqLocalInd\t"); printOp(1, ","); printOp(1, ","); printDisp(1, "\t")) | 0wxc4 => (printOp(1, "jumpTaggedLocal\t"); printDisp(1, "\t")) | 0wxc5 => (printOp(1, "jumpNEqLocal\t"); printOp(1, ","); printOp(1, ","); printDisp(1, "\t")) | 0wxc6 
=> printStream "indirect0Local0" | 0wxc7 => printOp(1, "indirectLocalB0\t") | 0wxd0 => printOp(1, "closureB\t") | 0wxd9 => printStream "getThreadId" | 0wxda => printStream "allocWordMemory" | 0wxdc => printStream "loadMLByte" | 0wxe4 => printStream "storeMLByte" | 0wxec => printStream "blockMoveByte" | 0wxed => printStream "blockEqualByte" | 0wxee => printStream "blockCompareByte" | 0wxf1 => printStream "deleteHandler" | 0wxf7 => printStream "jump16" | 0wxf8 => printStream "jump16False" | 0wxf9 => printStream "setHandler16" | 0wxfa => printDisp (1, "constAddr8\t") | 0wxfb => printOp(1, "stackSize8\t") | 0wxfc => printOp(2, "stackSize16\t") - | 0wxff => printStream "enterIntX86" + | 0wxff => (printStream "enterIntX86"; ptr := !ptr + 0w3) | 0wxfe => ( case codeVecGet (seg, !ptr) before ptr := !ptr + 0w1 of 0wx0b => printStream "containerW" | 0wx10 => printOp(2, "indirectClosureW\t") | 0wx11 => printOp(2, "indirectContainerW\t") | 0wx14 => printOp(2, "indirectW\t") | 0wx15 => printOp(2, "moveToContainerW\t") | 0wx16 => printOp(2, "moveToMutClosureW\t") | 0wx17 => printOp(2, "setStackValW\t") | 0wx18 => printOp(2, "resetW\t") | 0wx19 => printOp(2, "resetR_w\t") | 0wx1c => printStream "callFastRTSRRtoR" | 0wx1d => printStream "callFastRTSRGtoR" | 0wx48 => (printStream "jumpTrue"; printDisp (4, "\t")) | 0wx56 => printStream "floatAbs" | 0wx57 => printStream "floatNeg" | 0wx58 => printStream "fixedIntToFloat" | 0wx59 => printStream "floatToReal" | 0wx5a => printOp(1, "realToFloat\t") | 0wx5b => printStream "floatEqual" | 0wx5c => printStream "floatLess" | 0wx5d => printStream "floatLessEq" | 0wx5e => printStream "floatGreater" | 0wx5f => printStream "floatGreaterEq" | 0wx60 => printStream "floatAdd" | 0wx61 => printStream "floatSub" | 0wx62 => printStream "floatMult" | 0wx63 => printStream "floatDiv" | 0wx67 => printOp(2, "tupleW\t") | 0wx6e => printOp(1, "realToInt\t") | 0wx6f => printOp(1, "floatToInt\t") | 0wx70 => printStream "callFastRTSFtoF" | 0wx71 => printStream "callFastRTSGtoF" | 0wx72 => printStream "callFastRTSFFtoF" | 0wx73 => printStream "callFastRTSFGtoF" | 0wx79 => printStream "realUnordered" | 0wx7a => printStream "floatUnordered" | 0wx7c => (printOp (2, "tail\t"); printOp (2, ",")) | 0wx8f => printStream "callFastRTSRtoR" | 0wx90 => printStream "callFastRTSGtoR" + | 0wx96 => printStream "atomicExchAdd" | 0wx99 => printStream "atomicReset" | 0wx9a => printStream "longWToTagged" | 0wx9b => printStream "signedToLongW" | 0wx9c => printStream "unsignedToLongW" | 0wx9d => printStream "realAbs" | 0wx9e => printStream "realNeg" | 0wx9f => printStream "fixedIntToReal" | 0wxaf => printStream "fixedDiv" | 0wxb0 => printStream "fixedMod" | 0wxbc => printStream "wordShiftRArith" | 0wxbe => printStream "lgWordEqual" | 0wxc0 => printStream "lgWordLess" | 0wxc1 => printStream "lgWordLessEq" | 0wxc2 => printStream "lgWordGreater" | 0wxc3 => printStream "lgWordGreaterEq" | 0wxc4 => printStream "lgWordAdd" | 0wxc5 => printStream "lgWordSub" | 0wxc6 => printStream "lgWordMult" | 0wxc7 => printStream "lgWordDiv" | 0wxc8 => printStream "lgWordMod" | 0wxc9 => printStream "lgWordAnd" | 0wxca => printStream "lgWordOr" | 0wxcb => printStream "lgWordXor" | 0wxcc => printStream "lgWordShiftLeft" | 0wxcd => printStream "lgWordShiftRLog" | 0wxce => printStream "lgWordShiftRArith" | 0wxcf => printStream "realEqual" | 0wxd0 => printOp(2, "closureW\t") | 0wxd1 => printStream "realLess" | 0wxd2 => printStream "realLessEq" | 0wxd3 => printStream "realGreater" | 0wxd4 => printStream "realGreaterEq" | 0wxd5 => 
printStream "realAdd" | 0wxd6 => printStream "realSub" | 0wxd7 => printStream "realMult" | 0wxd8 => printStream "realDiv" | 0wxdd => printStream "loadC8" | 0wxde => printStream "loadC16" | 0wxdf => printStream "loadC32" | 0wxe0 => printStream "loadC64" | 0wxe1 => printStream "loadCFloat" | 0wxe2 => printStream "loadCDouble" | 0wxe5 => printStream "storeC8" | 0wxe6 => printStream "storeC16" | 0wxe7 => printStream "storeC32" | 0wxe8 => printStream "storeC64" | 0wxe9 => printStream "storeCFloat" | 0wxea => printStream "storeCDouble" | 0wxf0 => (printDisp(4, "constAddr32_16\t"); printOp (2, ",")) | 0wxf2 => printDisp (4, "jump32\t") | 0wxf3 => printDisp (4, "jump32False\t") | 0wxf4 => printDisp (4, "constAddr32\t") | 0wxf5 => printDisp (4, "setHandler32\t") | 0wxf6 => let (* Have to find out how many items there are. *) val limit = getB (2, !ptr, seg); val () = printOp (2, "case32\t"); val base = !ptr; fun printEntry _ = (printStream "\n\t"; printHex(base + getB(4, !ptr, seg)); ptr := !ptr + 0w4) fun forLoop f i n = if i >= n then () else (f i; forLoop f (i + 0w1) n) in forLoop printEntry 0w0 limit end | 0wxfd => printStream "allocCSpace" | 0wxfe => printStream "freeCSpace" | _ => printStream ("unknown:0xfe 0x" ^ Word8.toString opc) ) | opc => printStream("unknown:0x" ^ Word8.toString opc) end; (* an instruction. *) printStream "\n" end (* main loop *) end (* printCode *) fun codeSize (SimpleCode l) = List.length l | codeSize (LabelCode _) = 0 | codeSize (JumpInstruction{size=ref Size8, ...}) = 2 | codeSize (JumpInstruction{size=ref Size16, ...}) = 3 | codeSize (JumpInstruction{size=ref Size32, ...}) = 6 | codeSize (PushConstant{size=ref Size8, constNum, ...}) = if constNum <= 1 then 2 else 3 | codeSize (PushConstant{size=ref Size16, ...}) = 4 | codeSize (PushConstant{size=ref Size32, isCall=false, ...}) = 8 | codeSize (PushConstant{size=ref Size32, isCall=true, ...}) = 9 | codeSize (PushShort value) = if value <= 0w4 orelse value = 0w10 then 1 else if value < 0w256 then 2 else 3 | codeSize (IndexedCase{labels, size=ref Size32, ...}) = 4 + List.length labels * 4 | codeSize (IndexedCase{labels, size=ref Size16, ...}) = 3 + List.length labels * 2 | codeSize (IndexedCase{labels=_, size=ref Size8, ...}) = raise InternalError "codeSize" | codeSize (LoadLocal w) = if w <= 0w15 then 1 else 2 | codeSize (IndirectLocal{indirect=0w0, localAddr=0w0}) = 1 | codeSize (IndirectLocal{indirect=0w0, ...}) = 2 | codeSize (IndirectLocal{indirect=0w1, ...}) = 2 | codeSize (IndirectLocal _) = 3 | codeSize (UncondTransfer l) = List.length l | codeSize (IsTaggedLocalB _) = 2 | codeSize (JumpOnIsTaggedLocalB{size=ref Size8, ...}) = 3 | codeSize (JumpOnIsTaggedLocalB{size=ref Size16, ...}) = 5 | codeSize (JumpOnIsTaggedLocalB{size=ref Size32, ...}) = 8 | codeSize (JumpNotEqualLocalInd0BB{size=ref Size8, ...}) = 4 | codeSize (JumpNotEqualLocalInd0BB{label, size, localAddr, const}) = codeSize(IndirectLocal{localAddr=localAddr, indirect=0w0}) + codeSize(PushShort(word8ToWord const)) + 1 + codeSize(JumpInstruction{jumpType=JumpFalse, label=label, size=size}) | codeSize (JumpNotEqualLocalConstBB{size=ref Size8, ...}) = 4 | codeSize (JumpNotEqualLocalConstBB {label, size, localAddr, const}) = codeSize(LoadLocal localAddr) + codeSize(PushShort(word8ToWord const)) + 1 + codeSize(JumpInstruction{jumpType=JumpFalse, label=label, size=size}) + + | codeSize (EnterIntArm64 _) = 12 (* For simplicity we add no-ops before and/or after *) (* General function to process the code. ic is the byte counter within the original code. 
*) fun foldCode startIc foldFn ops = let fun doFold(oper :: operList, ic) = doFold(operList, (* Get the size BEFORE any possible change. *) ic + Word.fromInt(codeSize oper) before foldFn(oper, ic)) | doFold(_, ic) = ic in doFold(ops, startIc) end (* Process the code, setting the destination of any labels. Return the length of the code. *) fun setLabels(LabelCode(ref labs) :: ops, ic) = (List.app(fn d => d := ic) labs; setLabels(ops, ic)) | setLabels(oper :: ops, ic) = setLabels(ops, ic + Word.fromInt(codeSize oper)) | setLabels([], ic) = ic (* Set the sizes of branches depending on the distance to the destination. *) fun setLabelsAndSizes ops = let (* Set the labels and adjust the sizes, repeating until it never gets smaller*) fun setLabAndSize(ops, lastSize) = let (* Calculate offsets for constants. *) val endIC = Word.andb(lastSize + 0w7, ~ 0w8) val startOfConstants = endIC (* Because the constant area is 8-byte aligned we have to allow for the possibility that the distance between a "load constant" instruction and the target could actually increase. *) val alignment = 0w7 fun adjust(JumpInstruction{size as ref Size32, label=ref lab, ...}, ic) = let val dest = !(hd lab) val diff = if dest <= ic (* N.B. Include infinite loops as backwards. *) then ic - dest (* Backwards - Counts from start of instruction. *) else dest - (ic + 0w6) (* Forwards - Relative to the current end. *) in if diff < 0wx100 then size := Size8 else if diff < 0wx10000 then size := Size16 else () end | adjust(JumpInstruction{size as ref Size16, label=ref lab, ...}, ic) = let val dest = !(hd lab) in if dest <= ic then if ic - dest < 0wx100 then size := Size8 else () else if dest - (ic + 0w3) < 0wx100 then size := Size8 else () end | adjust(IndexedCase{size as ref Size32, labels}, ic) = let val startAddr = ic+0w4 (* Use 16-bit case if all the offsets are 16-bits. *) fun is16bit(ref lab) = let val dest = !(hd lab) in dest > startAddr andalso dest < startAddr+0wx10000 end in if List.all is16bit labels then size := Size16 else () end | adjust(PushConstant{size as ref Size32, constNum, ...}, ic) = let val offset = startOfConstants - (ic + 0w8) (* 8 is ext+opcode+4+2 *) in if constNum >= 0x100 (* Constant numbers >= 256 require full opcode. 
*) then () else if offset < 0wx100-alignment then size := Size8 else if offset < 0wx10000-alignment then size := Size16 else () end | adjust(PushConstant{size as ref Size16, ...}, ic) = let val offset = startOfConstants - (ic + 0w4) (* 4 is opc+2+1 *) in if offset < 0wx100-alignment then size := Size8 else () end | adjust(JumpOnIsTaggedLocalB{size as ref Size32, label=ref lab, ...}, ic) = let val dest = !(hd lab) val diff = dest - (ic + 0w8) in if diff < 0wx100 then size := Size8 else if diff < 0wx10000 then size := Size16 else () end | adjust(JumpOnIsTaggedLocalB{size as ref Size16, label=ref lab, ...}, ic) = let val dest = !(hd lab) in if dest - (ic + 0w5) < 0wx100 then size := Size8 else () end | adjust(j as JumpNotEqualLocalInd0BB{size as ref Size32, label=ref lab, ...}, ic) = let val dest = !(hd lab) val diff = dest - (ic + Word.fromInt(codeSize j)) in if diff < 0wx100 then size := Size8 else if diff < 0wx10000 then size := Size16 else () end | adjust(j as JumpNotEqualLocalInd0BB{size as ref Size16, label=ref lab, ...}, ic) = let val dest = !(hd lab) in if dest - (ic + Word.fromInt(codeSize j)) < 0wx100 then size := Size8 else () end | adjust(j as JumpNotEqualLocalConstBB{size as ref Size32, label=ref lab, ...}, ic) = let val dest = !(hd lab) val diff = dest - (ic + Word.fromInt(codeSize j)) in if diff < 0wx100 then size := Size8 else if diff < 0wx10000 then size := Size16 else () end | adjust(j as JumpNotEqualLocalConstBB{size as ref Size16, label=ref lab, ...}, ic) = let val dest = !(hd lab) in if dest - (ic + Word.fromInt(codeSize j)) < 0wx100 then size := Size8 else () end | adjust _ = () val _ = foldCode 0w0 adjust ops val nextSize = setLabels(ops, 0w0) in if nextSize < lastSize then setLabAndSize(ops, nextSize) else if nextSize = lastSize then lastSize else raise InternalError "setLabAndSize - size increased" end in setLabAndSize(ops, setLabels(ops, 0w0)) end fun genCode(ops, Code {constVec, ...}) = let (* First pass - set the labels. *) val codeSize = setLabelsAndSizes ops val wordSize = wordSize (* Align to 8 bytes on both 32-bits and 64-bits. *) val endIC = Word.andb(codeSize + 0w7, ~ 0w8) val paddingBytes = List.tabulate(Word.toInt(endIC - codeSize), fn _ => SimpleCode[opcode_const_0]) val endOfCode = endIC div wordSize val startOfConstants = endIC val segSize = endOfCode + Word.fromInt(List.length(! constVec)) + 0w4 val codeVec = byteVecMake segSize val ic = ref 0w0 fun genByte b = byteVecSet(codeVec, !ic, b) before ic := !ic + 0w1 fun genByteCode(SimpleCode bytes, _) = (* Simple code - just generate the bytes. *) List.app genByte bytes | genByteCode(UncondTransfer bytes, _) = List.app genByte bytes | genByteCode(LabelCode _, _) = () | genByteCode(JumpInstruction{label=ref labs, jumpType, size=ref Size32, ...}, ic) = let val dest = !(hd labs) val extOpc = case jumpType of SetHandler => ext_opcode_setHandler32 | JumpFalse => ext_opcode_jump32False | JumpTrue => ext_opcode_jump32True | Jump => ext_opcode_jump32 | JumpBack => ext_opcode_jump32 val diff = dest - (ic + 0w6) in genByte opcode_escape; genByte extOpc; genByte(wordToWord8 diff); (* This may be negative so we must use an arithmetic shift. *) genByte(wordToWord8(diff ~>> 0w8)); genByte(wordToWord8(diff ~>> 0w16)); genByte(wordToWord8(diff ~>> 0w24)) end | genByteCode(JumpInstruction{label=ref labs, jumpType, size=ref Size16, ...}, ic) = let val dest = !(hd labs) in if dest <= ic then (* Jump back. 
*) let val _ = jumpType = JumpBack orelse raise InternalError "genByteCode - back jump" val diff = ic - dest val _ = diff < 0wx10000 orelse raise InternalError "genByteCode - jump range" in genByte opcode_jumpBack16; genByte(wordToWord8 diff); genByte(wordToWord8(diff >> 0w8)) end else let val opc = case jumpType of SetHandler => opcode_setHandler16 | JumpFalse => opcode_jump16False | JumpTrue => opcode_jump16True | Jump => opcode_jump16 | JumpBack => raise InternalError "genByteCode: JumpBack goes forward" val diff = dest - (ic + 0w3) val _ = diff < 0wx10000 orelse raise InternalError "genByteCode - jump range" in genByte opc; genByte(wordToWord8 diff); genByte(wordToWord8(diff >> 0w8)) end end | genByteCode(JumpInstruction{label=ref labs, jumpType, size=ref Size8, ...}, ic) = let val dest = !(hd labs) in if dest <= ic then (* Jump back. *) let val _ = jumpType = JumpBack orelse raise InternalError "genByteCode - back jump" val diff = ic - dest val _ = diff < 0wx100 orelse raise InternalError "genByteCode - jump range" in genByte opcode_jumpBack8; genByte(wordToWord8 diff) end else let val opc = case jumpType of SetHandler => opcode_setHandler | JumpFalse => opcode_jumpFalse | JumpTrue => opcode_jumpTrue | Jump => opcode_jump | JumpBack => raise InternalError "genByteCode: JumpBack goes forward" val diff = dest - (ic + 0w2) val _ = diff < 0wx100 orelse raise InternalError "genByteCode - jump range" in genByte opc; genByte(wordToWord8 diff) end end | genByteCode(PushConstant{ constNum, size=ref Size32, isCall=false, ... }, ic) = let (* Offsets are calculated from the END of the instruction *) val offset = startOfConstants - (ic + 0w8) val constNumW = Word.fromInt constNum in genByte opcode_escape; genByte ext_opcode_constAddr32_16; genByte(wordToWord8 offset); genByte(wordToWord8(offset >> 0w8)); genByte(wordToWord8(offset >> 0w16)); genByte(wordToWord8(offset >> 0w24)); genByte(wordToWord8 constNumW); genByte(wordToWord8(constNumW >> 0w8)) end | genByteCode(PushConstant{ constNum, size=ref Size32, isCall=true, ... }, ic) = ( (* Turn this back into a push of a constant and call-closure. *) genByteCode(PushConstant{ constNum=constNum, size=ref Size32, isCall=false }, ic); genByte opcode_callClosure ) | genByteCode(PushConstant{ constNum, size=ref Size16, isCall, ... }, ic) = let val offset = startOfConstants - (ic + 0w4) val _ = offset < 0wx10000 orelse raise InternalError "genByteCode - constant range" in genByte(if isCall then opcode_callConstAddr16_8 else opcode_constAddr16_8); genByte(wordToWord8 offset); genByte(wordToWord8(offset >> 0w8)); genByte(Word8.fromInt constNum) end | genByteCode(PushConstant{ constNum, size=ref Size8, isCall, ... 
}, ic) = if constNum <= 1 then let val offset = startOfConstants - (ic + 0w2) val _ = offset < 0wx100 orelse raise InternalError "genByteCode - constant range" val opCode = case (isCall, constNum) of (false, 0) => opcode_constAddr8_0 | (false, 1) => opcode_constAddr8_1 | (true, 0) => opcode_callConstAddr8_0 | (true, 1) => opcode_callConstAddr8_1 | _ => raise InternalError "genByteCode: constAddr" in genByte opCode; genByte(wordToWord8 offset) end else let val offset = startOfConstants - (ic + 0w3) val _ = offset < 0wx100 orelse raise InternalError "genByteCode - constant range" in genByte(if isCall then opcode_callConstAddr8_8 else opcode_constAddr8_8); genByte(wordToWord8 offset); genByte(Word8.fromInt constNum) end | genByteCode(PushShort 0w0, _) = genByte opcode_const_0 | genByteCode(PushShort 0w1, _) = genByte opcode_const_1 | genByteCode(PushShort 0w2, _) = genByte opcode_const_2 | genByteCode(PushShort 0w3, _) = genByte opcode_const_3 | genByteCode(PushShort 0w4, _) = genByte opcode_const_4 | genByteCode(PushShort 0w10, _) = genByte opcode_const_10 | genByteCode(PushShort value, _) = if value < 0w256 then (genByte opcode_constIntB; genByte(wordToWord8 value)) else (genByte opcode_constIntW; genByte(wordToWord8 value); genByte(wordToWord8(value >> 0w8))) | genByteCode(IndexedCase{labels, size=ref Size32, ...}, ic) = let val nCases = List.length labels val () = genByte opcode_escape val () = genByte ext_opcode_case32 val () = genByte(Word8.fromInt nCases) val () = genByte(Word8.fromInt (nCases div 256)) val startOffset = ic+0w4 (* Offsets are relative to here. *) fun putLabel(ref labs) = let val dest = !(hd labs) val diff = dest - startOffset val _ = dest > startOffset orelse raise InternalError "genByteCode - indexed case" in genByte(wordToWord8 diff); genByte(wordToWord8(diff >> 0w8)); genByte(wordToWord8(diff >> 0w16)); genByte(wordToWord8(diff >> 0w24)) end in List.app putLabel labels end | genByteCode(IndexedCase{labels, size=ref Size16, ...}, ic) = let val nCases = List.length labels val () = genByte(opcode_case16) val () = genByte(Word8.fromInt nCases) val () = genByte(Word8.fromInt (nCases div 256)) val startOffset = ic+0w3 (* Offsets are relative to here. 
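For instance, with purely illustrative numbers: if this case16 instruction starts at byte offset 80, table offsets are measured from byte 83, the opcode plus the two count bytes, so a branch whose destination label is at byte 100 is stored as 100 - 83 = 17 and emitted as the two little-endian bytes 0x11 0x00.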
*) fun putLabel(ref labs) = let val dest = !(hd labs) val diff = dest - startOffset val _ = dest > startOffset orelse raise InternalError "genByteCode - indexed case" val _ = diff < 0wx10000 orelse raise InternalError "genByteCode - indexed case" in genByte(wordToWord8 diff); genByte(wordToWord8(diff >> 0w8)) end in List.app putLabel labels end | genByteCode(IndexedCase{size=ref Size8, ...}, _) = raise InternalError "genByteCode - IndexedCase byte" | genByteCode(LoadLocal 0w0, _) = genByte opcode_local_0 | genByteCode(LoadLocal 0w1, _) = genByte opcode_local_1 | genByteCode(LoadLocal 0w2, _) = genByte opcode_local_2 | genByteCode(LoadLocal 0w3, _) = genByte opcode_local_3 | genByteCode(LoadLocal 0w4, _) = genByte opcode_local_4 | genByteCode(LoadLocal 0w5, _) = genByte opcode_local_5 | genByteCode(LoadLocal 0w6, _) = genByte opcode_local_6 | genByteCode(LoadLocal 0w7, _) = genByte opcode_local_7 | genByteCode(LoadLocal 0w8, _) = genByte opcode_local_8 | genByteCode(LoadLocal 0w9, _) = genByte opcode_local_9 | genByteCode(LoadLocal 0w10, _) = genByte opcode_local_10 | genByteCode(LoadLocal 0w11, _) = genByte opcode_local_11 | genByteCode(LoadLocal 0w12, _) = genByte opcode_local_12 | genByteCode(LoadLocal 0w13, _) = genByte opcode_local_13 | genByteCode(LoadLocal 0w14, _) = genByte opcode_local_14 | genByteCode(LoadLocal 0w15, _) = genByte opcode_local_15 | genByteCode(LoadLocal w, _) = (genByte opcode_localB; genByte w) | genByteCode(IndirectLocal{localAddr=0w0, indirect=0w0}, _) = genByte opcode_indirect0Local0 | genByteCode(IndirectLocal{localAddr, indirect=0w0}, _) = (genByte opcode_indirectLocalB0; genByte localAddr) | genByteCode(IndirectLocal{localAddr, indirect=0w1}, _) = (genByte opcode_indirectLocalB1; genByte localAddr) | genByteCode(IndirectLocal{localAddr, indirect}, _) = (genByte opcode_indirectLocalBB; genByte localAddr; genByte indirect) | genByteCode(IsTaggedLocalB addr, _) = (genByte opcode_isTaggedLocalB; genByte addr) | genByteCode(JumpOnIsTaggedLocalB {label=ref labs, size=ref Size8, localAddr}, ic) = let val dest = !(hd labs) val diff = dest - (ic + 0w3) in genByte opcode_jumpTaggedLocal; genByte localAddr; genByte(wordToWord8 diff) end | genByteCode(JumpOnIsTaggedLocalB {label, size, localAddr}, ic) = ( (* Turn this back into the original sequence. *) genByteCode(IsTaggedLocalB localAddr, ic); genByteCode(JumpInstruction{jumpType=JumpTrue, label=label, size=size}, ic+0w2) ) | genByteCode(JumpNotEqualLocalInd0BB {label=ref labs, size=ref Size8, localAddr, const}, ic) = let val dest = !(hd labs) val diff = dest - (ic + 0w4) in genByte opcode_jumpNEqLocalInd; genByte localAddr; genByte const; genByte(wordToWord8 diff) end | genByteCode(JumpNotEqualLocalInd0BB {label, size, localAddr, const}, ic) = (* Turn this back into the original sequence. *) (foldCode ic genByteCode [IndirectLocal{localAddr=localAddr, indirect=0w0}, PushShort(word8ToWord const), SimpleCode[opcode_equalWord], JumpInstruction{jumpType=JumpFalse, label=label, size=size}]; ()) | genByteCode(JumpNotEqualLocalConstBB {label=ref labs, size=ref Size8, localAddr, const}, ic) = let val dest = !(hd labs) val diff = dest - (ic + 0w4) in genByte opcode_jumpNEqLocal; genByte localAddr; genByte const; genByte(wordToWord8 diff) end | genByteCode(JumpNotEqualLocalConstBB {label, size, localAddr, const}, ic) = (* Turn this back into the original sequence. 
*) (foldCode ic genByteCode [LoadLocal localAddr, PushShort(word8ToWord const), SimpleCode[opcode_equalWord], JumpInstruction{jumpType=JumpFalse, label=label, size=size}]; ()) + + | genByteCode(EnterIntArm64 b, ic) = + let + (* The machine code is 8 bytes that must be 32-bit aligned. There is then + a single byte that indicates the type of enter-int instruction. + We may need up to three bytes to get the alignment. It is simpler to + define this as a fixed-length (12-byte) instruction and add no-ops afterwards + if necessary. *) + val bytesBefore = if Word.andb(ic, 0w3) = 0w0 then 0w0 else 0w4-Word.andb(ic, 0w3) + val byteSequence = + List.tabulate(Word.toInt bytesBefore, fn _ => opcode_noop) @ + [0wx4f, 0wx03, 0wx40, 0wxf9, 0wxe0, 0wx01, 0wx3f, 0wxd6, b] @ + List.tabulate(3 - Word.toInt bytesBefore, fn _ => opcode_noop) + in + List.app genByte byteSequence + end + in foldCode 0w0 genByteCode (ops @ paddingBytes); (codeVec (* Return the completed code. *), endIC (* And the size. *)) end fun setLong (value, addrs, seg) = let val wordLength = wordSize fun putBytes(value, a, seg, i) = if i = wordLength then () else ( byteVecSet(seg, if not isBigEndian then a+i else a+wordLength-i-0w1, Word8.fromInt(value mod 256)); putBytes(value div 256, a, seg, i+0w1) ) in putBytes(value, addrs, seg, 0w0) end (* Peephole optimisation. *) local fun peepHole([], _, output) = List.rev output | peepHole(LabelCode lab1 :: (instrs as LabelCode lab2 :: _), exited, output) = ( (* Consecutive labels. Merge these, discarding the first. *) lab2 := !lab1 @ !lab2; peepHole(instrs, exited, output) ) (* A label followed by an unconditional branch. Forward the original label. Although JumpBack is also unconditional we don't forward those because we don't have a conditional backwards jump. *) | peepHole((LabelCode lab1) :: (jump as JumpInstruction{jumpType=Jump, label=lab2, ...}) :: tl, exited, output) = ( lab2 := !lab1 @ !lab2; (* Leave the jump in the stream and leave "exited" unchanged. This will now be unreachable if we had previously exited but we need to take the jump if we hadn't. *) peepHole(jump :: tl, exited, output) ) (* Discard everything after an unconditional transfer until the next label. *) | peepHole((label as LabelCode _) :: tl, _, output) = peepHole(tl, false, label::output) | peepHole(_ :: tl, true, output) = peepHole(tl, true, output) | peepHole((jump as JumpInstruction{jumpType=Jump, ...}) :: tl, _, output) = peepHole(tl, true, jump :: output) (* Return, raise-exception and tail-call. *) | peepHole((uncond as UncondTransfer _) :: tl, _, output) = peepHole(tl, true, uncond :: output) (* A conditional branch round an unconditional branch. Replace by a conditional branch with the sense reversed. 
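Schematically, with made-up label names, a sequence of the form
        jumpFalse L1;   jump L2;   L1: ...
in which the conditional branch merely skips over the unconditional one, is rewritten as
        jumpTrue L2;   L1: ...
and symmetrically with jumpTrue and jumpFalse exchanged. The test below, lab1 = lab3, checks exactly that the conditional branch targets the label immediately following the unconditional jump.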
*) | peepHole((cond as JumpInstruction{jumpType=JumpFalse, label=lab1, ...}) :: (uncond as JumpInstruction{jumpType=Jump, label=lab2, size}) :: (tail as LabelCode lab3 :: _), _, output) = if lab1 = lab3 then peepHole(tail, false, JumpInstruction{jumpType=JumpTrue, label=lab2, size=size} :: output) else peepHole(uncond :: tail, false, cond :: output) | peepHole((cond as JumpInstruction{jumpType=JumpTrue, label=lab1, ...}) :: (uncond as JumpInstruction{jumpType=Jump, label=lab2, size}) :: (tail as LabelCode lab3 :: _), _, output) = if lab1 = lab3 then peepHole(tail, false, JumpInstruction{jumpType=JumpFalse, label=lab2, size=size} :: output) else peepHole(uncond :: tail, false, cond :: output) | peepHole(IsTaggedLocalB addr :: JumpInstruction{jumpType=JumpTrue, label, size} :: tail, _, output) = peepHole(tail, false, JumpOnIsTaggedLocalB {label=label, size=size, localAddr=addr} :: output) | peepHole((indLocal as IndirectLocal{localAddr, indirect=0w0}) :: (instrs as PushShort const :: SimpleCode[0wxa0(*opcode_equalWord*)] :: JumpInstruction{jumpType=JumpFalse, label, size} :: tail), _, output) = if const < 0w256 then peepHole(tail, false, JumpNotEqualLocalInd0BB {label=label, size=size, localAddr=localAddr, const=wordToWord8 const} :: output) else peepHole(instrs, false, indLocal :: output) | peepHole((load as LoadLocal localAddr) :: (instrs as PushShort const :: SimpleCode[0wxa0(*opcode_equalWord*)] :: JumpInstruction{jumpType=JumpFalse, label, size} :: tail), _, output) = if const < 0w256 then peepHole(tail, false, JumpNotEqualLocalConstBB {label=label, size=size, localAddr=localAddr, const=wordToWord8 const} :: output) else peepHole(instrs, false, load :: output) | peepHole(hd::tl, exited, output) = peepHole(tl, exited, hd::output) in fun optimise code = peepHole(code, false, []) end (* Generate the code sequence to enter the interpreter when this code is called or returned to or an exception is raised. This is only required when bootstrapping a native code compiler. *) fun genEnterInt(_, Code { enterIntMode = 0 (* None *), ...}) = [] | genEnterInt(b, Code { enterIntMode = 1 (* X86_32 *), ...}) = [SimpleCode[0wxff, 0wx55, 0wx24, b]] | genEnterInt(b, Code { enterIntMode = 2 (* X86_64 *), ...}) = [SimpleCode[0wxff, 0wx55, 0wx48, b]] | genEnterInt(b, Code { enterIntMode = 3 (* X86_32_64 *), ...}) = [SimpleCode[0wxff, 0wx55, 0wx48, b]] + | genEnterInt(b, Code { enterIntMode = 4 (* ARM_64 *), ...}) = [EnterIntArm64 b] | genEnterInt _ = raise InternalError "genEnterInt: unknown architecture value" (* Adds the constants onto the code, and copies the code into a new segment *) fun copyCode {code as Code{ printAssemblyCode, printStream, procName, constVec, stage1Code, ...}, maxStack, numberOfArguments, resultClosure} = let val cvec = code local val revCode = optimise(List.rev(!stage1Code)) (* Add a stack check. This is only needed if the function needs more than 128 words since the call and tail functions check for this much. *) in val codeList = if maxStack < 128 then revCode else SimpleCode[opcode_stackSize16, Word8.fromInt maxStack, Word8.fromInt(maxStack div 256)] :: revCode end (* Add an enterInt if necessary *) (* If we need enter-int code it must go first. *) val enterInt = genEnterInt(Word8.fromInt numberOfArguments + 0wx80, cvec) val (byteVec, endIC) = genCode(enterInt @ codeList, cvec) val wordLength = wordSize (* +3 for profile count, function name and constants count *) val numOfConst = List.length(! 
constVec) val endOfCode = endIC div wordLength val segSize = endOfCode + Word.fromInt numOfConst + 0w4 val firstConstant = endIC + wordLength * 0w3 (* Add 3 for no of consts, fn name and profile count. *) (* Put in the number of constants. This must go in before we actually put in any constants. *) local val lastWord = (segSize - 0w1) * wordLength in val () = setLong(numOfConst + 2, endIC, byteVec) (* Set the last word of the code to the (negative) byte offset of the start of the code area from the end of this word. *) val () = setLong((numOfConst + 3) * ~ (Word.toInt wordLength), lastWord, byteVec) end (* Now we've filled in all the size info we need to convert the segment into a proper code segment before it's safe to put in any ML values. *) val codeVec = byteVecToCodeVec(byteVec, resultClosure) local val name : string = procName val nameWord : machineWord = toMachineWord name in val () = codeVecPutWord (codeVec, endOfCode+0w1, nameWord) end (* Profile ref. A byte ref used by the profiler in the RTS. *) local val v = RunCall.allocateByteMemory(0w1, Word.fromLargeWord(Word8.toLargeWord(Word8.orb(F_mutable, F_bytes)))) fun clear 0w0 = () | clear i = (assignByte(v, i-0w1, 0w0); clear (i-0w1)) val () = clear(wordSize) in val () = codeVecPutWord (codeVec, endOfCode+0w2, toMachineWord v) end (* and then copy the constants from the constant list. *) local fun setConstant(value, num) = let val constAddr = (firstConstant div wordLength) + num in codeVecPutWord (codeVec, constAddr, value); num+0w1 end in val _ = List.foldl setConstant 0w0 (!constVec) end in if printAssemblyCode then (* print out the code *) (printCode (codeVec, procName, endIC, printStream); printStream"\n") else (); codeVecLock(codeVec, resultClosure) end (* copyCode *) fun addItemToList(item, Code{stage1Code, ...}) = stage1Code := item :: !stage1Code val genOpcode = addItemToList fun putBranchInstruction(brOp, label, cvec) = addItemToList(JumpInstruction{label=label, jumpType=brOp, size = ref Size32}, cvec) fun setLabel(label, cvec) = addItemToList(LabelCode label, cvec) fun createLabel () = ref [ref 0w0] local fun genOpc(opc, cvec) = addItemToList(SimpleCode [opc], cvec) and genExtOpc(opc, cvec) = addItemToList(SimpleCode [opcode_escape, opc], cvec) and genOpcByte(opc, arg1, cvec) = if 0 <= arg1 andalso arg1 < 256 then addItemToList(SimpleCode [opc, Word8.fromInt arg1], cvec) else raise InternalError "genOpcByte" and genExtOpcByte(opc, arg1, cvec) = if 0 <= arg1 andalso arg1 < 256 then addItemToList(SimpleCode [opcode_escape, opc, Word8.fromInt arg1], cvec) else raise InternalError "genExtOpcByte" and genExtOpcWord(opc, arg1, cvec) = if 0 <= arg1 andalso arg1 < 65536 then addItemToList(SimpleCode[opcode_escape, opc, Word8.fromInt arg1, Word8.fromInt (arg1 div 256)], cvec) else raise InternalError "genExtOpcWord" open IEEEReal fun encodeRound TO_NEAREST = 0 | encodeRound TO_NEGINF = 1 | encodeRound TO_POSINF = 2 | encodeRound TO_ZERO = 3 in fun genRaiseEx cvec = addItemToList(UncondTransfer [opcode_raiseEx], cvec) fun genLock cvec = genOpc (opcode_lock, cvec) fun genLdexc cvec = genOpc (opcode_ldexc, cvec) fun genPushHandler cvec = genOpc (opcode_pushHandler, cvec) fun genRTSCallFast(0, cvec) = genOpc (opcode_callFastRTS0, cvec) | genRTSCallFast(1, cvec) = genOpc (opcode_callFastRTS1, cvec) | genRTSCallFast(2, cvec) = genOpc (opcode_callFastRTS2, cvec) | genRTSCallFast(3, cvec) = genOpc (opcode_callFastRTS3, cvec) | genRTSCallFast(4, cvec) = genOpc (opcode_callFastRTS4, cvec) | genRTSCallFast(5, cvec) = genOpc 
(opcode_callFastRTS5, cvec) | genRTSCallFast(_, _) = raise InternalError "genRTSFastCall" fun genContainer (size, cvec) = if size < 256 then genOpcByte(opcode_containerB, size, cvec) else genExtOpcWord(ext_opcode_containerW, size, cvec) fun genCase (nCases, cvec) = let val labels = List.tabulate(nCases, fn _ => createLabel()) in addItemToList(IndexedCase{labels=labels, size=ref Size32}, cvec); labels end (* For the moment don't try to merge stack resets. *) fun resetStack(0, _, _) = () | resetStack(1, true, cvec) = addItemToList(SimpleCode[opcode_resetR_1], cvec) | resetStack(2, true, cvec) = addItemToList(SimpleCode[opcode_resetR_2], cvec) | resetStack(3, true, cvec) = addItemToList(SimpleCode[opcode_resetR_3], cvec) | resetStack(offset, true, cvec) = if offset < 0 then raise InternalError "resetStack" else if offset > 255 then genExtOpcWord(ext_opcode_resetR_w, offset, cvec) else genOpcByte(opcode_resetRB, offset, cvec) | resetStack(1, false, cvec) = addItemToList(SimpleCode[opcode_reset_1], cvec) | resetStack(2, false, cvec) = addItemToList(SimpleCode[opcode_reset_2], cvec) | resetStack(offset, false, cvec) = if offset < 0 then raise InternalError "resetStack" else if offset > 255 then genExtOpcWord(ext_opcode_resetW, offset, cvec) else genOpcByte(opcode_resetB, offset, cvec) fun genCallClosure(Code{stage1Code as ref(PushConstant{constNum, size, isCall=false} :: tail), ...}) = stage1Code := PushConstant{constNum=constNum, size=size, isCall=true} :: tail | genCallClosure(Code{stage1Code as ref(LoadLocal w :: tail), ...}) = stage1Code := SimpleCode [opcode_callLocalB, w] :: tail | genCallClosure(Code{stage1Code, ...}) = stage1Code := SimpleCode [opcode_callClosure] :: !stage1Code fun genTailCall (toslide, slideby, cvec) = if toslide < 256 andalso slideby < 256 then (* General byte case *) addItemToList(UncondTransfer[opcode_tailbb, Word8.fromInt toslide, Word8.fromInt slideby], cvec) else (* General case. 
*) addItemToList( UncondTransfer[opcode_escape, ext_opcode_tail, Word8.fromInt toslide, Word8.fromInt(toslide div 256), Word8.fromInt slideby, Word8.fromInt (slideby div 256)], cvec) fun pushConst (value : machineWord, cvec) = if isShort value andalso toShort value < 0w32768 then addItemToList(PushShort(toShort value), cvec) else (* address or large short *) addItemToList(PushConstant{constNum = addConstToVec(value, cvec), size=ref Size32, isCall=false}, cvec) fun genRTSCallFastRealtoReal cvec = genExtOpc (ext_opcode_callFastRTSRtoR, cvec) and genRTSCallFastRealRealtoReal cvec = genExtOpc (ext_opcode_callFastRTSRRtoR, cvec) and genRTSCallFastGeneraltoReal cvec = genExtOpc (ext_opcode_callFastRTSGtoR, cvec) and genRTSCallFastRealGeneraltoReal cvec = genExtOpc (ext_opcode_callFastRTSRGtoR, cvec) and genRTSCallFastFloattoFloat cvec = genExtOpc (ext_opcode_callFastRTSFtoF, cvec) and genRTSCallFastFloatFloattoFloat cvec = genExtOpc (ext_opcode_callFastRTSFFtoF, cvec) and genRTSCallFastGeneraltoFloat cvec = genExtOpc (ext_opcode_callFastRTSGtoF, cvec) and genRTSCallFastFloatGeneraltoFloat cvec = genExtOpc (ext_opcode_callFastRTSFGtoF, cvec) fun genDoubleToFloat(SOME rnding, cvec) = genExtOpcByte(ext_opcode_realToFloat, encodeRound rnding, cvec) | genDoubleToFloat(NONE, cvec) = genExtOpcByte(ext_opcode_realToFloat, 5, cvec) and genRealToInt(rnding, cvec) = genExtOpcByte(ext_opcode_realToInt, encodeRound rnding, cvec) and genFloatToInt(rnding, cvec) = genExtOpcByte(ext_opcode_floatToInt, encodeRound rnding, cvec) fun genEqualWordConst(w, cvec) = (pushConst(toMachineWord w, cvec); genOpc(opcode_equalWord, cvec)) fun genIsTagged(Code{stage1Code as ref(LoadLocal addr :: tail), ...}) = stage1Code := IsTaggedLocalB addr :: tail | genIsTagged cvec = genOpc(opcode_isTagged, cvec) fun genIndirectSimple(0, cvec) = genOpc(opcode_indirect_0, cvec) | genIndirectSimple(1, cvec) = genOpc(opcode_indirect_1, cvec) | genIndirectSimple(2, cvec) = genOpc(opcode_indirect_2, cvec) | genIndirectSimple(3, cvec) = genOpc(opcode_indirect_3, cvec) | genIndirectSimple(4, cvec) = genOpc(opcode_indirect_4, cvec) | genIndirectSimple(5, cvec) = genOpc(opcode_indirect_5, cvec) | genIndirectSimple(arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_indirectB, arg1, cvec) else genExtOpcWord(ext_opcode_indirectW, arg1, cvec) fun genIndirectContainer(arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_indirectContainerB, arg1, cvec) else genExtOpcWord(ext_opcode_indirectContainerW, arg1, cvec) fun genMoveToContainer (arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_moveToContainerB, arg1, cvec) else genExtOpcWord(ext_opcode_moveToContainerW, arg1, cvec) fun genMoveToMutClosure (arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_moveToMutClosureB, arg1, cvec) else genExtOpcWord(ext_opcode_moveToMutClosureW, arg1, cvec) fun genSetStackVal (arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_setStackValB, arg1, cvec) else genExtOpcWord(ext_opcode_setStackValW, arg1, cvec) fun genTuple (2, cvec) = genOpc(opcode_tuple_2, cvec) | genTuple (3, cvec) = genOpc(opcode_tuple_3, cvec) | genTuple (4, cvec) = genOpc(opcode_tuple_4, cvec) | genTuple (arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_tupleB, arg1, cvec) else genExtOpcWord(ext_opcode_tupleW, arg1, cvec) fun genAllocMutableClosure(closureSize, cvec) = if closureSize < 256 then genOpcByte(opcode_allocMutClosureB, closureSize, cvec) else genExtOpcWord(ext_opcode_allocMutClosureW, closureSize, cvec) fun genClosure (arg1, cvec) = if arg1 < 256 then genOpcByte(opcode_closureB, arg1, cvec) 
else genExtOpcWord(ext_opcode_closureW, arg1, cvec) fun genLocal (arg1, cvec) = if 0 <= arg1 andalso arg1 < 256 then addItemToList(LoadLocal(Word8.fromInt arg1), cvec) else addItemToList(SimpleCode[opcode_localW, Word8.fromInt arg1, Word8.fromInt(arg1 div 256)], cvec) fun genIndirectClosure{ addr, item, code=cvec } = if addr < 256 andalso item < 256 then ( case item of 0 => genOpcByte(opcode_indirectClosureB0, addr, cvec) | 1 => genOpcByte(opcode_indirectClosureB1, addr, cvec) | 2 => genOpcByte(opcode_indirectClosureB2, addr, cvec) | _ => addItemToList(SimpleCode[opcode_indirectClosureBB, Word8.fromInt addr, Word8.fromInt item], cvec) ) else ( genLocal (addr, cvec); addItemToList(SimpleCode[opcode_escape, ext_opcode_indirectClosureW, Word8.fromInt item, Word8.fromInt (item div 256)], cvec) ) end fun genReturn(1, cvec) = addItemToList(UncondTransfer[opcode_return_1], cvec) | genReturn(2, cvec) = addItemToList(UncondTransfer[opcode_return_2], cvec) | genReturn(3, cvec) = addItemToList(UncondTransfer[opcode_return_3], cvec) | genReturn(arg1, cvec) = addItemToList(UncondTransfer( if 0 <= arg1 andalso arg1 <= 255 then [opcode_returnB, Word8.fromInt arg1] else [opcode_returnW, Word8.fromInt arg1, Word8.fromInt(arg1 div 256)]), cvec) fun genIndirect (arg1, cvec as Code{stage1Code as ref(LoadLocal w :: tail), ...}) = if 0 <= arg1 andalso arg1 <= 255 then stage1Code := IndirectLocal{localAddr=w, indirect=Word8.fromInt arg1} :: tail else genIndirectSimple(arg1, cvec) | genIndirect (arg1, cvec) = genIndirectSimple(arg1, cvec) fun genEnterIntCatch(code as Code{stage1Code, ...}) = stage1Code := genEnterInt(0wxff, code) @ !stage1Code and genEnterIntCall(code as Code{stage1Code, ...}, args) = stage1Code := genEnterInt(Word8.fromInt args, code) @ !stage1Code val opcode_notBoolean = SimpleCode [opcode_notBoolean] val opcode_cellLength = SimpleCode [opcode_cellLength] and opcode_cellFlags = SimpleCode [opcode_cellFlags] and opcode_clearMutable = SimpleCode [opcode_clearMutable] - and opcode_atomicIncr = SimpleCode [opcode_atomicIncr] - and opcode_atomicDecr = SimpleCode [opcode_atomicDecr] + and opcode_atomicExchAdd = SimpleCode [opcode_escape, ext_opcode_atomicExchAdd] and opcode_atomicReset = SimpleCode [opcode_escape, ext_opcode_atomicReset] and opcode_longWToTagged = SimpleCode [opcode_escape, ext_opcode_longWToTagged] and opcode_signedToLongW = SimpleCode [opcode_escape, ext_opcode_signedToLongW] and opcode_unsignedToLongW = SimpleCode [opcode_escape, ext_opcode_unsignedToLongW] and opcode_realAbs = SimpleCode [opcode_escape, ext_opcode_realAbs] and opcode_realNeg = SimpleCode [opcode_escape, ext_opcode_realNeg] and opcode_fixedIntToReal = SimpleCode [opcode_escape, ext_opcode_fixedIntToReal] and opcode_fixedIntToFloat = SimpleCode [opcode_escape, ext_opcode_fixedIntToFloat] and opcode_floatToReal = SimpleCode [opcode_escape, ext_opcode_floatToReal] val opcode_equalWord = SimpleCode [opcode_equalWord] and opcode_lessSigned = SimpleCode [opcode_lessSigned] and opcode_lessUnsigned = SimpleCode [opcode_lessUnsigned] and opcode_lessEqSigned = SimpleCode [opcode_lessEqSigned] and opcode_lessEqUnsigned = SimpleCode [opcode_lessEqUnsigned] and opcode_greaterSigned = SimpleCode [opcode_greaterSigned] and opcode_greaterUnsigned = SimpleCode [opcode_greaterUnsigned] and opcode_greaterEqSigned = SimpleCode [opcode_greaterEqSigned] and opcode_greaterEqUnsigned = SimpleCode [opcode_greaterEqUnsigned] val opcode_fixedAdd = SimpleCode [opcode_fixedAdd] val opcode_fixedSub = SimpleCode [opcode_fixedSub] val 
opcode_fixedMult = SimpleCode [opcode_fixedMult] val opcode_fixedQuot = SimpleCode [opcode_fixedQuot] val opcode_fixedRem = SimpleCode [opcode_fixedRem] val opcode_fixedDiv = SimpleCode [opcode_escape, ext_opcode_fixedDiv] val opcode_fixedMod = SimpleCode [opcode_escape, ext_opcode_fixedMod] val opcode_wordAdd = SimpleCode [opcode_wordAdd] val opcode_wordSub = SimpleCode [opcode_wordSub] val opcode_wordMult = SimpleCode [opcode_wordMult] val opcode_wordDiv = SimpleCode [opcode_wordDiv] val opcode_wordMod = SimpleCode [opcode_wordMod] val opcode_wordAnd = SimpleCode [opcode_wordAnd] val opcode_wordOr = SimpleCode [opcode_wordOr] val opcode_wordXor = SimpleCode [opcode_wordXor] val opcode_wordShiftLeft = SimpleCode [opcode_wordShiftLeft] val opcode_wordShiftRLog = SimpleCode [opcode_wordShiftRLog] val opcode_wordShiftRArith = SimpleCode [opcode_escape, ext_opcode_wordShiftRArith] val opcode_allocByteMem = SimpleCode [opcode_allocByteMem] val opcode_lgWordEqual = SimpleCode [opcode_escape, ext_opcode_lgWordEqual] val opcode_lgWordLess = SimpleCode [opcode_escape, ext_opcode_lgWordLess] val opcode_lgWordLessEq = SimpleCode [opcode_escape, ext_opcode_lgWordLessEq] val opcode_lgWordGreater = SimpleCode [opcode_escape, ext_opcode_lgWordGreater] val opcode_lgWordGreaterEq = SimpleCode [opcode_escape, ext_opcode_lgWordGreaterEq] val opcode_lgWordAdd = SimpleCode [opcode_escape, ext_opcode_lgWordAdd] val opcode_lgWordSub = SimpleCode [opcode_escape, ext_opcode_lgWordSub] val opcode_lgWordMult = SimpleCode [opcode_escape, ext_opcode_lgWordMult] val opcode_lgWordDiv = SimpleCode [opcode_escape, ext_opcode_lgWordDiv] val opcode_lgWordMod = SimpleCode [opcode_escape, ext_opcode_lgWordMod] val opcode_lgWordAnd = SimpleCode [opcode_escape, ext_opcode_lgWordAnd] val opcode_lgWordOr = SimpleCode [opcode_escape, ext_opcode_lgWordOr] val opcode_lgWordXor = SimpleCode [opcode_escape, ext_opcode_lgWordXor] val opcode_lgWordShiftLeft = SimpleCode [opcode_escape, ext_opcode_lgWordShiftLeft] val opcode_lgWordShiftRLog = SimpleCode [opcode_escape, ext_opcode_lgWordShiftRLog] val opcode_lgWordShiftRArith = SimpleCode [opcode_escape, ext_opcode_lgWordShiftRArith] val opcode_realEqual = SimpleCode [opcode_escape, ext_opcode_realEqual] val opcode_realLess = SimpleCode [opcode_escape, ext_opcode_realLess] val opcode_realLessEq = SimpleCode [opcode_escape, ext_opcode_realLessEq] val opcode_realGreater = SimpleCode [opcode_escape, ext_opcode_realGreater] val opcode_realGreaterEq = SimpleCode [opcode_escape, ext_opcode_realGreaterEq] val opcode_realUnordered = SimpleCode [opcode_escape, ext_opcode_realUnordered] val opcode_realAdd = SimpleCode [opcode_escape, ext_opcode_realAdd] val opcode_realSub = SimpleCode [opcode_escape, ext_opcode_realSub] val opcode_realMult = SimpleCode [opcode_escape, ext_opcode_realMult] val opcode_realDiv = SimpleCode [opcode_escape, ext_opcode_realDiv] and opcode_floatAbs = SimpleCode [opcode_escape, ext_opcode_floatAbs] and opcode_floatNeg = SimpleCode [opcode_escape, ext_opcode_floatNeg] val opcode_floatEqual = SimpleCode [opcode_escape, ext_opcode_floatEqual] val opcode_floatLess = SimpleCode [opcode_escape, ext_opcode_floatLess] val opcode_floatLessEq = SimpleCode [opcode_escape, ext_opcode_floatLessEq] val opcode_floatGreater = SimpleCode [opcode_escape, ext_opcode_floatGreater] val opcode_floatGreaterEq = SimpleCode [opcode_escape, ext_opcode_floatGreaterEq] val opcode_floatUnordered = SimpleCode [opcode_escape, ext_opcode_floatUnordered] val opcode_floatAdd = SimpleCode [opcode_escape, 
ext_opcode_floatAdd] val opcode_floatSub = SimpleCode [opcode_escape, ext_opcode_floatSub] val opcode_floatMult = SimpleCode [opcode_escape, ext_opcode_floatMult] val opcode_floatDiv = SimpleCode [opcode_escape, ext_opcode_floatDiv] val opcode_getThreadId = SimpleCode [opcode_getThreadId] val opcode_allocWordMemory = SimpleCode [opcode_allocWordMemory] val opcode_alloc_ref = SimpleCode [opcode_alloc_ref] val opcode_loadMLWord = SimpleCode [opcode_loadMLWord] val opcode_loadMLByte = SimpleCode [opcode_loadMLByte] val opcode_loadC8 = SimpleCode [opcode_escape, ext_opcode_loadC8] val opcode_loadC16 = SimpleCode [opcode_escape, ext_opcode_loadC16] val opcode_loadC32 = SimpleCode [opcode_escape, ext_opcode_loadC32] val opcode_loadC64 = SimpleCode [opcode_escape, ext_opcode_loadC64] val opcode_loadCFloat = SimpleCode [opcode_escape, ext_opcode_loadCFloat] val opcode_loadCDouble = SimpleCode [opcode_escape, ext_opcode_loadCDouble] val opcode_loadUntagged = SimpleCode [opcode_loadUntagged] val opcode_storeMLWord = SimpleCode [opcode_storeMLWord] val opcode_storeMLByte = SimpleCode [opcode_storeMLByte] val opcode_storeC8 = SimpleCode [opcode_escape, ext_opcode_storeC8] val opcode_storeC16 = SimpleCode [opcode_escape, ext_opcode_storeC16] val opcode_storeC32 = SimpleCode [opcode_escape, ext_opcode_storeC32] val opcode_storeC64 = SimpleCode [opcode_escape, ext_opcode_storeC64] val opcode_storeCFloat = SimpleCode [opcode_escape, ext_opcode_storeCFloat] val opcode_storeCDouble = SimpleCode [opcode_escape, ext_opcode_storeCDouble] val opcode_storeUntagged = SimpleCode [opcode_storeUntagged] val opcode_blockMoveWord = SimpleCode [opcode_blockMoveWord] val opcode_blockMoveByte = SimpleCode [opcode_blockMoveByte] val opcode_blockEqualByte = SimpleCode [opcode_blockEqualByte] val opcode_blockCompareByte = SimpleCode [opcode_blockCompareByte] val opcode_deleteHandler = SimpleCode [opcode_deleteHandler] val opcode_allocCSpace = SimpleCode [opcode_escape, ext_opcode_allocCSpace] val opcode_freeCSpace = SimpleCode [opcode_escape, ext_opcode_freeCSpace] val opcode_arbAdd = SimpleCode [opcode_arbAdd] val opcode_arbSubtract = SimpleCode [opcode_arbSubtract] val opcode_arbMultiply = SimpleCode [opcode_arbMultiply] structure Sharing = struct type code = code type opcode = opcode type labels = labels type closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONSSIG.sml b/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONSSIG.sml index 443fb9fb..bb022bb4 100644 --- a/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONSSIG.sml +++ b/mlsource/MLCompiler/CodeTree/ByteCode/INTCODECONSSIG.sml @@ -1,225 +1,224 @@ (* - Copyright (c) 2016-18, 2020 David C.J. Matthews + Copyright (c) 2016-18, 2020-21 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) signature INTCODECONSSIG = sig type machineWord = Address.machineWord type address = Address.address type code type opcode type labels type closureRef val opcode_notBoolean: opcode val opcode_cellLength: opcode and opcode_cellFlags: opcode and opcode_clearMutable: opcode - and opcode_atomicIncr: opcode - and opcode_atomicDecr: opcode + and opcode_atomicExchAdd: opcode and opcode_atomicReset: opcode and opcode_longWToTagged: opcode and opcode_signedToLongW: opcode and opcode_unsignedToLongW: opcode and opcode_realAbs: opcode and opcode_realNeg: opcode and opcode_fixedIntToReal: opcode and opcode_fixedIntToFloat: opcode and opcode_floatToReal: opcode and opcode_floatAbs: opcode and opcode_floatNeg: opcode val opcode_equalWord: opcode and opcode_lessSigned: opcode and opcode_lessUnsigned: opcode and opcode_lessEqSigned: opcode and opcode_lessEqUnsigned: opcode and opcode_greaterSigned: opcode and opcode_greaterUnsigned: opcode and opcode_greaterEqSigned: opcode and opcode_greaterEqUnsigned: opcode val opcode_fixedAdd: opcode val opcode_fixedSub: opcode val opcode_fixedMult: opcode val opcode_fixedQuot: opcode val opcode_fixedRem: opcode val opcode_fixedDiv: opcode val opcode_fixedMod: opcode val opcode_wordAdd: opcode val opcode_wordSub: opcode val opcode_wordMult: opcode val opcode_wordDiv: opcode val opcode_wordMod: opcode val opcode_wordAnd: opcode val opcode_wordOr: opcode val opcode_wordXor: opcode val opcode_wordShiftLeft: opcode val opcode_wordShiftRLog: opcode val opcode_wordShiftRArith: opcode val opcode_allocByteMem: opcode val opcode_lgWordEqual: opcode val opcode_lgWordLess: opcode val opcode_lgWordLessEq: opcode val opcode_lgWordGreater: opcode val opcode_lgWordGreaterEq: opcode val opcode_lgWordAdd: opcode val opcode_lgWordSub: opcode val opcode_lgWordMult: opcode val opcode_lgWordDiv: opcode val opcode_lgWordMod: opcode val opcode_lgWordAnd: opcode val opcode_lgWordOr: opcode val opcode_lgWordXor: opcode val opcode_lgWordShiftLeft: opcode val opcode_lgWordShiftRLog: opcode val opcode_lgWordShiftRArith: opcode val opcode_realEqual: opcode val opcode_realLess: opcode val opcode_realLessEq: opcode val opcode_realGreater: opcode val opcode_realGreaterEq: opcode val opcode_realUnordered: opcode val opcode_realAdd: opcode val opcode_realSub: opcode val opcode_realMult: opcode val opcode_realDiv: opcode val opcode_floatEqual: opcode val opcode_floatLess: opcode val opcode_floatLessEq: opcode val opcode_floatGreater: opcode val opcode_floatGreaterEq: opcode val opcode_floatUnordered: opcode val opcode_floatAdd: opcode val opcode_floatSub: opcode val opcode_floatMult: opcode val opcode_floatDiv: opcode val opcode_getThreadId: opcode val opcode_allocWordMemory: opcode val opcode_alloc_ref: opcode val opcode_loadMLWord: opcode val opcode_loadMLByte: opcode val opcode_loadC8: opcode val opcode_loadC16: opcode val opcode_loadC32: opcode val opcode_loadC64: opcode val opcode_loadCFloat: opcode val opcode_loadCDouble: opcode val opcode_loadUntagged: opcode val opcode_storeMLWord: opcode val opcode_storeMLByte: opcode val opcode_storeC8: opcode val opcode_storeC16: opcode val opcode_storeC32: opcode val opcode_storeC64: opcode val opcode_storeCFloat: opcode val opcode_storeCDouble: opcode val opcode_storeUntagged: opcode val opcode_blockMoveWord: opcode val opcode_blockMoveByte: opcode 
val opcode_blockEqualByte: opcode val opcode_blockCompareByte: opcode val opcode_deleteHandler: opcode val opcode_allocCSpace: opcode val opcode_freeCSpace: opcode val opcode_arbAdd: opcode val opcode_arbSubtract: opcode val opcode_arbMultiply: opcode val codeCreate: string * Universal.universal list -> code (* makes the initial segment. *) (* GEN- routines all put a value at the instruction counter and add an appropriate amount to it. *) (* gen... - put instructions and their operands. *) val genCallClosure : code -> unit val genRaiseEx : code -> unit val genLock : code -> unit val genLdexc : code -> unit val genPushHandler : code -> unit val genReturn : int * code -> unit val genLocal : int * code -> unit val genIndirect : int * code -> unit val genSetStackVal : int * code -> unit val genCase : int * code -> labels list val genTuple : int * code -> unit val genTailCall : int * int * code -> unit val genIndirectClosure: { addr: int, item: int, code: code } -> unit and genIndirectContainer: int * code -> unit and genMoveToContainer: int * code -> unit and genMoveToMutClosure: int * code -> unit and genClosure: int * code -> unit val genDoubleToFloat: IEEEReal.rounding_mode option * code -> unit and genRealToInt: IEEEReal.rounding_mode * code -> unit and genFloatToInt: IEEEReal.rounding_mode * code -> unit val genAllocMutableClosure: int * code -> unit val genRTSCallFast: int * code -> unit val genRTSCallFastRealtoReal: code -> unit val genRTSCallFastRealRealtoReal: code -> unit val genRTSCallFastGeneraltoReal: code -> unit val genRTSCallFastRealGeneraltoReal: code -> unit val genRTSCallFastFloattoFloat: code -> unit val genRTSCallFastFloatFloattoFloat: code -> unit val genRTSCallFastGeneraltoFloat: code -> unit val genRTSCallFastFloatGeneraltoFloat: code -> unit val genOpcode: opcode * code -> unit (* genEnter instructions are only needed when machine-code routines can call interpreted routines or vice-versa. The enterInt instruction causes the interpreter to be entered and the argument indicates the reason. *) val genEnterIntCatch : code -> unit val genEnterIntCall : code * int -> unit (* pushConst - Generates code to push a constant. *) val pushConst : machineWord * code -> unit (* Create a container on the stack *) val genContainer : int * code -> unit (* copyCode - Finish up after compiling a function. *) val copyCode : {code: code, maxStack: int, numberOfArguments: int, resultClosure: closureRef } -> unit (* putBranchInstruction puts in an instruction which involves a forward reference. *) datatype jumpTypes = Jump | JumpBack | JumpFalse | JumpTrue | SetHandler val putBranchInstruction: jumpTypes * labels * code -> unit val createLabel: unit -> labels (* Define the position of a label. *) val setLabel: labels * code -> unit val resetStack: int * bool * code -> unit (* Set a pending reset *) val genEqualWordConst: word * code -> unit val genIsTagged: code -> unit structure Sharing: sig type code = code type opcode = opcode type labels = labels type closureRef = closureRef end end ; diff --git a/mlsource/MLCompiler/CodeTree/ByteCode/INTGCODE.ML b/mlsource/MLCompiler/CodeTree/ByteCode/INTGCODE.ML index aff78ad3..6aa19d4b 100644 --- a/mlsource/MLCompiler/CodeTree/ByteCode/INTGCODE.ML +++ b/mlsource/MLCompiler/CodeTree/ByteCode/INTGCODE.ML @@ -1,1230 +1,1234 @@ (* Copyright (c) 2000 Cambridge University Technical Services Limited - Further development copyright David C.J. Matthews 2016-18,2020 + Further development copyright David C.J. 
Matthews 2016-18,2020-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Title: Generate interpretable code for Poly system from the code tree. Author: Dave Matthews, Cambridge University Computer Laboratory Copyright Cambridge University 1985 *) (* This generates byte-code that is interpreted by the run-time system. It is now used as a fall-back to allow Poly/ML to run on non-X86 architectures. Early versions were used as a porting aid while a native code-generator was being developed and the "enter-int" instructions that were needed for that have been retained although they no longer actually generate code. *) functor INTGCODE ( structure CODECONS : INTCODECONSSIG structure BACKENDTREE: BackendIntermediateCodeSig structure CODE_ARRAY: CODEARRAYSIG sharing CODECONS.Sharing = BACKENDTREE.Sharing = CODE_ARRAY.Sharing ) : GENCODESIG = struct open CODECONS open Address open BACKENDTREE open Misc open CODE_ARRAY val word0 = toMachineWord 0; val DummyValue : machineWord = word0; (* used as result of "raise e" etc. *) type caseForm = { cases : (backendIC * word) list, test : backendIC, caseType: caseType, default : backendIC } (* Where the result, if any, should go *) datatype whereto = NoResult (* discard result *) | ToStack (* Need a result but it can stay on the pseudo-stack *); (* Are we at the end of the function. *) datatype tail = EndOfProc | NotEnd (* Code generate a function or global declaration *) fun codegen (pt, cvec, resultClosure, numOfArgs, localCount, parameters) = let datatype decEntry = StackAddr of int | Empty val decVec = Array.array (localCount, Empty) (* Count of number of items on the stack. *) val realstackptr = ref 1 (* The closure ptr is already there *) (* Maximum size of the stack. *) val maxStack = ref 1 (* Push a value onto the stack. *) fun incsp () = ( realstackptr := !realstackptr + 1; if !realstackptr > !maxStack then maxStack := !realstackptr else () ) (* An entry has been removed from the stack. *) fun decsp () = realstackptr := !realstackptr - 1; fun pushLocalStackValue addr = ( genLocal(!realstackptr + addr, cvec); incsp() ) (* Loads a local, argument or closure value; translating local stack addresses to real stack offsets. *) fun locaddr(BICLoadArgument locn) = pushLocalStackValue (numOfArgs-locn) | locaddr(BICLoadLocal locn) = ( (* positive address - on the stack. *) case Array.sub (decVec, locn) of StackAddr n => pushLocalStackValue (~ n) | _ => (* Should be on the stack, not a function. *) raise InternalError "locaddr: bad stack address" ) | locaddr(BICLoadClosure locn) = (* closure-pointer relative *) ( genIndirectClosure{addr = !realstackptr-1, item=locn, code=cvec}; incsp() ) | locaddr BICLoadRecursive = pushLocalStackValue ~1 (* The closure itself - first value on the stack. *) (* generates code from the tree *) fun gencde (pt : backendIC, whereto : whereto, tailKind : tail, loopAddr) : unit = let (* Save the stack pointer value here. We may want to reset the stack. 
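As a worked illustration of the address translation in locaddr above, using example values: with numOfArgs = 2 and !realstackptr = 1, only the closure having been pushed so far, BICLoadArgument 0 becomes genLocal 3, that is 1 + 2 - 0, and BICLoadArgument 1 becomes genLocal 2, while a local recorded as StackAddr n becomes genLocal of !realstackptr - n. In every case a pseudo-stack address is converted into an offset from the current real stack top.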
*) val oldsp = !realstackptr; (* Operations on ML memory always have the base as an ML address. Word operations are always word aligned. The higher level will have extracted any constant offset and scaled it if necessary. That's helpful for the X86 but not for the interpreter. We have to turn them back into indexes. *) fun genMLAddress({base, index, offset}, scale) = ( gencde (base, ToStack, NotEnd, loopAddr); offset mod scale = 0 orelse raise InternalError "genMLAddress"; case (index, offset div scale) of (NONE, soffset) => (pushConst (toMachineWord soffset, cvec); incsp()) | (SOME indexVal, 0) => gencde (indexVal, ToStack, NotEnd, loopAddr) | (SOME indexVal, soffset) => ( gencde (indexVal, ToStack, NotEnd, loopAddr); pushConst (toMachineWord soffset, cvec); genOpcode(opcode_wordAdd, cvec) ) ) (* Load the address, index value and offset for non-byte operations. Because the offset has already been scaled by the size of the operand we have to load the index and offset separately. *) fun genCAddress{base, index, offset} = ( gencde (base, ToStack, NotEnd, loopAddr); case index of NONE => (pushConst (toMachineWord 0, cvec); incsp()) | SOME indexVal => gencde (indexVal, ToStack, NotEnd, loopAddr); pushConst (toMachineWord offset, cvec); incsp() ) val () = case pt of BICEval evl => genEval (evl, tailKind) | BICExtract ext => (* This may just be being used to discard a value which isn't used on this branch. *) if whereto = NoResult then () else locaddr ext | BICField {base, offset} => (gencde (base, ToStack, NotEnd, loopAddr); genIndirect (offset, cvec)) | BICLoadContainer {base, offset} => (gencde (base, ToStack, NotEnd, loopAddr); genIndirectContainer (offset, cvec)) | BICLambda lam => genProc (lam, false, fn () => ()) | BICConstnt(w, _) => let val () = pushConst (w, cvec); in incsp () end | BICCond (testPart, thenPart, elsePart) => genCond (testPart, thenPart, elsePart, whereto, tailKind, loopAddr) | BICNewenv(decls, exp) => let (* Processes a list of entries. *) (* Mutually recursive declarations. May be either lambdas or constants. Recurse down the list pushing the addresses of the closure vectors, then unwind the recursion and fill them in. *) fun genMutualDecs [] = () | genMutualDecs ({lambda, addr, ...} :: otherDecs) = genProc (lambda, true, fn() => ( Array.update (decVec, addr, StackAddr (! realstackptr)); genMutualDecs (otherDecs) )) fun codeDecls(BICRecDecs dl) = genMutualDecs dl | codeDecls(BICDecContainer{size, addr}) = ( (* If this is a container we have to process it here otherwise it will be removed in the stack adjustment code. *) genContainer(size, cvec); (* Push the address of this container. *) realstackptr := !realstackptr + size + 1; (* Pushes N words plus the address. *) Array.update (decVec, addr, StackAddr(!realstackptr)) ) | codeDecls(BICDeclar{value, addr, ...}) = ( gencde (value, ToStack, NotEnd, loopAddr); Array.update (decVec, addr, StackAddr(!realstackptr)) ) | codeDecls(BICNullBinding exp) = gencde (exp, NoResult, NotEnd, loopAddr) in List.app codeDecls decls; gencde (exp, whereto, tailKind, loopAddr) end | BICBeginLoop {loop=body, arguments} => (* Execute the body which will contain at least one Loop instruction. There will also be path(s) which don't contain Loops and these will drop through. *) let val args = List.map #1 arguments (* Evaluate each of the arguments, pushing the result onto the stack. 
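In outline, and with illustrative label names, the code generated for a loop with a single argument has the shape: evaluate the initial argument, which keeps its stack slot; L_start: ... body ...; and for each Loop instruction in the body: evaluate the new argument value, setStackVal it into the recorded slot, reset any extra stack entries, then jumpBack to L_start. The details are in the BICLoop case below; only the names here are invented.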
*) fun genLoopArg ({addr, value, ...}) = ( gencde (value, ToStack, NotEnd, loopAddr); Array.update (decVec, addr, StackAddr (!realstackptr)); !realstackptr (* Return the posn on the stack. *) ) val argIndexList = map genLoopArg args; val startSp = ! realstackptr; (* Remember the current top of stack. *) val startLoop = createLabel () val () = setLabel(startLoop, cvec) (* Start of loop *) in (* Process the body, passing the jump-back address down for the Loop instruction(s). *) gencde (body, whereto, tailKind, SOME(startLoop, startSp, argIndexList)) (* Leave the arguments on the stack. They can be cleared later if needed. *) end | BICLoop argList => (* Jump back to the enclosing BeginLoop. *) let val (startLoop, startSp, argIndexList) = case loopAddr of SOME l => l | NONE => raise InternalError "No BeginLoop for Loop instr" (* Evaluate the arguments. First push them to the stack because evaluating an argument may depend on the current value of others. Only when we've evaluated all of them can we overwrite the original argument positions. *) fun loadArgs ([], []) = !realstackptr - startSp (* The offset of all the args. *) | loadArgs (arg:: argList, _ :: argIndexList) = let (* Evaluate all the arguments. *) val () = gencde (arg, ToStack, NotEnd, NONE); val argOffset = loadArgs(argList, argIndexList); in genSetStackVal(argOffset, cvec); (* Copy the arg over. *) decsp(); (* The argument has now been popped. *) argOffset end | loadArgs _ = raise InternalError "loadArgs: Mismatched arguments"; val _: int = loadArgs(List.map #1 argList, argIndexList) in if !realstackptr <> startSp then resetStack (!realstackptr - startSp, false, cvec) (* Remove any local variables. *) else (); (* Jump back to the start of the loop. *) putBranchInstruction(JumpBack, startLoop, cvec) end | BICRaise exp => ( gencde (exp, ToStack, NotEnd, loopAddr); genRaiseEx cvec ) | BICHandle {exp, handler, exPacketAddr} => let (* Save old handler *) val () = genPushHandler cvec val () = incsp () val handlerLabel = createLabel() val () = putBranchInstruction (SetHandler, handlerLabel, cvec) val () = incsp() (* Code generate the body; "NotEnd" because we have to come back to remove the handler; "ToStack" because delHandler needs a result to carry down. *) val () = gencde (exp, ToStack, NotEnd, loopAddr) (* Now get out of the handler and restore the old one. *) val () = genOpcode(opcode_deleteHandler, cvec) val skipHandler = createLabel() val () = putBranchInstruction (Jump, skipHandler, cvec) (* Now process the handler itself. First we have to reset the stack. Note that we have to use "ToStack" again to be consistent with the stack-handling in the body-part. If we actually wanted "NoResult", the stack adjustment code at the end of gencde will take care of this. This means that I don't want to do any clever "end-of-function" optimisation either. SPF 6/1/97 *) val () = realstackptr := oldsp val () = setLabel (handlerLabel, cvec) (* If we were executing machine code we must re-enter the interpreter. *) val () = genEnterIntCatch cvec (* Push the exception packet and set the address. *) val () = genLdexc cvec val () = incsp () val () = Array.update (decVec, exPacketAddr, StackAddr(!realstackptr)) val () = gencde (handler, ToStack, NotEnd, loopAddr) (* Have to remove the exception packet. 
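Putting the pieces together, the handler code generated here has the shape, with illustrative labels: pushHandler; setHandler L_h; ... exp ...; deleteHandler; jump L_skip; L_h: enter-int if needed; ldexc; ... handler ...; resetR 1 to drop the exception packet while keeping the result; L_skip: ... and realstackptr is reset to oldsp at L_h so the handler sees the stack as it was on entry to the handle expression.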
*) val () = resetStack(1, true, cvec) val () = decsp() (* Finally fix-up the jump around the handler *) val () = setLabel (skipHandler, cvec) in () end | BICCase ({cases, test, default, firstIndex, ...}) => let val () = gencde (test, ToStack, NotEnd, loopAddr) (* Label to jump to at the end of each case. *) val exitJump = createLabel() val () = if firstIndex = 0w0 then () else ( (* Subtract lower limit. Don't check for overflow. Instead allow large value to wrap around and check in "case" instruction. *) pushConst(toMachineWord firstIndex, cvec); genOpcode(opcode_wordSub, cvec) ) (* Generate the case instruction followed by the table of jumps. *) val nCases = List.length cases val caseLabels = genCase (nCases, cvec) val () = decsp () (* The default case, if any, follows the case statement. *) (* If we have a jump to the default set it to jump here. *) local fun fixDefault(NONE, defCase) = setLabel(defCase, cvec) | fixDefault(SOME _, _) = () in val () = ListPair.appEq fixDefault (cases, caseLabels) end val () = gencde (default, whereto, tailKind, loopAddr); fun genCases(SOME body, label) = ( (* First exit from the previous case or the default if this is the first. *) putBranchInstruction(Jump, exitJump, cvec); (* Remove the result - the last case will leave it. *) case whereto of ToStack => decsp () | NoResult => (); (* Fix up the jump to come here. *) setLabel(label, cvec); gencde (body, whereto, tailKind, loopAddr) ) | genCases(NONE, _) = () val () = ListPair.appEq genCases (cases, caseLabels) (* Finally set the exit jump to come here. *) val () = setLabel (exitJump, cvec) in () end | BICTuple recList => let val size = List.length recList in (* Move the fields into the vector. *) List.app(fn v => gencde (v, ToStack, NotEnd, loopAddr)) recList; genTuple (size, cvec); realstackptr := !realstackptr - (size - 1) end | BICSetContainer{container, tuple, filter} => (* Copy the contents of a tuple into a container. If the tuple is a Tuple instruction we can avoid generating the tuple and then unpacking it and simply copy the fields that make up the tuple directly into the container. *) ( case tuple of BICTuple cl => (* Simply set the container from the values. *) let (* Load the address of the container. *) val _ = gencde (container, ToStack, NotEnd, loopAddr); fun setValues([], _, _) = () | setValues(v::tl, sourceOffset, destOffset) = if sourceOffset < BoolVector.length filter andalso BoolVector.sub(filter, sourceOffset) then ( gencde (v, ToStack, NotEnd, loopAddr); (* Move the entry into the container. This instruction pops the value to be moved but not the destination. *) genMoveToContainer(destOffset, cvec); decsp(); setValues(tl, sourceOffset+1, destOffset+1) ) else setValues(tl, sourceOffset+1, destOffset) in setValues(cl, 0, 0) (* The container address is still on the stack. *) end | _ => let (* General case. *) (* First the target tuple, then the container. *) val () = gencde (tuple, ToStack, NotEnd, loopAddr) val () = gencde (container, ToStack, NotEnd, loopAddr) val last = BoolVector.foldli(fn (i, true, _) => i | (_, false, n) => n) ~1 filter fun copy (sourceOffset, destOffset) = if BoolVector.sub(filter, sourceOffset) then ( (* Duplicate the tuple address . *) genLocal(1, cvec); genIndirect(sourceOffset, cvec); genMoveToContainer(destOffset, cvec); if sourceOffset = last then () else copy (sourceOffset+1, destOffset+1) ) else copy(sourceOffset+1, destOffset) in copy (0, 0) (* The container and tuple addresses are still on the stack. *) end ) | BICTagTest { test, tag, ... 
} => ( gencde (test, ToStack, NotEnd, loopAddr); genEqualWordConst(tag, cvec) ) | BICNullary {oper=BuiltIns.GetCurrentThreadId} => ( genOpcode(opcode_getThreadId, cvec); incsp() ) - + | BICNullary {oper=BuiltIns.CheckRTSException} => ( (* Do nothing. This is done in the RTS call. *) ) + | BICNullary {oper=BuiltIns.CPUPause} => + ( (* Do nothing. It's really only a hint. *) + ) + | BICUnary { oper, arg1 } => let open BuiltIns val () = gencde (arg1, ToStack, NotEnd, loopAddr) in case oper of NotBoolean => genOpcode(opcode_notBoolean, cvec) | IsTaggedValue => genIsTagged cvec | MemoryCellLength => genOpcode(opcode_cellLength, cvec) | MemoryCellFlags => genOpcode(opcode_cellFlags, cvec) | ClearMutableFlag => genOpcode(opcode_clearMutable, cvec) - | AtomicIncrement => genOpcode(opcode_atomicIncr, cvec) - | AtomicDecrement => genOpcode(opcode_atomicDecr, cvec) | AtomicReset => genOpcode(opcode_atomicReset, cvec) | LongWordToTagged => genOpcode(opcode_longWToTagged, cvec) | SignedToLongWord => genOpcode(opcode_signedToLongW, cvec) | UnsignedToLongWord => genOpcode(opcode_unsignedToLongW, cvec) | RealAbs PrecDouble => genOpcode(opcode_realAbs, cvec) | RealNeg PrecDouble => genOpcode(opcode_realNeg, cvec) | RealFixedInt PrecDouble => genOpcode(opcode_fixedIntToReal, cvec) | RealAbs PrecSingle => genOpcode(opcode_floatAbs, cvec) | RealNeg PrecSingle => genOpcode(opcode_floatNeg, cvec) | RealFixedInt PrecSingle => genOpcode(opcode_fixedIntToFloat, cvec) | FloatToDouble => genOpcode(opcode_floatToReal, cvec) | DoubleToFloat rnding => genDoubleToFloat(rnding, cvec) | RealToInt (PrecDouble, rnding) => genRealToInt(rnding, cvec) | RealToInt (PrecSingle, rnding) => genFloatToInt(rnding, cvec) | TouchAddress => resetStack(1, false, cvec) (* Discard this *) | AllocCStack => genOpcode(opcode_allocCSpace, cvec) end | BICBinary { oper=BuiltIns.WordComparison{test=BuiltIns.TestEqual, ...}, arg1, arg2=BICConstnt(w, _) } => let val () = gencde (arg1, ToStack, NotEnd, loopAddr) in genEqualWordConst(toShort w, cvec) end | BICBinary { oper=BuiltIns.WordComparison{test=BuiltIns.TestEqual, ...}, arg1=BICConstnt(w, _), arg2 } => let val () = gencde (arg2, ToStack, NotEnd, loopAddr) in genEqualWordConst(toShort w, cvec) end | BICBinary { oper, arg1, arg2 } => let open BuiltIns val () = gencde (arg1, ToStack, NotEnd, loopAddr) val () = gencde (arg2, ToStack, NotEnd, loopAddr) in case oper of WordComparison{test=TestEqual, ...} => genOpcode(opcode_equalWord, cvec) | WordComparison{test=TestLess, isSigned=true} => genOpcode(opcode_lessSigned, cvec) | WordComparison{test=TestLessEqual, isSigned=true} => genOpcode(opcode_lessEqSigned, cvec) | WordComparison{test=TestGreater, isSigned=true} => genOpcode(opcode_greaterSigned, cvec) | WordComparison{test=TestGreaterEqual, isSigned=true} => genOpcode(opcode_greaterEqSigned, cvec) | WordComparison{test=TestLess, isSigned=false} => genOpcode(opcode_lessUnsigned, cvec) | WordComparison{test=TestLessEqual, isSigned=false} => genOpcode(opcode_lessEqUnsigned, cvec) | WordComparison{test=TestGreater, isSigned=false} => genOpcode(opcode_greaterUnsigned, cvec) | WordComparison{test=TestGreaterEqual, isSigned=false} => genOpcode(opcode_greaterEqUnsigned, cvec) | WordComparison{test=TestUnordered, ...} => raise InternalError "WordComparison: TestUnordered" | PointerEq => genOpcode(opcode_equalWord, cvec) | FixedPrecisionArith ArithAdd => genOpcode(opcode_fixedAdd, cvec) | FixedPrecisionArith ArithSub => genOpcode(opcode_fixedSub, cvec) | FixedPrecisionArith ArithMult => 
genOpcode(opcode_fixedMult, cvec) | FixedPrecisionArith ArithQuot => genOpcode(opcode_fixedQuot, cvec) | FixedPrecisionArith ArithRem => genOpcode(opcode_fixedRem, cvec) | FixedPrecisionArith ArithDiv => raise InternalError "TODO: FixedPrecisionArith ArithDiv" | FixedPrecisionArith ArithMod => raise InternalError "TODO: FixedPrecisionArith ArithMod" | WordArith ArithAdd => genOpcode(opcode_wordAdd, cvec) | WordArith ArithSub => genOpcode(opcode_wordSub, cvec) | WordArith ArithMult => genOpcode(opcode_wordMult, cvec) | WordArith ArithDiv => genOpcode(opcode_wordDiv, cvec) | WordArith ArithMod => genOpcode(opcode_wordMod, cvec) | WordArith _ => raise InternalError "WordArith - unimplemented instruction" | WordLogical LogicalAnd => genOpcode(opcode_wordAnd, cvec) | WordLogical LogicalOr => genOpcode(opcode_wordOr, cvec) | WordLogical LogicalXor => genOpcode(opcode_wordXor, cvec) | WordShift ShiftLeft => genOpcode(opcode_wordShiftLeft, cvec) | WordShift ShiftRightLogical => genOpcode(opcode_wordShiftRLog, cvec) | WordShift ShiftRightArithmetic => genOpcode(opcode_wordShiftRArith, cvec) | AllocateByteMemory => genOpcode(opcode_allocByteMem, cvec) | LargeWordComparison TestEqual => genOpcode(opcode_lgWordEqual, cvec) | LargeWordComparison TestLess => genOpcode(opcode_lgWordLess, cvec) | LargeWordComparison TestLessEqual => genOpcode(opcode_lgWordLessEq, cvec) | LargeWordComparison TestGreater => genOpcode(opcode_lgWordGreater, cvec) | LargeWordComparison TestGreaterEqual => genOpcode(opcode_lgWordGreaterEq, cvec) | LargeWordComparison TestUnordered => raise InternalError "LargeWordComparison: TestUnordered" | LargeWordArith ArithAdd => genOpcode(opcode_lgWordAdd, cvec) | LargeWordArith ArithSub => genOpcode(opcode_lgWordSub, cvec) | LargeWordArith ArithMult => genOpcode(opcode_lgWordMult, cvec) | LargeWordArith ArithDiv => genOpcode(opcode_lgWordDiv, cvec) | LargeWordArith ArithMod => genOpcode(opcode_lgWordMod, cvec) | LargeWordArith _ => raise InternalError "LargeWordArith - unimplemented instruction" | LargeWordLogical LogicalAnd => genOpcode(opcode_lgWordAnd, cvec) | LargeWordLogical LogicalOr => genOpcode(opcode_lgWordOr, cvec) | LargeWordLogical LogicalXor => genOpcode(opcode_lgWordXor, cvec) | LargeWordShift ShiftLeft => genOpcode(opcode_lgWordShiftLeft, cvec) | LargeWordShift ShiftRightLogical => genOpcode(opcode_lgWordShiftRLog, cvec) | LargeWordShift ShiftRightArithmetic => genOpcode(opcode_lgWordShiftRArith, cvec) | RealComparison (TestEqual, PrecDouble) => genOpcode(opcode_realEqual, cvec) | RealComparison (TestLess, PrecDouble) => genOpcode(opcode_realLess, cvec) | RealComparison (TestLessEqual, PrecDouble) => genOpcode(opcode_realLessEq, cvec) | RealComparison (TestGreater, PrecDouble) => genOpcode(opcode_realGreater, cvec) | RealComparison (TestGreaterEqual, PrecDouble) => genOpcode(opcode_realGreaterEq, cvec) | RealComparison (TestUnordered, PrecDouble) => genOpcode(opcode_realUnordered, cvec) | RealComparison (TestEqual, PrecSingle) => genOpcode(opcode_floatEqual, cvec) | RealComparison (TestLess, PrecSingle) => genOpcode(opcode_floatLess, cvec) | RealComparison (TestLessEqual, PrecSingle) => genOpcode(opcode_floatLessEq, cvec) | RealComparison (TestGreater, PrecSingle) => genOpcode(opcode_floatGreater, cvec) | RealComparison (TestGreaterEqual, PrecSingle) => genOpcode(opcode_floatGreaterEq, cvec) | RealComparison (TestUnordered, PrecSingle) => genOpcode(opcode_floatUnordered, cvec) | RealArith (ArithAdd, PrecDouble) => genOpcode(opcode_realAdd, cvec) | RealArith (ArithSub, 
PrecDouble) => genOpcode(opcode_realSub, cvec) | RealArith (ArithMult, PrecDouble) => genOpcode(opcode_realMult, cvec) | RealArith (ArithDiv, PrecDouble) => genOpcode(opcode_realDiv, cvec) | RealArith (ArithAdd, PrecSingle) => genOpcode(opcode_floatAdd, cvec) | RealArith (ArithSub, PrecSingle) => genOpcode(opcode_floatSub, cvec) | RealArith (ArithMult, PrecSingle) => genOpcode(opcode_floatMult, cvec) | RealArith (ArithDiv, PrecSingle) => genOpcode(opcode_floatDiv, cvec) | RealArith _ => raise InternalError "RealArith - unimplemented instruction" | FreeCStack => genOpcode(opcode_freeCSpace, cvec) + + | AtomicExchangeAdd => genOpcode(opcode_atomicExchAdd, cvec) ; decsp() (* Removes one item from the stack. *) end | BICAllocateWordMemory {numWords as BICConstnt(length, _), flags as BICConstnt(flagByte, _), initial } => if isShort length andalso toShort length = 0w1 andalso isShort flagByte andalso toShort flagByte = 0wx40 then (* This is a very common case. *) ( gencde (initial, ToStack, NotEnd, loopAddr); genOpcode(opcode_alloc_ref, cvec) ) else let val () = gencde (numWords, ToStack, NotEnd, loopAddr) val () = gencde (flags, ToStack, NotEnd, loopAddr) val () = gencde (initial, ToStack, NotEnd, loopAddr) in genOpcode(opcode_allocWordMemory, cvec); decsp(); decsp() end | BICAllocateWordMemory { numWords, flags, initial } => let val () = gencde (numWords, ToStack, NotEnd, loopAddr) val () = gencde (flags, ToStack, NotEnd, loopAddr) val () = gencde (initial, ToStack, NotEnd, loopAddr) in genOpcode(opcode_allocWordMemory, cvec); decsp(); decsp() end | BICLoadOperation { kind=LoadStoreMLWord _, address={base, index=NONE, offset}} => ( (* If the index is a constant, frequently zero, we can use indirection. The offset is a byte count so has to be divided by the word size but it should always be an exact multiple. 
*) gencde (base, ToStack, NotEnd, loopAddr); offset mod Word.toInt wordSize = 0 orelse raise InternalError "gencde: BICLoadOperation - not word multiple"; genIndirect (offset div Word.toInt wordSize, cvec) ) | BICLoadOperation { kind=LoadStoreMLWord _, address} => ( genMLAddress(address, Word.toInt wordSize); genOpcode(opcode_loadMLWord, cvec); decsp() ) | BICLoadOperation { kind=LoadStoreMLByte _, address} => ( genMLAddress(address, 1); genOpcode(opcode_loadMLByte, cvec); decsp() ) | BICLoadOperation { kind=LoadStoreC8, address} => ( genCAddress address; genOpcode(opcode_loadC8, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC16, address} => ( genCAddress address; genOpcode(opcode_loadC16, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC32, address} => ( genCAddress address; genOpcode(opcode_loadC32, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreC64, address} => ( genCAddress address; genOpcode(opcode_loadC64, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreCFloat, address} => ( genCAddress address; genOpcode(opcode_loadCFloat, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreCDouble, address} => ( genCAddress address; genOpcode(opcode_loadCDouble, cvec); decsp(); decsp() ) | BICLoadOperation { kind=LoadStoreUntaggedUnsigned, address} => ( genMLAddress(address, Word.toInt wordSize); genOpcode(opcode_loadUntagged, cvec); decsp() ) | BICStoreOperation { kind=LoadStoreMLWord _, address, value } => ( genMLAddress(address, Word.toInt wordSize); gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeMLWord, cvec); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreMLByte _, address, value } => ( genMLAddress(address, 1); gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeMLByte, cvec); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC8, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC8, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC16, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC16, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC32, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC32, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreC64, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeC64, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreCFloat, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeCFloat, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreCDouble, address, value} => ( genCAddress address; gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeCDouble, cvec); decsp(); decsp(); decsp() ) | BICStoreOperation { kind=LoadStoreUntaggedUnsigned, address, value} => ( genMLAddress(address, Word.toInt wordSize); gencde (value, ToStack, NotEnd, loopAddr); genOpcode(opcode_storeUntagged, cvec); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpMove{isByteMove=true}, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, 1); genMLAddress(destRight, 1); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockMoveByte, cvec); decsp(); decsp(); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpMove{isByteMove=false}, sourceLeft, destRight, length } => ( 
genMLAddress(sourceLeft, Word.toInt wordSize); genMLAddress(destRight, Word.toInt wordSize); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockMoveWord, cvec); decsp(); decsp(); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpEqualByte, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, 1); genMLAddress(destRight, 1); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockEqualByte, cvec); decsp(); decsp(); decsp(); decsp() ) | BICBlockOperation { kind=BlockOpCompareByte, sourceLeft, destRight, length } => ( genMLAddress(sourceLeft, 1); genMLAddress(destRight, 1); gencde (length, ToStack, NotEnd, loopAddr); genOpcode(opcode_blockCompareByte, cvec); decsp(); decsp(); decsp(); decsp() ) | BICArbitrary { oper, arg1, arg2, ... } => let open BuiltIns val () = gencde (arg1, ToStack, NotEnd, loopAddr) val () = gencde (arg2, ToStack, NotEnd, loopAddr) in case oper of ArithAdd => genOpcode(opcode_arbAdd, cvec) | ArithSub => genOpcode(opcode_arbSubtract, cvec) | ArithMult => genOpcode(opcode_arbMultiply, cvec) | _ => raise InternalError "Unknown arbitrary precision operation"; decsp() (* Removes one item from the stack. *) end in (* body of gencde *) (* This ensures that there is precisely one item on the stack if whereto = ToStack and no items if whereto = NoResult. There are two points to note carefully here: (1) Negative stack adjustments are legal if we have exited. This is because matchFailFn can cut the stack back too far for its immediately enclosing expression. This is harmless because the code actually exits that expression. (2) A stack adjustment of ~1 is legal if we're generating a declaration in "ToStack" mode, because not all declarations actually generate the dummy value that we expect. This used to be handled in resetStack itself, but it's more transparent to do it here. (In addition, there was a bug in resetStack - it accumulated the stack resets, but didn't correctly accumulate these "~1" dummy value pushes.) It's all much better now. SPF 9/1/97 *) case whereto of ToStack => let val newsp = oldsp + 1; val adjustment = !realstackptr - newsp val () = if adjustment = 0 then () else if adjustment < ~1 then raise InternalError ("gencde: bad adjustment " ^ Int.toString adjustment) (* Hack for declarations that should push values, but don't *) else if adjustment = ~1 then pushConst (DummyValue, cvec) else resetStack (adjustment, true, cvec) in realstackptr := newsp end | NoResult => let val adjustment = !realstackptr - oldsp val () = if adjustment = 0 then () else if adjustment < 0 then raise InternalError ("gencde: bad adjustment " ^ Int.toString adjustment) else resetStack (adjustment, false, cvec) in realstackptr := oldsp end end (* gencde *) (* doNext is only used for mutually recursive functions where a function may not be able to fill in its closure if it does not have all the remaining declarations. *) (* TODO: This always creates the closure on the heap even when makeClosure is false. *) and genProc ({ closure=[], localCount, body, argTypes, name, ...}: bicLambdaForm, mutualDecs, doNext: unit -> unit) : unit = let (* Create a one word item for the closure. This is returned for recursive references and filled in with the address of the code when we've finished. *) val closure = makeConstantClosure() val newCode : code = codeCreate(name, parameters); (* Code-gen function. No non-local references. 
*) val () = codegen (body, newCode, closure, List.length argTypes, localCount, parameters); val () = pushConst(closureAsAddress closure, cvec); val () = incsp(); in if mutualDecs then doNext () else () end | genProc ({ localCount, body, name, argTypes, closure, ...}, mutualDecs, doNext) = let (* Full closure required. *) val resClosure = makeConstantClosure() val newCode = codeCreate (name, parameters) (* Code-gen function. *) val () = codegen (body, newCode, resClosure, List.length argTypes, localCount, parameters) val closureVars = List.length closure (* Size excluding the code address *) in if mutualDecs then let (* Have to make the closure now and fill it in later. *) val () = pushConst(toMachineWord resClosure, cvec) val () = genAllocMutableClosure(closureVars, cvec) val () = incsp () val entryAddr : int = !realstackptr val () = doNext () (* Any mutually recursive functions. *) (* Push the address of the vector - If we have processed other closures the vector will no longer be on the top of the stack. *) val () = pushLocalStackValue (~ entryAddr) (* Load items for the closure. *) fun loadItems ([], _) = () | loadItems (v :: vs, addr : int) = let (* Generate an item and move it into the closure. *) val () = gencde (BICExtract v, ToStack, NotEnd, NONE) (* The closure "address" excludes the code address. *) val () = genMoveToMutClosure(addr, cvec) val () = decsp () in loadItems (vs, addr + 1) end val () = loadItems (closure, 0) val () = genLock cvec (* Lock it. *) (* Remove the extra reference. *) val () = resetStack (1, false, cvec) in realstackptr := !realstackptr - 1 end else let (* Put it on the stack. *) val () = pushConst (toMachineWord resClosure, cvec) val () = incsp () val () = List.app (fn pt => gencde (BICExtract pt, ToStack, NotEnd, NONE)) closure val () = genClosure (closureVars, cvec) in realstackptr := !realstackptr - closureVars end end and genCond (testCode, thenCode, elseCode, whereto, tailKind, loopAddr) = let (* andalso and orelse are turned into conditionals with constants. Convert this into a series of tests. *) fun genTest(BICConstnt(w, _), jumpOn, targetLabel) = let val cVal = case toShort w of 0w0 => false | 0w1 => true | _ => raise InternalError "genTest" in if cVal = jumpOn then putBranchInstruction (Jump, targetLabel, cvec) else () end | genTest(BICUnary { oper=BuiltIns.NotBoolean, arg1 }, jumpOn, targetLabel) = genTest(arg1, not jumpOn, targetLabel) | genTest(BICCond (testPart, thenPart, elsePart), jumpOn, targetLabel) = let val toElse = createLabel() and exitJump = createLabel() in genTest(testPart, false, toElse); genTest(thenPart, jumpOn, targetLabel); putBranchInstruction (Jump, exitJump, cvec); setLabel (toElse, cvec); genTest(elsePart, jumpOn, targetLabel); setLabel (exitJump, cvec) end | genTest(testCode, jumpOn, targetLabel) = ( gencde (testCode, ToStack, NotEnd, loopAddr); putBranchInstruction(if jumpOn then JumpTrue else JumpFalse, targetLabel, cvec); decsp() (* conditional branch pops a value. *) ) val toElse = createLabel() and exitJump = createLabel() val () = genTest(testCode, false, toElse) val () = gencde (thenCode, whereto, tailKind, loopAddr) (* Get rid of the result from the stack. If there is a result then the ``else-part'' will push it.
*) val () = case whereto of ToStack => decsp () | NoResult => () val () = putBranchInstruction (Jump, exitJump, cvec) (* start of "else part" *) val () = setLabel (toElse, cvec) val () = gencde (elseCode, whereto, tailKind, loopAddr) val () = setLabel (exitJump, cvec) in () end (* genCond *) and genEval (eval, tailKind : tail) : unit = let val argList : backendIC list = List.map #1 (#argList eval) val argsToPass : int = List.length argList; (* Load arguments *) fun loadArgs [] = () | loadArgs (v :: vs) = let (* Push each expression onto the stack. *) val () = gencde(v, ToStack, NotEnd, NONE) in loadArgs vs end; (* Called after the args and the closure to call have been pushed onto the stack. *) fun callClosure () : unit = case tailKind of NotEnd => (* Normal call. *) genCallClosure cvec | EndOfProc => (* Tail recursive call. *) let (* Get the return address onto the top of the stack. *) val () = pushLocalStackValue 0 (* Slide the return address, closure and args over the old closure, return address and args, and reset the stack. Then jump to the closure. *) val () = genTailCall(argsToPass + 2, !realstackptr - 1 + (numOfArgs - argsToPass), cvec); (* It's "-1" not "-2", because we didn't bump the realstackptr when we pushed the return address. SPF 3/1/97 *) in () end (* Have to guarantee that the expression to return the function is evaluated before the arguments. *) (* Returns true if evaluating it later is safe. *) fun safeToLeave (BICConstnt _) = true | safeToLeave (BICLambda _) = true | safeToLeave (BICExtract _) = true | safeToLeave (BICField {base, ...}) = safeToLeave base | safeToLeave (BICLoadContainer {base, ...}) = safeToLeave base | safeToLeave _ = false val () = if (case argList of [] => true | _ => safeToLeave (#function eval)) then let (* Can load the args first. *) val () = loadArgs argList in gencde (#function eval, ToStack, NotEnd, NONE) end else let (* The expression for the function is too complicated to risk leaving. It might have a side-effect and we must ensure that any side-effects it has are done before the arguments are loaded. *) val () = gencde(#function eval, ToStack, NotEnd, NONE); val () = loadArgs(argList); (* Load the function again. *) val () = genLocal(argsToPass, cvec); in incsp () end val () = callClosure () (* Call the function. *) (* Make sure we interpret when we return from the call *) val () = genEnterIntCall (cvec, argsToPass) in (* body of genEval *) realstackptr := !realstackptr - argsToPass (* Args popped by caller. *) end (* Generate the function. *) (* Assume we always want a result. There is otherwise a problem if the called routine returns a result of type void (i.e. no result) but the caller wants a result (e.g. the identity function). *) val () = gencde (pt, ToStack, EndOfProc, NONE) val () = genReturn (numOfArgs, cvec); in (* body of codegen *) (* Having code-generated the body of the function, it is copied into a new data segment. *) copyCode{code = cvec, maxStack = !maxStack, resultClosure=resultClosure, numberOfArguments=numOfArgs} end (* codegen *); fun gencodeLambda({ name, body, argTypes, localCount, ...}:bicLambdaForm, parameters, closure) = let (* make the code buffer for the new function. *) val newCode : code = codeCreate (name, parameters) (* This function must have no non-local references. 
*) in codegen (body, newCode, closure, List.length argTypes, localCount, parameters) end local val makeEntryPoint: string -> machineWord = RunCall.rtsCallFull1 "PolyCreateEntryPointObject" fun rtsCall makeCall (entryName: string, numOfArgs, debugArgs: Universal.universal list): machineWord = let open Address val cvec = codeCreate (entryName, debugArgs) val entryPointAddr = makeEntryPoint entryName (* Each argument is at the same offset, essentially we're just shifting them *) fun genLocals 0 = () | genLocals n = (genLocal(numOfArgs +1, cvec); genLocals (n-1)) val () = genLocals numOfArgs val () = pushConst(entryPointAddr, cvec) val () = makeCall(numOfArgs, cvec) val () = genReturn (numOfArgs, cvec) val closure = makeConstantClosure() val () = copyCode{code=cvec, maxStack=numOfArgs+1, numberOfArguments=numOfArgs, resultClosure=closure} in closureAsAddress closure end in structure Foreign = struct val rtsCallFast = rtsCall genRTSCallFast fun rtsCallFastRealtoReal(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastRealtoReal c) (entryName, 1, debugArgs) and rtsCallFastRealRealtoReal(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastRealRealtoReal c) (entryName, 2, debugArgs) and rtsCallFastGeneraltoReal(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastGeneraltoReal c) (entryName, 1, debugArgs) and rtsCallFastRealGeneraltoReal(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastRealGeneraltoReal c) (entryName, 2, debugArgs) fun rtsCallFastFloattoFloat(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastFloattoFloat c) (entryName, 1, debugArgs) and rtsCallFastFloatFloattoFloat(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastFloatFloattoFloat c) (entryName, 2, debugArgs) and rtsCallFastGeneraltoFloat(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastGeneraltoFloat c) (entryName, 1, debugArgs) and rtsCallFastFloatGeneraltoFloat(entryName, debugArgs) = rtsCall (fn (_, c) => genRTSCallFastFloatGeneraltoFloat c) (entryName, 2, debugArgs) type abi = int (* This must match the type in Foreign.LowLevel. Once this is bootstrapped we could use that type but note that this is the type we use within the compiler and we build Foreign.LowLevel AFTER compiling this. *) datatype cTypeForm = CTypeFloatingPt | CTypePointer | CTypeSignedInt | CTypeUnsignedInt | CTypeStruct of cType list | CTypeVoid withtype cType = { typeForm: cTypeForm, align: word, size: word } val abiList: unit -> (string * abi) list = RunCall.rtsCallFull0 "PolyInterpretedGetAbiList" type cif = Foreign.Memory.voidStar val createCIF: abi * cType * cType list -> cif= RunCall.rtsCallFull3 "PolyInterpretedCreateCIF" val callCFunction: cif * LargeWord.word * LargeWord.word * LargeWord.word -> unit = RunCall.rtsCallFull4 "PolyInterpretedCallFunction" (* foreignCall returns a function that actually calls the foreign function. *) fun foreignCall(abi, argTypes, resultType) = let val memocif = Foreign.Memory.memoise (fn () => createCIF(abi, resultType, argTypes)) () val closure = makeConstantClosure() (* For compatibility with the native code version we have to construct a function that takes three arguments rather than a single triple. *) val bodyCode = BICEval{function=BICConstnt(toMachineWord callCFunction, []), argList=[ (BICTuple[ BICEval{ function=BICConstnt(toMachineWord memocif, []), argList=[(BICConstnt(toMachineWord 0, []), GeneralType)], (* Unit. 
*) resultType=GeneralType }, BICExtract(BICLoadArgument 0), BICExtract(BICLoadArgument 2), BICExtract(BICLoadArgument 1)], GeneralType) ], resultType=GeneralType} val lambdaCode = { body = bodyCode, name = "foreignCall", closure=[], argTypes=[GeneralType, GeneralType, GeneralType], resultType = GeneralType, localCount=0, heapClosure=false} val () = gencodeLambda(lambdaCode, [], closure) in closureAsAddress closure end fun buildCallBack((*abi*) _, (*argTypes*) _, (*resultType*)_) = let fun buildClosure ((*mlFun*)_: LargeWord.word*LargeWord.word -> unit) = (* The result is the SysWord.word holding the C function. *) raise Foreign.Foreign "foreignCall not implemented" in Address.toMachineWord buildClosure end end end structure Sharing = struct open BACKENDTREE.Sharing type closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/CODETREE.ML b/mlsource/MLCompiler/CodeTree/CODETREE.ML index 887f11ea..137920e3 100644 --- a/mlsource/MLCompiler/CodeTree/CODETREE.ML +++ b/mlsource/MLCompiler/CodeTree/CODETREE.ML @@ -1,606 +1,610 @@ (* - Copyright (c) 2012,13,15-20 David C.J. Matthews + Copyright (c) 2012,13,15-21 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor CODETREE ( structure DEBUG: DEBUG structure PRETTY : PRETTYSIG structure BASECODETREE: BaseCodeTreeSig structure CODETREE_FUNCTIONS: CodetreeFunctionsSig structure BACKEND: sig type codetree type machineWord = Address.machineWord val codeGenerate: codetree * int * Universal.universal list -> (unit -> machineWord) * Universal.universal list structure Foreign: FOREIGNCALLSIG structure Sharing : sig type codetree = codetree end end structure OPTIMISER: sig type codetree and envSpecial and codeBinding val codetreeOptimiser: codetree * Universal.universal list * int -> { numLocals: int, general: codetree, bindings: codeBinding list, special: envSpecial } structure Sharing: sig type codetree = codetree and envSpecial = envSpecial and codeBinding = codeBinding end end sharing type PRETTY.pretty = BASECODETREE.pretty sharing BASECODETREE.Sharing = CODETREE_FUNCTIONS.Sharing = BACKEND.Sharing = OPTIMISER.Sharing ) : CODETREESIG = struct open Address; open StretchArray; open BASECODETREE; open PRETTY; open CODETREE_FUNCTIONS exception InternalError = Misc.InternalError and Interrupt = Thread.Thread.Interrupt infix 9 sub; fun mkDec (laddr, res) = Declar{value = res, addr = laddr, use=[]} fun deExtract(Extract ext) = ext | deExtract _ = raise InternalError "deExtract" datatype level = Level of { lev: int, closure: createClosure, lookup: int * int * bool -> loadForm } local (* We can have locals at the outer level. *) fun bottomLevel(addr, 0, false) = if addr < 0 then raise InternalError "load: negative" else LoadLocal addr | bottomLevel _ = (* Either the level is wrong or it's a parameter. 
*) raise InternalError "bottom level" in val baseLevel = Level { lev = 0, closure = makeClosure(), lookup = bottomLevel } end fun newLevel (Level{ lev, lookup, ...}) = let val closureList = makeClosure() val makeClosure = addToClosure closureList fun check n = if n < 0 then raise InternalError "load: negative" else n fun thisLevel(addr, level, isParam) = if level < 0 then raise InternalError "mkLoad: level must be non-negative" else if level > 0 then makeClosure(lookup(addr, level-1, isParam)) else (* This level *) if isParam then LoadArgument(check addr) else LoadLocal(check addr) in Level { lev = lev+1, closure = closureList, lookup = thisLevel } end fun getClosure(Level{ closure, ...}) = List.map Extract (extractClosure closure) fun mkLoad (addr, Level { lev = newLevel, lookup, ... } , Level { lev = oldLevel, ... }) = Extract(lookup(addr, newLevel - oldLevel, false)) and mkLoadParam(addr, Level { lev = newLevel, lookup, ... } , Level { lev = oldLevel, ... }) = Extract(lookup(addr, newLevel - oldLevel, true)) (* Transform a function so that free variables are converted to closure form. Returns the maximum local address used. *) fun genCode(pt, debugSwitches, numLocals) = let val printCodeTree = DEBUG.getParameter DEBUG.codetreeTag debugSwitches and compilerOut = PRETTY.getCompilerOutput debugSwitches (* val printCodeTree = true and compilerOut = PRETTY.prettyPrint(TextIO.print, 70) *) (* If required, print it first. This is the code that the front-end has produced. *) val () = if printCodeTree then compilerOut(pretty pt) else () (* This ensures that everything is printed just before it is code-generated. *) fun codeAndPrint(code, localCount) = let val () = if printCodeTree then compilerOut (BASECODETREE.pretty code) else (); in BACKEND.codeGenerate(code, localCount, debugSwitches) end (* Optimise it. *) val { numLocals = localCount, general = gen, bindings = decs, special = spec } = OPTIMISER.codetreeOptimiser(pt, debugSwitches, numLocals) (* At this stage we have a "general" value and also, possibly a "special" value. We could simply create mkEnv(decs, gen) and run preCode and genCode on that. However, we would lose the ability to insert any inline functions from this code into subsequent top-level expressions. We can't simply retain the "special" entry either because that may refer to values that have to be created once when the code is run. Such values will be referenced by "load" entries which refer to entries in the "decs". We construct a tuple which will contain the actual values after the code is run. Then if we want the value at some time in the future when we use something from the "special" entry we can extract the corresponding value from this tuple. Previously, this code always generated a tuple containing every declaration. That led to some very long compilation times because the back-end has some code which is quadratic in the number of entries on the stack. We now try to prune bindings by only generating the tuple if we have an inline function somewhere and only generating bindings we actually need. *) fun simplifySpec (EnvSpecTuple(size, env)) = let (* Get all the field entries. *) fun simpPair (gen, spec) = (gen, simplifySpec spec) val fields = List.tabulate(size, simpPair o env) in if List.all(fn (_, EnvSpecNone) => true | _ => false) fields then EnvSpecNone else EnvSpecTuple(size, fn n => List.nth(fields, n)) end | simplifySpec s = s (* None or inline function. 
*) in case simplifySpec spec of EnvSpecNone => let val (code, props) = codeAndPrint (mkEnv(decs, gen), localCount) in fn () => Constnt(code(), props) end | simpleSpec => let (* The bindings are marked using a three-valued mark. A binding is needed if it is referenced in any way. During the scan to find the references we need to avoid processing an entry that has already been processed but it is possible that a binding may be referenced as a general value only (e.g. from a function closure) and separately as a special value. See Test148.ML *) datatype visit = UnVisited | VisitedGeneral | VisitedSpecial local val refArray = Array.array(localCount, UnVisited) fun findDecs EnvSpecNone = () | findDecs (EnvSpecTuple(size, env)) = let val fields = List.tabulate(size, env) in List.app processGenAndSpec fields end | findDecs (EnvSpecInlineFunction({closure, ...}, env)) = let val closureItems = List.tabulate(List.length closure, env) in List.app processGenAndSpec closureItems end | findDecs (EnvSpecUnary _) = () | findDecs (EnvSpecBinary _) = () and processGenAndSpec (gen, spec) = (* The spec part needs only to be processed if this entry has not yet been visited, *) case gen of EnvGenLoad(LoadLocal addr) => let val previous = Array.sub(refArray, addr) in case (previous, spec) of (VisitedSpecial, _) => () (* Fully done *) | (VisitedGeneral, EnvSpecNone) => () (* Nothing useful *) | (_, EnvSpecNone) => (* We need this entry but we don't have any special entry to process. We could find another reference with a special entry. *) Array.update(refArray, addr, VisitedGeneral) | (_, _) => ( (* This has a special entry. Mark it and process. *) Array.update(refArray, addr, VisitedSpecial); findDecs spec ) end | EnvGenConst _ => () | _ => raise InternalError "doGeneral: not LoadLocal or Constant" val () = findDecs simpleSpec in (* Convert to an immutable data structure. This will continue to be referenced in any inline function after the code has run. *) val refVector = Array.vector refArray end val decArray = Array.array(localCount, CodeZero) fun addDec(addr, dec) = if Vector.sub(refVector, addr) <> UnVisited then Array.update(decArray, addr, dec) else () fun addDecs(Declar{addr, ...}) = addDec(addr, mkLoadLocal addr) | addDecs(RecDecs decs) = List.app(fn {addr, ...} => addDec(addr, mkLoadLocal addr)) decs | addDecs(NullBinding _) = () | addDecs(Container{addr, size, ...}) = addDec(addr, mkTupleFromContainer(addr, size)) val () = List.app addDecs decs (* Construct the tuple and add the "general" value at the start. *) val resultTuple = mkTuple(gen :: Array.foldr(op ::) nil decArray) (* Now generate the machine code and return it as a function that can be called. *) val (code, codeProps) = codeAndPrint (mkEnv (decs, resultTuple), localCount) in (* Return a function that executes the compiled code and then creates the final "global" value as the result. *) fn () => let local (* Execute the code. This will perform any side-effects the user has programmed and may raise an exception if that is required. *) val resVector = code () (* The result is a vector containing the "general" value as the first word and the evaluated bindings for any "special" entries in subsequent words. *) val decVals : address = if isShort resVector then raise InternalError "Result vector is not an address" else toAddress resVector in fun resultWordN n = loadWord (decVals, n) (* Get the general value, the zero'th entry in the vector. *) val generalVal = resultWordN 0w0 (* Get the properties for a field in the tuple. 
Because the result is a tuple all the properties should be contained in a tupleTag entry. *) val fieldProps = case Option.map (Universal.tagProject CodeTags.tupleTag) (List.find(Universal.tagIs CodeTags.tupleTag) codeProps) of NONE => (fn _ => []) | SOME p => (fn n => List.nth(p, n)) val generalProps = fieldProps 0 end (* Construct a new environment so that when an entry is looked up the corresponding constant is returned. *) fun newEnviron (oldEnv) args = let val (oldGeneral, oldSpecial) = oldEnv args val genPair = case oldGeneral of EnvGenLoad(LoadLocal addr) => ( (* For the moment retain this check. It's better to have an assertion failure than a segfault. *) Vector.sub(refVector, addr) <> UnVisited orelse raise InternalError "Reference to non-existent binding"; (resultWordN(Word.fromInt addr+0w1), fieldProps(addr+1)) ) | EnvGenConst c => c | _ => raise InternalError "codetree newEnviron: Not Extract or Constnt" val specVal = mapSpec oldSpecial in (EnvGenConst genPair, specVal) end and mapSpec EnvSpecNone = EnvSpecNone | mapSpec (EnvSpecTuple(size, env)) = EnvSpecTuple(size, newEnviron env) | mapSpec (EnvSpecInlineFunction(spec, env)) = EnvSpecInlineFunction(spec, (newEnviron env)) | mapSpec (EnvSpecUnary _) = EnvSpecNone | mapSpec (EnvSpecBinary _) = EnvSpecNone in (* and return the whole lot as a global value. *) Constnt(generalVal, setInline(mapSpec simpleSpec) generalProps) end end end (* genCode *) (* Constructor functions for the front-end of the compiler. *) local fun mkSimpleFunction inlineType (lval, args, name, closure, numLocals) = { body = lval, isInline = inlineType, name = if name = "" then "" else name, closure = map deExtract closure, argTypes = List.tabulate(args, fn _ => (GeneralType, [])), resultType = GeneralType, localCount = numLocals, recUse = [] } in val mkProc = Lambda o mkSimpleFunction DontInline (* Normal function *) and mkInlproc = Lambda o mkSimpleFunction InlineAlways (* Explicitly inlined by the front-end *) (* Unless Compiler.inlineFunctor is false functors are treated as macros and expanded when they are applied. Unlike core-language functions they are not first-class values so if they are inline the "value" returned in the initial binding can just be zero except if there is something in the closure. Almost always the closure will be empty since free variables will come from previous topdecs and will be constants. The exception is if a structure and a functor using the structure appear in the same topdec (no semicolon between them). In that case we can't leave it. We would have to update the closure even if we leave the body untouched but we could have closure entries that are constants. e.g.
structure S = struct val x = 1 end functor F() = struct open S end *) fun mkMacroProc (args as (_, _, _, [], _)) = Constnt(toMachineWord 0, setInline ( EnvSpecInlineFunction(mkSimpleFunction InlineAlways args, fn _ => raise InternalError "mkMacroProc: closure")) []) | mkMacroProc args = Lambda(mkSimpleFunction InlineAlways args) end local fun mkFunWithTypes inlineType { body, argTypes=argsAndTypes, resultType, name, closure, numLocals } = Lambda { body = body, isInline = inlineType, name = if name = "" then "" else name, closure = map deExtract closure, argTypes = map (fn t => (t, [])) argsAndTypes, resultType = resultType, localCount = numLocals, recUse = [] } in val mkFunction = mkFunWithTypes DontInline and mkInlineFunction = mkFunWithTypes InlineAlways end fun mkEval (ct, clist) = Eval { function = ct, argList = List.map(fn c => (c, GeneralType)) clist, resultType=GeneralType } fun mkCall(func, argsAndTypes, resultType) = Eval { function = func, argList = argsAndTypes, resultType=resultType } (* Basic built-in operations. *) fun mkUnary (oper, arg1) = Unary { oper = oper, arg1 = arg1 } and mkBinary (oper, arg1, arg2) = Binary { oper = oper, arg1 = arg1, arg2 = arg2 } val getCurrentThreadId = Nullary{oper=BuiltIns.GetCurrentThreadId} val getCurrentThreadIdFn = mkInlproc(getCurrentThreadId, 1 (* Ignores argument *), "GetThreadId()", [], 0) val checkRTSException = Nullary{oper=BuiltIns.CheckRTSException} + + val cpuPause = Nullary{oper=BuiltIns.CPUPause} + val cpuPauseFn = + mkInlproc(cpuPause, 1 (* Ignores argument *), "CPUPause()", [], 0) fun mkAllocateWordMemory (numWords, flags, initial) = AllocateWordMemory { numWords = numWords, flags = flags, initial = initial } val mkAllocateWordMemoryFn = mkInlproc( mkAllocateWordMemory(mkInd(0, mkLoadArgument 0), mkInd(1, mkLoadArgument 0), mkInd(2, mkLoadArgument 0)), 1, "AllocateWordMemory()", [], 0) (* Builtins wrapped as functions. N.B. These all take a single argument which may be a tuple. *) fun mkUnaryFn oper = mkInlproc(mkUnary(oper, mkLoadArgument 0), 1, BuiltIns.unaryRepr oper ^ "()", [], 0) and mkBinaryFn oper = mkInlproc(mkBinary(oper, mkInd(0, mkLoadArgument 0), mkInd(1, mkLoadArgument 0)), 1, BuiltIns.binaryRepr oper ^ "()", [], 0) local open BuiltIns (* Word equality. The value of isSigned doesn't matter. *) val eqWord = WordComparison{test=TestEqual, isSigned=false} in fun mkNot arg = Unary{oper=NotBoolean, arg1=arg} and mkIsShort arg = Unary{oper=IsTaggedValue, arg1=arg} and mkEqualTaggedWord (arg1, arg2) = Binary{oper=eqWord, arg1=arg1, arg2=arg2} and mkEqualPointerOrWord (arg1, arg2) = Binary{oper=PointerEq, arg1=arg1, arg2=arg2} val equalTaggedWordFn = (* This takes two words, not a tuple. *) mkInlproc(mkBinary(eqWord, mkLoadArgument 0, mkLoadArgument 1), 2, "EqualWord()", [], 0) and equalPointerOrWordFn = (* This takes two words, not a tuple. 
*) mkInlproc(mkBinary(PointerEq, mkLoadArgument 0, mkLoadArgument 1), 2, "EqualWord()", [], 0) end fun mkLoadOperation(oper, base, index) = LoadOperation{kind=oper, address={base=base, index=SOME index, offset=0}} fun mkLoadOperationFn oper = mkInlproc(mkLoadOperation(oper, mkInd(0, mkLoadArgument 0), mkInd(1, mkLoadArgument 0)), 1, "loadOperation()", [], 0) fun mkStoreOperation(oper, base, index, value) = StoreOperation{kind=oper, address={base=base, index=SOME index, offset=0}, value=value} fun mkStoreOperationFn oper = mkInlproc(mkStoreOperation(oper, mkInd(0, mkLoadArgument 0), mkInd(1, mkLoadArgument 0), mkInd(2, mkLoadArgument 0)), 1, "storeOperation()", [], 0) fun mkBlockOperation {kind, leftBase, leftIndex, rightBase, rightIndex, length } = BlockOperation { kind = kind, sourceLeft={base=leftBase, index=SOME leftIndex, offset=0}, destRight={base=rightBase, index=SOME rightIndex, offset=0}, length=length} (* Construct a function that takes five arguments. The order is left-base, right-base, left-index, right-index, length. *) fun mkBlockOperationFn kind = mkInlproc( mkBlockOperation{kind=kind, leftBase=mkInd(0, mkLoadArgument 0), rightBase=mkInd(1, mkLoadArgument 0), leftIndex=mkInd(2, mkLoadArgument 0), rightIndex=mkInd(3, mkLoadArgument 0), length=mkInd(4, mkLoadArgument 0)}, 1, "blockOperation()", [], 0) fun identityFunction (name : string) : codetree = mkInlproc (mkLoadArgument 0, 1, name, [], 0) (* Returns its argument. *); (* Test a tag value. *) fun mkTagTest(test: codetree, tagValue: word, maxTag: word) = TagTest {test=test, tag=tagValue, maxTag=maxTag } fun mkHandle (exp, handler, exId) = Handle {exp = exp, handler = handler, exPacketAddr = exId} fun mkStr (strbuff:string) = Constnt (toMachineWord strbuff, []) (* If we have multiple references to a piece of code we may have to save it in a temporary and then use it from there. If the code has side-effects we certainly must do that to ensure that the side-effects are done exactly once and in the correct order, however if the code is just a constant or a load we can reduce the amount of code we generate by simply returning the original code. *) fun multipleUses (code as Constnt _, _, _) = {load = (fn _ => code), dec = []} (* | multipleUses (code as Extract(LoadLegacy{addr, level=loadLevel, ...}), _, level) = let (* May have to adjust the level. *) fun loadFn lev = if lev = level then code else mkLoad (addr, loadLevel + lev, level)) in {load = loadFn, dec = []} end | multipleUses (code as Extract(LoadLocal addr), _, level) = let (* May have to adjust the level. *) fun loadFn lev = if lev = level then code else mkLoad (addr, lev - level) in {load = loadFn, dec = []} end | multipleUses (code as Extract(LoadArgument _), _, level) = let (* May have to adjust the level. 
*) fun loadFn lev = if lev = level then code else raise InternalError "multipleUses: different level" (*else mkLoad (addr, lev - level)*) in {load = loadFn, dec = []} end | multipleUses (Extract _, _, _) = raise InternalError "multipleUses: TODO" *) | multipleUses (code, nextAddress, level) = let val addr = nextAddress(); fun loadFn lev = mkLoad (addr, lev, level); in {load = loadFn, dec = [mkDec (addr, code)]} end (* multipleUses *); fun mkMutualDecs [] = raise InternalError "mkMutualDecs: empty declaration list" | mkMutualDecs l = let fun convertDec(a, Lambda lam) = {lambda = lam, addr = a, use=[]} | convertDec _ = raise InternalError "mkMutualDecs: Recursive declaration is not a function" in RecDecs(List.map convertDec l) end val mkNullDec = NullBinding fun mkContainer(addr, size, setter) = Container{addr=addr, size=size, use=[], setter=setter} val mkIf = Cond and mkRaise = Raise fun mkConst v = Constnt(v, []) (* For the moment limit these to general arguments. *) fun mkLoop args = Loop (List.map(fn c => (c, GeneralType)) args) and mkBeginLoop(exp, args) = BeginLoop{loop=exp, arguments=List.map(fn(i, v) => ({value=v, addr=i, use=[]}, GeneralType)) args} fun mkWhile(b, e) = (* Generated as if b then (e; ) else (). *) mkBeginLoop(mkIf(b, mkEnv([NullBinding e], mkLoop[]), CodeZero), []) (* We previously had conditional-or and conditional-and as separate instructions. I've taken them out since they can be implemented just as efficiently as a normal conditional. In addition they were interfering with the optimisation where the second expression contained the last reference to something. We needed to add a "kill entry" to the other branch but there wasn't another branch to add it to. DCJM 7/12/00. *) fun mkCor(xp1, xp2) = mkIf(xp1, CodeTrue, xp2); fun mkCand(xp1, xp2) = mkIf(xp1, xp2, CodeZero); val mkSetContainer = fn (container, tuple, size) => mkSetContainer(container, tuple, BoolVector.tabulate(size, fn _ => true)) (* We don't generate the +, -, < etc operations directly here. Instead we create functions that the basis library can use to create the final versions by applying these functions to the arguments and an RTS function. The inline expansion system takes care of all the optimisation. An arbitrary precision operation takes a tuple consisting of a pair of arguments and a function. The code that is constructed checks both arguments to see if they are short. If they are not or the short precision operation overflows the code to call the function is executed. *) local val argX = mkInd(0, mkLoadArgument 0) and argY = mkInd(1, mkLoadArgument 0) val testShort = mkCand(mkIsShort argX, mkIsShort argY) val longCall = mkEval(mkInd(2, mkLoadArgument 0), [mkTuple[argX, argY]]) in fun mkArbitraryFn (oper as ArbArith arith) = mkInlproc( Arbitrary{oper=oper, shortCond=testShort, arg1=argX, arg2=argY, longCall=longCall }, 1, "Arbitrary" ^ BuiltIns.arithRepr arith ^ "()", [], 0) | mkArbitraryFn (oper as ArbCompare test) = (* The long function here is PolyCompareArbitrary which returns -1,0,+1 so the result has to be compared with zero. 
*) let val comparedResult = Binary{oper=BuiltIns.WordComparison{test=test, isSigned=true}, arg1=longCall, arg2=CodeZero} in mkInlproc( Arbitrary{oper=oper, shortCond=testShort, arg1=argX, arg2=argY, longCall=comparedResult }, 1, "Arbitrary" ^ BuiltIns.testRepr test ^ "()", [], 0) end end structure Foreign = BACKEND.Foreign structure Sharing = struct type machineWord = machineWord type codetree = codetree type pretty = pretty type argumentType=argumentType type codeBinding = codeBinding type level = level end end (* CODETREE functor body *); diff --git a/mlsource/MLCompiler/CodeTree/CODETREE_FUNCTIONS.sml b/mlsource/MLCompiler/CodeTree/CODETREE_FUNCTIONS.sml index 0142fae4..a697ed4d 100644 --- a/mlsource/MLCompiler/CodeTree/CODETREE_FUNCTIONS.sml +++ b/mlsource/MLCompiler/CodeTree/CODETREE_FUNCTIONS.sml @@ -1,624 +1,625 @@ (* - Copyright (c) 2012,13,16,18-20 David C.J. Matthews + Copyright (c) 2012,13,16,18-21 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Miscellaneous construction and operation functions on the code-tree. *) functor CODETREE_FUNCTIONS( structure BASECODETREE: BaseCodeTreeSig structure STRONGLY: sig val stronglyConnectedComponents: {nodeAddress: 'a -> int, arcs: 'a -> int list } -> 'a list -> 'a list list end ) : CodetreeFunctionsSig = struct open BASECODETREE open STRONGLY open Address exception InternalError = Misc.InternalError fun mkEnv([], exp) = exp | mkEnv(decs, exp) = Newenv(decs, exp) val word0 = toMachineWord 0 and word1 = toMachineWord 1 val False = word0 and True = word1 val F_mutable_words : Word8.word = Word8.orb (F_words, F_mutable) val CodeFalse = Constnt(False, []) and CodeTrue = Constnt(True, []) and CodeZero = Constnt(word0, []) (* Properties of code. This indicates the extent to which the code has side-effects (i.e. where even if the result is unused the code still needs to be produced) or is applicative (i.e. where its value depends only arguments and can safely be reordered). *) (* The RTS has a table of properties for RTS functions. The 103 call returns these Or-ed into the register mask. *) val PROPWORD_NORAISE = 0wx40000000 and PROPWORD_NOUPDATE = 0wx20000000 and PROPWORD_NODEREF = 0wx10000000 (* Since RTS calls are being eliminated leave residual versions of these. *) fun earlyRtsCall _ = false and sideEffectFreeRTSCall _ = false local infix orb andb val op orb = Word.orb and op andb = Word.andb val noSideEffect = PROPWORD_NORAISE orb PROPWORD_NOUPDATE val applicative = noSideEffect orb PROPWORD_NODEREF in fun codeProps (Lambda _) = applicative | codeProps (Constnt _) = applicative | codeProps (Extract _) = applicative | codeProps (TagTest{ test, ... }) = codeProps test | codeProps (Cond(i, t, e)) = codeProps i andb codeProps t andb codeProps e | codeProps (Newenv(decs, exp)) = List.foldl (fn (d, r) => bindingProps d andb r) (codeProps exp) decs | codeProps (Handle { exp, handler, ... 
}) = (* A handler processes all the exceptions in the body *) (codeProps exp orb PROPWORD_NORAISE) andb codeProps handler | codeProps (Tuple { fields, ...}) = testList fields | codeProps (Indirect{base, ...}) = codeProps base (* A built-in function may be side-effect free. This can occur if we have, for example, "if exp1 orelse exp2" where exp2 can be reduced to "true", typically because it's inside an inline function and some of the arguments to the function are constants. This then gets converted to (exp1; true) and we can eliminate exp1 if it is simply a comparison. *) | codeProps (Unary{oper, arg1}) = let open BuiltIns val operProps = case oper of NotBoolean => applicative | IsTaggedValue => applicative | MemoryCellLength => applicative (* MemoryCellFlags could return a different result if a mutable cell was locked. *) | MemoryCellFlags => applicative | ClearMutableFlag => Word.orb(PROPWORD_NODEREF, PROPWORD_NORAISE) - | AtomicIncrement => PROPWORD_NORAISE - | AtomicDecrement => PROPWORD_NORAISE | AtomicReset => Word.orb(PROPWORD_NODEREF, PROPWORD_NORAISE) | LongWordToTagged => applicative | SignedToLongWord => applicative | UnsignedToLongWord => applicative | RealAbs _ => applicative (* Does not depend on rounding setting. *) | RealNeg _ => applicative (* Does not depend on rounding setting. *) (* If we float a 64-bit int to a 64-bit floating point value we may lose precision so this depends on the current rounding mode. *) | RealFixedInt _ => Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) | FloatToDouble => applicative (* The rounding mode is set explicitly. *) | DoubleToFloat _ => applicative (* May raise the overflow exception *) | RealToInt _ => PROPWORD_NOUPDATE orb PROPWORD_NODEREF | TouchAddress => PROPWORD_NORAISE (* Treat as updating a notional reference count. *) | AllocCStack => PROPWORD_NORAISE in operProps andb codeProps arg1 end | codeProps (Binary{oper, arg1, arg2}) = let open BuiltIns val mayRaise = PROPWORD_NOUPDATE orb PROPWORD_NODEREF val operProps = case oper of WordComparison _ => applicative | FixedPrecisionArith _ => mayRaise | WordArith _ => applicative (* Quot and Rem don't raise exceptions - zero checking is done before. *) | WordLogical _ => applicative | WordShift _ => applicative | AllocateByteMemory => Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) (* Allocation returns a different value on each call. *) | LargeWordComparison _ => applicative | LargeWordArith _ => applicative (* Quot and Rem don't raise exceptions - zero checking is done before. *) | LargeWordLogical _ => applicative | LargeWordShift _ => applicative | RealComparison _ => applicative (* Real arithmetic operations depend on the current rounding setting. *) | RealArith _ => Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) | FreeCStack => PROPWORD_NORAISE orb PROPWORD_NODEREF | PointerEq => applicative + | AtomicExchangeAdd => PROPWORD_NORAISE in operProps andb codeProps arg1 andb codeProps arg2 end | codeProps (Nullary{oper=BuiltIns.GetCurrentThreadId}) = Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) | codeProps (Nullary{oper=BuiltIns.CheckRTSException}) = PROPWORD_NOUPDATE + (* Although Pause does not affect the store directly it does have observable effects. *) + | codeProps (Nullary{oper=BuiltIns.CPUPause}) = PROPWORD_NORAISE | codeProps (Arbitrary{shortCond, arg1, arg2, longCall, ...}) = (* Arbitrary precision operations are applicative but the longCall is a function call. It should never have a side-effect so it might be better to remove it. 
*) codeProps shortCond andb codeProps arg1 andb codeProps arg2 andb codeProps longCall | codeProps (AllocateWordMemory {numWords, flags, initial}) = let val operProps = Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) in operProps andb codeProps numWords andb codeProps flags andb codeProps initial end | codeProps (Eval _) = 0w0 | codeProps(Raise exp) = codeProps exp andb (Word.notb PROPWORD_NORAISE) (* Treat these as unsafe at least for the moment. *) | codeProps(BeginLoop _) = 0w0 | codeProps(Loop _) = 0w0 | codeProps (SetContainer _) = 0w0 | codeProps (LoadOperation {address, kind}) = let val operProps = case kind of LoadStoreMLWord {isImmutable=true} => applicative | LoadStoreMLByte {isImmutable=true} => applicative | _ => Word.orb(PROPWORD_NOUPDATE, PROPWORD_NORAISE) in operProps andb addressProps address end | codeProps (StoreOperation {address, value, ...}) = Word.orb(PROPWORD_NODEREF, PROPWORD_NORAISE) andb addressProps address andb codeProps value | codeProps (BlockOperation {kind, sourceLeft, destRight, length}) = let val operProps = case kind of BlockOpMove _ => PROPWORD_NORAISE | BlockOpEqualByte => applicative | BlockOpCompareByte => applicative in operProps andb addressProps sourceLeft andb addressProps destRight andb codeProps length end and testList t = List.foldl(fn (c, r) => codeProps c andb r) applicative t and bindingProps(Declar{value, ...}) = codeProps value | bindingProps(RecDecs _) = applicative (* These should all be lambdas *) | bindingProps(NullBinding c) = codeProps c | bindingProps(Container{setter, ...}) = codeProps setter and addressProps{base, index=NONE, ...} = codeProps base | addressProps{base, index=SOME index, ...} = codeProps base andb codeProps index (* sideEffectFree - does not raise an exception or make an assignment. *) fun sideEffectFree c = (codeProps c andb noSideEffect) = noSideEffect (* reorderable - does not raise an exception or access a reference. *) and reorderable c = codeProps c = applicative end (* Return the inline property if it is set. *) fun findInline [] = EnvSpecNone | findInline (h::t) = if Universal.tagIs CodeTags.inlineCodeTag h then Universal.tagProject CodeTags.inlineCodeTag h else findInline t (* Makes a constant value from an expression which is known to be constant but may involve inline functions, tuples etc. *) fun makeConstVal (cVal:codetree) = let fun makeVal (c as Constnt _) = c (* should just be a tuple *) (* Get a vector, copy the entries into it and return it as a constant. *) | makeVal (Tuple {fields= [], ...}) = CodeZero (* should have been optimised already! *) | makeVal (Tuple {fields, ...}) = let val tupleSize = List.length fields val vec : address = allocWordData(Word.fromInt tupleSize, F_mutable_words, word0) val fieldCode = map makeVal fields fun copyToVec ([], _) = [] | copyToVec (Constnt(w, prop) :: t, locn) = ( assignWord (vec, locn, w); prop :: copyToVec (t, locn + 0w1) ) | copyToVec _ = raise InternalError "not constant" val props = copyToVec(fieldCode, 0w0) (* If any of the constants have properties create a tuple property for the result. *) val tupleProps = if List.all null props then [] else let (* We also need to construct an EnvSpecTuple property because findInline does not look at tuple properties. 
*) val inlineProps = map findInline props val inlineProp = if List.all (fn EnvSpecNone => true | _ => false) inlineProps then [] else let fun tupleEntry n = (EnvGenConst(loadWord(vec, Word.fromInt n), List.nth(props, n)), List.nth(inlineProps, n)) in [Universal.tagInject CodeTags.inlineCodeTag (EnvSpecTuple(tupleSize, tupleEntry))] end in Universal.tagInject CodeTags.tupleTag props :: inlineProp end in lock vec; Constnt(toMachineWord vec, tupleProps) end | makeVal _ = raise InternalError "makeVal - not constant or tuple" in makeVal cVal end local fun allConsts [] = true | allConsts (Constnt _ :: t) = allConsts t | allConsts _ = false fun mkRecord isVar xp = let val tuple = Tuple{fields = xp, isVariant = isVar } in if allConsts xp then (* Make it now. *) makeConstVal tuple else tuple end; in val mkTuple = mkRecord false and mkDatatype = mkRecord true end (* Set the inline property. If the property is already present it is replaced. If the property we are setting is EnvSpecNone no property is set. *) fun setInline p (h::t) = if Universal.tagIs CodeTags.inlineCodeTag h then setInline p t else h :: setInline p t | setInline EnvSpecNone [] = [] | setInline p [] = [Universal.tagInject CodeTags.inlineCodeTag p] (* These are very frequently used and it might be worth making special bindings for values such as 0, 1, 2, 3 etc to reduce garbage. *) fun checkNonZero n = if n < 0 then raise InternalError "mkLoadxx: argument negative" else n val mkLoadLocal = Extract o LoadLocal o checkNonZero and mkLoadArgument = Extract o LoadArgument o checkNonZero and mkLoadClosure = Extract o LoadClosure o checkNonZero (* Set the container to the fields of the record. Try to push this down as far as possible. *) fun mkSetContainer(container, Cond(ifpt, thenpt, elsept), filter) = Cond(ifpt, mkSetContainer(container, thenpt, filter), mkSetContainer(container, elsept, filter)) | mkSetContainer(container, Newenv(decs, exp), filter) = Newenv(decs, mkSetContainer(container, exp, filter)) | mkSetContainer(_, r as Raise _, _) = r (* We may well have the situation where one branch of an "if" raises an exception. We can simply raise the exception on that branch. *) | mkSetContainer(container, Handle {exp, handler, exPacketAddr}, filter) = Handle{exp=mkSetContainer(container, exp, filter), handler=mkSetContainer(container, handler, filter), exPacketAddr = exPacketAddr} | mkSetContainer(container, tuple, filter) = SetContainer{container = container, tuple = tuple, filter = filter } local val except: exn = InternalError "Invalid load encountered in compiler" (* Exception value to use for invalid cases. We put this in the code but it should never actually be executed. *) val raiseError = Raise (Constnt (toMachineWord except, [])) in (* Look for an entry in a tuple. Used in both the optimiser and in mkInd. *) fun findEntryInBlock (Tuple { fields, isVariant, ...}, offset, isVar) = ( isVariant = isVar orelse raise InternalError "findEntryInBlock: tuple/datatype mismatch"; if offset < List.length fields then List.nth(fields, offset) (* This can arise if we're processing a branch of a case discriminating on a datatype which won't actually match at run-time. e.g. Tests/Succeed/Test030. *) else if isVar then raiseError else raise InternalError "findEntryInBlock: invalid address" ) | findEntryInBlock (Constnt (b, props), offset, isVar) = let (* Find the tuple property if it is present and extract the field props. 
*) val fieldProps = case List.find(Universal.tagIs CodeTags.tupleTag) props of NONE => [] | SOME p => List.nth(Universal.tagProject CodeTags.tupleTag p, offset) in case findInline props of EnvSpecTuple(_, env) => (* Do the selection now. This is especially useful if we have a global structure *) (* At the moment at least we assume that we can get all the properties from the tuple selection. *) ( case env offset of (EnvGenConst(w, p), inl) => Constnt(w, setInline inl p) (* The general value from selecting a field from a constant tuple must be a constant. *) | _ => raise InternalError "findEntryInBlock: not constant" ) | _ => (* The ML compiler may generate loads from invalid addresses as a result of a val binding to a constant which has the wrong shape. e.g. val a :: b = nil It will always result in a Bind exception being generated before the invalid load, but we have to be careful that the optimiser does not fall over. *) if isShort b orelse not (Address.isWords (toAddress b)) orelse Address.length (toAddress b) <= Word.fromInt offset then if isVar then raiseError else raise InternalError "findEntryInBlock: invalid address" else Constnt (loadWord (toAddress b, Word.fromInt offset), fieldProps) end | findEntryInBlock(base, offset, isVar) = Indirect {base = base, offset = offset, indKind = if isVar then IndVariant else IndTuple} (* anything else *) end (* Exported indirect load operation i.e. load a field from a tuple. We can't use findEntryInBlock in every case since that discards unused entries in a tuple and at this point we haven't checked that the unused entries don't have side-effects/raise exceptions e.g. #1 (1, raise Fail "bad") *) local fun mkIndirect isVar (addr, base as Constnt _) = findEntryInBlock(base, addr, isVar) | mkIndirect isVar (addr, base) = Indirect {base = base, offset = addr, indKind = if isVar then IndVariant else IndTuple} in val mkInd = mkIndirect false and mkVarField = mkIndirect true end fun mkIndContainer(addr, base) = Indirect{offset=addr, base=base, indKind=IndContainer} (* Create a tuple from a container. *) fun mkTupleFromContainer(addr, size) = Tuple{fields = List.tabulate(size, fn n => mkIndContainer(n, mkLoadLocal addr)), isVariant = false} (* Get the value from the code. *) fun evalue (Constnt(c, _)) = SOME c | evalue _ = NONE (* This is really to simplify the change from mkEnv taking a codetree list to taking a codeBinding list * code. This extracts the last entry which must be a NullBinding and packages the declarations with it. *) fun decSequenceWithFinalExp decs = let fun splitLast _ [] = raise InternalError "decSequenceWithFinalExp: empty" | splitLast decs [NullBinding exp] = (List.rev decs, exp) | splitLast _ [_] = raise InternalError "decSequenceWithFinalExp: last is not a NullDec" | splitLast decs (hd::tl) = splitLast (hd:: decs) tl in mkEnv(splitLast [] decs) end local type node = { addr: int, lambda: lambdaForm, use: codeUse list } fun nodeAddress({addr, ...}: node) = addr and arcs({lambda={closure, ...}, ...}: node) = List.foldl(fn (LoadLocal addr, l) => addr :: l | (_, l) => l) [] closure in val stronglyConnected = stronglyConnectedComponents{nodeAddress=nodeAddress, arcs=arcs} end (* In general any mutually recursive declaration can refer to any other. It's better to partition the recursive declarations into strongly connected components i.e. those that actually refer to each other. *) fun partitionMutualBindings(RecDecs rlist) = let val processed = stronglyConnected rlist (* Convert the result. 
Note that stronglyConnectedComponents returns the dependencies in the reverse order i.e. if X depends on Y but not the other way round then X will appear before Y in the list. We need to reverse it so that X goes after Y. *) fun rebuild ([{lambda, addr, use}], tl) = Declar{addr=addr, use=use, value=Lambda lambda} :: tl | rebuild (multiple, tl) = RecDecs multiple :: tl in List.foldl rebuild [] processed end (* This is only intended for RecDecs but it's simpler to handle all bindings. *) | partitionMutualBindings other = [other] (* Functions to help in building a closure. *) datatype createClosure = Closure of (loadForm * int) list ref fun makeClosure() = Closure(ref []) (* Function to build a closure. Items are added to the closure if they are not already there. *) fun addToClosure (Closure closureList) (ext: loadForm): loadForm = case (List.find (fn (l, _) => l = ext) (!closureList), ! closureList) of (SOME(_, n), _) => (* Already there *) LoadClosure n | (NONE, []) => (* Not there - first *) (closureList := [(ext, 0)]; LoadClosure 0) | (NONE, cl as (_, n) :: _) => (closureList := (ext, n+1) :: cl; LoadClosure(n+1)) fun extractClosure(Closure (ref closureList)) = List.foldl (fn ((ext, _), l) => ext :: l) [] closureList datatype inlineTest = TooBig | NonRecursive | TailRecursive of bool vector | NonTailRecursive of bool vector fun evaluateInlining(function, numArgs, maxInlineSize) = let (* This checks for the possibility of inlining a function. It sees if it is small enough according to some rough estimate of the cost and it also looks for recursive uses of the function. Typically if the function is small enough to inline there will be only one recursive use but we consider the possibility of more than one. If the only uses are tail recursive we can replace the recursive calls by a Loop with a BeginLoop outside it. If there are non-tail recursive calls we may be able to lift out arguments that are unchanged. For example for fun map f [] = [] | map f (a::b) = f a :: map f b it may be worth lifting out f and generating specific mapping functions for each application. *) val hasRecursiveCall = ref false (* Set to true if rec call *) val allTail = ref true (* Set to false if non recursive *) (* An element of this is set to false if the actual value if anything other than the original argument. At the end we are then left with the arguments that are unchanged. *) val argMod = Array.array(numArgs, true) infix 6 -- (* Subtract y from x but return 0 rather than a negative number. *) fun x -- y = if x >= y then x-y else 0 (* Check for the code size and also recursive references. N,B. We assume in hasLoop that tail recursion applies only with Cond, Newenv and Handler. *) fun checkUse _ (_, 0, _) = 0 (* The function is too big to inline. *) | checkUse isMain (Newenv(decs, exp), cl, isTail) = let fun checkBind (Declar{value, ...}, cl) = checkUse isMain(value, cl, false) | checkBind (RecDecs decs, cl) = List.foldl(fn ({lambda, ...}, n) => checkUse isMain (Lambda lambda, n, false)) cl decs | checkBind (NullBinding c, cl) = checkUse isMain (c, cl, false) | checkBind (Container{setter, ...}, cl) = checkUse isMain(setter, cl -- 1, false) in checkUse isMain (exp, List.foldl checkBind cl decs, isTail) end | checkUse _ (Constnt(w, _), cl, _) = if isShort w then cl else cl -- 1 (* A recursive reference in any context other than a call prevents any inlining. 
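       For example, in
           fun f n = List.map f [n]
       the inner f is passed as a value rather than called, so it appears as Extract LoadRecursive
       outside the function position of an Eval. The clause below then returns 0, the remaining
       cost becomes zero and evaluateInlining reports TooBig, i.e. the function is never inlined.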
*) | checkUse true (Extract LoadRecursive, _, _) = 0 | checkUse _ (Extract _, cl, _) = cl -- 1 | checkUse isMain (Indirect{base, ...}, cl, _) = checkUse isMain (base, cl -- 1, false) | checkUse _ (Lambda {body, argTypes, closure, ...}, cl, _) = (* For the moment, any recursive use in an inner function prevents inlining. *) if List.exists (fn LoadRecursive => true | _ => false) closure then 0 else checkUse false (body, cl -- (List.length argTypes + List.length closure), false) | checkUse true (Eval{function = Extract LoadRecursive, argList, ...}, cl, isTail) = let (* If the actual argument is anything but the original argument then the corresponding entry in the array is set to false. *) fun testArg((exp, _), n) = ( if (case exp of Extract(LoadArgument a) => n = a | _ => false) then () else Array.update(argMod, n, false); n+1 ) in List.foldl testArg 0 argList; hasRecursiveCall := true; if isTail then () else allTail := false; List.foldl(fn ((e, _), n) => checkUse true (e, n, false)) (cl--3) argList end | checkUse isMain (Eval{function, argList, ...}, cl, _) = checkUse isMain (function, List.foldl(fn ((e, _), n) => checkUse isMain (e, n, false)) (cl--2) argList, false) | checkUse _ (Nullary _, cl, _) = cl -- 1 | checkUse isMain (Unary{arg1, ...}, cl, _) = checkUse isMain (arg1, cl -- 1, false) | checkUse isMain (Binary{arg1, arg2, ...}, cl, _) = checkUseList isMain ([arg1, arg2], cl -- 1) | checkUse isMain (Arbitrary{arg1, arg2, ...}, cl, _) = checkUseList isMain ([arg1, arg2], cl -- 4) | checkUse isMain (AllocateWordMemory {numWords, flags, initial}, cl, _) = checkUseList isMain ([numWords, flags, initial], cl -- 1) | checkUse isMain (Cond(i, t, e), cl, isTail) = checkUse isMain (i, checkUse isMain (t, checkUse isMain (e, cl -- 2, isTail), isTail), false) | checkUse isMain (BeginLoop { loop, arguments, ...}, cl, _) = checkUse isMain (loop, List.foldl (fn (({value, ...}, _), n) => checkUse isMain (value, n, false)) cl arguments, false) | checkUse isMain (Loop args, cl, _) = List.foldl(fn ((e, _), n) => checkUse isMain (e, n, false)) cl args | checkUse isMain (Raise c, cl, _) = checkUse isMain (c, cl -- 1, false) | checkUse isMain (Handle {exp, handler, ...}, cl, isTail) = checkUse isMain (exp, checkUse isMain (handler, cl, isTail), false) | checkUse isMain (Tuple{ fields, ...}, cl, _) = checkUseList isMain (fields, cl) | checkUse isMain (SetContainer{container, tuple = Tuple { fields, ...}, ...}, cl, _) = (* This can be optimised *) checkUse isMain (container, checkUseList isMain (fields, cl), false) | checkUse isMain (SetContainer{container, tuple, filter}, cl, _) = checkUse isMain (container, checkUse isMain (tuple, cl -- (BoolVector.length filter), false), false) | checkUse isMain (TagTest{test, ...}, cl, _) = checkUse isMain (test, cl -- 1, false) | checkUse isMain (LoadOperation{address, ...}, cl, _) = checkUseAddress isMain (address, cl -- 1) | checkUse isMain (StoreOperation{address, value, ...}, cl, _) = checkUse isMain (value, checkUseAddress isMain (address, cl -- 1), false) | checkUse isMain (BlockOperation{sourceLeft, destRight, length, ...}, cl, _) = checkUse isMain (length, checkUseAddress isMain (destRight, checkUseAddress isMain (sourceLeft, cl -- 1)), false) and checkUseList isMain (elems, cl) = List.foldl(fn (e, n) => checkUse isMain (e, n, false)) cl elems and checkUseAddress isMain ({base, index=NONE, ...}, cl) = checkUse isMain (base, cl, false) | checkUseAddress isMain ({base, index=SOME index, ...}, cl) = checkUseList isMain ([base, index], cl) val costLeft = checkUse 
true (function, maxInlineSize, true) in if costLeft = 0 then TooBig else if not (! hasRecursiveCall) then NonRecursive else if ! allTail then TailRecursive(Array.vector argMod) else NonTailRecursive(Array.vector argMod) end structure Sharing = struct type codetree = codetree and codeBinding = codeBinding and loadForm = loadForm and createClosure = createClosure and envSpecial = envSpecial end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/ICodeSig.ML b/mlsource/MLCompiler/CodeTree/X86Code/ICodeSig.ML index 1258c7c6..04353835 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/ICodeSig.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/ICodeSig.ML @@ -1,420 +1,423 @@ (* Signature for the high-level X86 code - Copyright David C. J. Matthews 2016-19 + Copyright David C. J. Matthews 2016-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) signature ICodeSig = sig type machineWord = Address.machineWord type address = Address.address type closureRef (* Registers. *) datatype genReg = GeneralReg of Word8.word * bool and fpReg = FloatingPtReg of Word8.word and xmmReg = SSE2Reg of Word8.word datatype reg = GenReg of genReg | FPReg of fpReg | XMMReg of xmmReg val regRepr: reg -> string val nReg: reg -> int val is32bit: LargeInt.int -> bool datatype targetArch = Native32Bit | Native64Bit | ObjectId32Bit val targetArch: targetArch (* Should we use SSE2 or X87 floating point? *) datatype fpMode = FPModeSSE2 | FPModeX87 val fpMode: fpMode val eax: genReg and ebx: genReg and ecx: genReg and edx: genReg and edi: genReg and esi: genReg and esp: genReg and ebp: genReg and r8: genReg and r9: genReg and r10: genReg and r11: genReg and r12: genReg and r13: genReg and r14: genReg and r15: genReg and fp0: fpReg and fp1: fpReg and fp2: fpReg and fp3: fpReg and fp4: fpReg and fp5: fpReg and fp6: fpReg and fp7: fpReg and xmm0:xmmReg and xmm1:xmmReg and xmm2:xmmReg and xmm3:xmmReg and xmm4:xmmReg and xmm5:xmmReg and xmm6:xmmReg datatype branchOps = JO | JNO | JE | JNE | JL | JGE | JLE | JG | JB | JNB | JNA | JA | JP | JNP and arithOp = ADD | OR (*|ADC | SBB*) | AND | SUB | XOR | CMP and shiftType = SHL | SHR | SAR datatype boxKind = BoxLargeWord | BoxSSE2Double | BoxSSE2Float | BoxX87Double | BoxX87Float and fpOps = FADD | FMUL | FCOM | FCOMP | FSUB | FSUBR | FDIV | FDIVR and fpUnaryOps = FABS | FCHS | FLD1 | FLDZ datatype sse2UnaryOps = SSE2UDoubleToFloat | SSE2UFloatToDouble and sse2BinaryOps = SSE2BAddDouble | SSE2BSubDouble | SSE2BMulDouble | SSE2BDivDouble | SSE2BXor | SSE2BAnd | SSE2BAddSingle | SSE2BSubSingle | SSE2BMulSingle | SSE2BDivSingle val memRegThreadSelf: int (* Copied from X86CodeSig *) and memRegExceptionPacket: int and memRegCStackPtr: int datatype callKinds = Recursive | ConstantCode of machineWord | FullCall datatype preg = PReg of int (* A pseudo-register - an abstract register. *) (* A location on the stack. May be more than word if this is a container or a handler entry. 
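       Here size is the number of words the location occupies and rno is an identifying number,
       playing much the same role for stack locations as the integer inside PReg does for
       pseudo-registers.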
*) datatype stackLocn = StackLoc of {size: int, rno: int } (* This combines pregKind and stackLocn. *) datatype regProperty = RegPropGeneral (* A general register. *) | RegPropUntagged (* An untagged general register. *) | RegPropStack of int (* A stack location or container. *) | RegPropCacheTagged | RegPropCacheUntagged | RegPropMultiple (* The result of a conditional or case. May be defined at multiple points. *) datatype argument = RegisterArgument of preg | AddressConstant of machineWord (* A constant that is an address. *) | IntegerConstant of LargeInt.int (* A non-address constant. Will usually be shifted and tagged. *) | MemoryLocation of { base: preg, offset: int, index: memoryIndex, cache: preg option } (* A memory location. *) (* Offset on the stack. The container is the stack location identifier, the field is an offset in a container. cache is an optional cache register. *) | StackLocation of { wordOffset: int, container: stackLocn, field: int, cache: preg option } (* Address of a container. *) | ContainerAddr of { container: stackLocn, stackOffset: int } (* Generally this indicates the index register if present. For 32-in-64 the "index" may be ObjectIndex in which case the base is actually an object index. *) and memoryIndex = NoMemIndex | MemIndex1 of preg | MemIndex2 of preg | MemIndex4 of preg | MemIndex8 of preg | ObjectIndex (* Kinds of moves. Move32Bit - 32-bit loads and stores Move64Bit - 64-bit loads and stores MoveByte - When loading, load a byte and zero extend. Move16Bit - Used for C-memory loads and stores. Zero extends on load. MoveFloat - Load and store a single-precision value MoveDouble - Load and store a double-precision value. *) datatype moveKind = MoveByte | Move16Bit | Move32Bit | Move64Bit | MoveFloat | MoveDouble val movePolyWord: moveKind and moveNativeWord: moveKind (* The reference to a condition code. *) datatype ccRef = CcRef of int (* Size of operand. OpSize64 is only valid in 64-bit mode. *) datatype opSize = OpSize32 | OpSize64 val polyWordOpSize: opSize and nativeWordOpSize: opSize datatype x86ICode = (* Move a value into a register. *) LoadArgument of { source: argument, dest: preg, kind: moveKind } (* Store a value into memory. The source will usually be a register but could be a constant depending on the value. If isMutable is true we're assigning to a ref and we need to flush the memory cache. *) | StoreArgument of { source: argument, base: preg, offset: int, index: memoryIndex, kind: moveKind, isMutable: bool } (* Load an entry from the "memory registers". Used for ThreadSelf and AllocCStack. *) | LoadMemReg of { offset: int, dest: preg, kind: moveKind } (* Store a value into an entry in the "memory registers". Used AllocCStack/FreeCStack. *) | StoreMemReg of { offset: int, source: preg, kind: moveKind } (* Start of function. Set the register arguments. stackArgs is the list of stack arguments. The last entry is the return address. If the function has a real closure regArgs includes the closure register (rdx). *) | BeginFunction of { regArgs: (preg * reg) list, stackArgs: stackLocn list } (* Call a function. If the code address is a constant it is passed here. Otherwise the address is obtained by indirecting through rdx which has been loaded as one of the argument registers. The result is stored in the destination register. *) | FunctionCall of { callKind: callKinds, regArgs: (argument * reg) list, stackArgs: argument list, dest: preg, realDest: reg, saveRegs: preg list} (* Jump to a tail-recursive function. 
This is similar to FunctionCall but complicated for stack arguments because the stack and the return address need to be overwritten. stackAdjust is the number of words to remove (positive) or add (negative) to the stack before the call. currStackSize contains the number of items currently on the stack. *) | TailRecursiveCall of { callKind: callKinds, regArgs: (argument * reg) list, stackArgs: {src: argument, stack: int} list, stackAdjust: int, currStackSize: int, workReg: preg } (* Allocate a fixed sized piece of memory. The size is the number of words required. This sets the length word including the flags bits. saveRegs is the list of registers that need to be saved if we need to do a garbage collection. *) | AllocateMemoryOperation of { size: int, flags: Word8.word, dest: preg, saveRegs: preg list } (* Allocate a piece of memory whose size is not known at compile-time. The size argument is the number of words. *) | AllocateMemoryVariable of { size: preg, dest: preg, saveRegs: preg list } (* Initialise a piece of memory. N.B. The size is an untagged value containing the number of words. This uses REP STOSL/Q so addr must be rdi, size must be rcx and init must be rax. *) | InitialiseMem of { size: preg, addr: preg, init: preg } (* Signal that a tuple has been fully initialised. Really a check in the low-level code-generator. *) | InitialisationComplete (* Mark the beginning of a loop. This is really only to prevent the initialisation code being duplicated in ICodeOptimise. *) | BeginLoop (* Set up the registers for a jump back to the start of a loop. *) | JumpLoop of { regArgs: (argument * preg) list, stackArgs: (argument * int * stackLocn) list, checkInterrupt: preg list option, workReg: preg option } (* Raise an exception. The packet is always loaded into rax. *) | RaiseExceptionPacket of { packetReg: preg } (* Reserve a contiguous area on the stack to receive a result tuple. *) | ReserveContainer of { size: int, container: stackLocn } (* Indexed case. *) | IndexedCaseOperation of { testReg: preg, workReg: preg } (* Lock a mutable cell by turning off the mutable bit. *) | LockMutable of { addr: preg } (* Compare two word values. The first argument must be a register. *) | WordComparison of { arg1: preg, arg2: argument, ccRef: ccRef, opSize: opSize } (* Compare with a literal. This is generally used to compare a memory or stack location with a literal and overlaps to some extent with WordComparison. *) | CompareLiteral of { arg1: argument, arg2: LargeInt.int, opSize: opSize, ccRef: ccRef } (* Compare a byte location with a literal. This is the only operation that specifically deals with single bytes. Other cases will use word operations. *) | CompareByteMem of { arg1: { base: preg, offset: int, index: memoryIndex }, arg2: Word8.word, ccRef: ccRef } (* Exception handling. - Set up an exception handler. *) | PushExceptionHandler of { workReg: preg } (* End of a handled section. Restore the previous handler. *) | PopExceptionHandler of { workReg: preg } (* Marks the start of a handler. This sets the stack pointer and restores the old handler. Sets the exception packet register. *) | BeginHandler of { packetReg: preg, workReg: preg } (* Return from the function. *) | ReturnResultFromFunction of { resultReg: preg, realReg: reg, numStackArgs: int } (* Arithmetic or logical operation. These can set the condition codes. *) | ArithmeticFunction of { oper: arithOp, resultReg: preg, operand1: preg, operand2: argument, ccRef: ccRef, opSize: opSize } (* Test the tag bit of a word. 
Sets the Zero bit if the value is an address i.e. untagged. *) | TestTagBit of { arg: argument, ccRef: ccRef } (* Push a value to the stack. Added during translation phase. *) | PushValue of { arg: argument, container: stackLocn } (* Copy a value to a cache register. LoadArgument could be used for this but it may be better to keep it separate. *) | CopyToCache of { source: preg, dest: preg, kind: moveKind } (* Remove items from the stack. Added during translation phase. *) | ResetStackPtr of { numWords: int, preserveCC: bool } (* Store a value into the stack. *) | StoreToStack of { source: argument, container: stackLocn, field: int, stackOffset: int } (* Tag a value by shifting and setting the tag bit. *) | TagValue of { source: preg, dest: preg, isSigned: bool, opSize: opSize } (* Shift a value to remove the tag bit. The cache is used if this is untagging a value that has previously been tagged. *) | UntagValue of { source: preg, dest: preg, isSigned: bool, cache: preg option, opSize: opSize } (* This provides the LEA instruction which can be used for various sorts of arithmetic. The base register is optional in this case. *) | LoadEffectiveAddress of { base: preg option, offset: int, index: memoryIndex, dest: preg, opSize: opSize } (* Shift a word by an amount that can either be a constant or a register. *) | ShiftOperation of { shift: shiftType, resultReg: preg, operand: preg, shiftAmount: argument, ccRef: ccRef, opSize: opSize } (* Multiplication. We can use signed multiplication for both fixed precision and word (unsigned) multiplication. There are various forms of the instruction including a three-operand version. *) | Multiplication of { resultReg: preg, operand1: preg, operand2: argument, ccRef: ccRef, opSize: opSize } (* Division. This takes a register pair, always RDX:RAX, divides it by the operand register and puts the quotient in RAX and remainder in RDX. At the preg level we represent all of these by pRegs. The divisor can be either a register or a memory location. *) | Division of { isSigned: bool, dividend: preg, divisor: argument, quotient: preg, remainder: preg, opSize: opSize } (* Atomic exchange and addition. This is executed with a lock prefix and is used for atomic increment and decrement for mutexes. Before the operation the source contains an increment. After the operation - the source contains the old value of the destination and the destination + the resultReg contains the old value of the destination and the destination has been updated with its old value added to the increment. The destination is actually the word pointed at by "base". *) - | AtomicExchangeAndAdd of { base: preg, source: preg } + | AtomicExchangeAndAdd of { base: preg, source: preg, resultReg: preg } (* Create a "box" of a single-word "byte" cell and store the source into it. This can be implemented using AllocateMemoryOperation but the idea is to allow the transform layer to recognise when a value is being boxed and then unboxed and remove unnecessary allocation. *) | BoxValue of { boxKind: boxKind, source: preg, dest: preg, saveRegs: preg list } (* Compare two vectors of bytes and set the condition code on the result. In general vec1Addr and vec2Addr will be pointers inside memory cells so have to be untagged registers. *) | CompareByteVectors of { vec1Addr: preg, vec2Addr: preg, length: preg, ccRef: ccRef } (* Move a block of bytes (isByteMove true) or words (isByteMove false). The length is the number of items (bytes or words) to move. 
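       The move is generated as a REP MOVS string operation, which is why the register allocator
       expects srcAddr in esi, destAddr in edi and the length in ecx.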
*) | BlockMove of { srcAddr: preg, destAddr: preg, length: preg, isByteMove: bool } (* Floating point comparison. *) | X87Compare of { arg1: preg, arg2: argument, isDouble: bool, ccRef: ccRef } (* Floating point comparison. *) | SSE2Compare of { arg1: preg, arg2: argument, isDouble: bool, ccRef: ccRef } (* The X87 FP unit does not generate condition codes directly. We have to load the cc into RAX and test it there. *) | X87FPGetCondition of { ccRef: ccRef, dest: preg } (* Binary floating point operations on the X87. *) | X87FPArith of { opc: fpOps, resultReg: preg, arg1: preg, arg2: argument, isDouble: bool } (* Floating point operations: negate and set sign positive. *) | X87FPUnaryOps of { fpOp: fpUnaryOps, dest: preg, source: preg } (* Load a fixed point value as a floating point value. *) | X87Float of { dest: preg, source: argument } (* Load a fixed point value as a floating point value. *) | SSE2Float of { dest: preg, source: argument } (* Binary floating point operations using SSE2 instructions. *) | SSE2FPUnary of { opc: sse2UnaryOps, resultReg: preg, source: argument } (* Binary floating point operations using SSE2 instructions. *) | SSE2FPBinary of { opc: sse2BinaryOps, resultReg: preg, arg1: preg, arg2: argument } (* Tag a 32-bit floating point value. This is tagged by shifting left 32-bits and then setting the bottom bit. This allows memory operands to be untagged simply by loading the high-order word. *) | TagFloat of { source: preg, dest: preg } (* Untag a 32-bit floating point value into a XMM register. If the source is in memory we just need to load the high-order word. *) | UntagFloat of { source: argument, dest: preg, cache: preg option } (* Get and set the control registers. These all have to work through memory but it's simpler to assume they work through registers. *) | GetSSE2ControlReg of { dest: preg } | SetSSE2ControlReg of { source: preg } | GetX87ControlReg of { dest: preg } | SetX87ControlReg of { source: preg } (* Convert a floating point value to an integer. *) | X87RealToInt of { source: preg, dest: preg } (* Convert a floating point value to an integer. *) | SSE2RealToInt of { source: argument, dest: preg, isDouble: bool, isTruncate: bool } (* Sign extend a 32-bit value to 64-bits. Not included in LoadArgument because that assumes that if we have the result in a register we can simply reuse the register. *) | SignExtend32To64 of { source: argument, dest: preg } (* Touch an entry. Actually doesn't do anything except make sure it is referenced. *) | TouchArgument of { source: preg } + (* Pause instruction - used only in mutex spinlock *) + | PauseCPU + (* Destinations at the end of a basic block. *) and controlFlow = (* Unconditional branch to a label - should be a merge point. *) Unconditional of int (* Conditional branch. Jumps to trueJump if the condional is false, falseJump if false. *) | Conditional of { ccRef: ccRef, condition: branchOps, trueJump: int, falseJump: int } (* Exit - the last instruction of the block is a return, raise or tailcall. *) | ExitCode (* Indexed case - this branches to one of a number of labels *) | IndexedBr of int list (* Set up a handler. This doesn't cause an immediate branch but the state at the start of the handler is the state at this point. *) | SetHandler of { handler: int, continue: int } (* Unconditional branch to a handler. If an exception is raised explicitly within the scope of a handler. *) | UnconditionalHandle of int (* Conditional branch to a handler. 
Occurs if there is a call to a function within the scope of a handler. It may jump to the handler. *) | ConditionalHandle of { handler: int, continue: int } and basicBlock = BasicBlock of { block: x86ICode list, flow: controlFlow } (* Return the successor blocks from a control flow. *) val successorBlocks: controlFlow -> int list val printICodeAbstract: basicBlock vector * (string -> unit) -> unit val indexRegister: memoryIndex -> preg option (* Destinations used in move. *) datatype destinations = RegDest of reg | StackDest of int structure Sharing: sig type genReg = genReg and argument = argument and memoryIndex = memoryIndex and x86ICode = x86ICode and branchOps = branchOps and reg = reg and preg = preg and destinations = destinations and controlFlow = controlFlow and basicBlock = basicBlock and stackLocn = stackLocn and regProperty = regProperty and callKinds = callKinds and arithOp = arithOp and shiftType = shiftType and fpOps = fpOps and fpUnaryOps = fpUnaryOps and sse2UnaryOps = sse2UnaryOps and sse2BinaryOps = sse2BinaryOps and ccRef = ccRef and opSize = opSize and closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86AllocateRegisters.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86AllocateRegisters.ML index 22042d46..09cd99b6 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86AllocateRegisters.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86AllocateRegisters.ML @@ -1,857 +1,862 @@ (* - Copyright David C. J. Matthews 2016-19 + Copyright David C. J. Matthews 2016-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86AllocateRegisters( structure ICODE: ICodeSig structure IDENTIFY: X86IDENTIFYREFSSIG structure CONFLICTSETS: X86GETCONFLICTSETSIG structure INTSET: INTSETSIG sharing ICODE.Sharing = IDENTIFY.Sharing = CONFLICTSETS.Sharing = INTSET ): X86ALLOCATEREGISTERSSIG = struct open ICODE open IDENTIFY open CONFLICTSETS open INTSET open Address exception InternalError = Misc.InternalError local val regs = case targetArch of Native32Bit => [edi, esi, edx, ecx, ebx, eax] | Native64Bit => [edi, esi, edx, ecx, ebx, eax, r14, r13, r12, r11, r10, r9, r8] | ObjectId32Bit => [edi, esi, edx, ecx, eax, r14, r13, r12, r11, r10, r9, r8] in val generalRegisters = List.map GenReg regs end val floatingPtRegisters = case fpMode of (* XMM0-5 are the only volatile SSE2 registers in Windows X64. *) FPModeSSE2 => List.map XMMReg [xmm5, xmm4, xmm3, xmm2, xmm1, xmm0] (* We can't include fp7 because we need one spare. *) (* For the moment we only have FP0 here. There are problems with using the others because we need to ensure the stack is empty if we call any non-ML function and we don't currently manage it properly. 
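       The X87 registers form a hardware stack of eight entries and the ABI requires that stack
       to be empty across calls into foreign code; until that is tracked properly only a single
       register is made available to the allocator.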
*) | FPModeX87 => List.map FPReg [fp0(*, fp1, fp2, fp3, fp4, fp5, fp6*)] datatype allocateResult = AllocateSuccess of reg vector | AllocateFailure of intSet list fun allocateRegisters{blocks, regStates, regProps, ...} = let val maxPRegs = Vector.length regStates and numBlocks = Vector.length blocks (* Hint values. The idea of hints is that by using a hinted register we may avoid an unnecessary move instruction. realHints is set when a pseudo-register is going to be loaded from a specific register e.g. a register argument, or moved into one e.g. ecx for a shift. friends is set to the other pseudo-registers that may be associated with the pseudo-register. E.g. the argument and destination of an arithmetic operation where choosing the same register for each may avoid a move. *) val realHints = Array.array(maxPRegs, NONE: reg option) (* Sources and destinations. These indicate the registers that are the sources and destinations of the indexing register and are used as hints. If a register has been allocated for a source or destination we may be able to reuse it. *) val sourceRegs = Array.array(maxPRegs, []: int list) and destinationRegs = Array.array(maxPRegs, []: int list) local (* Turn cached locations into register arguments. *) fun decache(StackLocation{cache=SOME r, ...}) = RegisterArgument r | decache(MemoryLocation{cache=SOME r, ...}) = RegisterArgument r | decache arg = arg fun addRealHint(r, reg) = case Array.sub(realHints, r) of NONE => Array.update(realHints, r, SOME reg) | SOME _ => () fun addSourceAndDestinationHint{src, dst} = let val {conflicts, ...} = Vector.sub(regStates, src) in (* If they conflict we can't add them. *) if member(dst, conflicts) then () else let val currentDests = Array.sub(destinationRegs, src) val currentSources = Array.sub(sourceRegs, dst) in if List.exists(fn i => i=dst) currentDests then () else Array.update(destinationRegs, src, dst :: currentDests); if List.exists(fn i => i=src) currentSources then () else Array.update(sourceRegs, dst, src :: currentSources) end end in (* Add the hints to steer the register allocation. The idea is to avoid moves between registers by getting values into the appropriate register in advance. *) fun addHints{instr=LoadArgument{source, dest=PReg dreg, ...}, ...} = ( case decache source of RegisterArgument(PReg sreg) => addSourceAndDestinationHint {src=sreg, dst=dreg} | _ => () ) | addHints{instr=StoreArgument{ source, kind, ... 
}, ...} = ( case (decache source, kind, targetArch) of (* Special case for byte register on X86/32 *) (RegisterArgument(PReg sReg), MoveByte, Native32Bit) => addRealHint(sReg, GenReg ecx) | _ => () ) | addHints{instr=BeginFunction{regArgs, ...}, ...} = List.app (fn (PReg pr, reg) => addRealHint(pr, reg)) regArgs | addHints{instr=TailRecursiveCall{regArgs, ...}, ...} = List.app (fn (arg, reg) => case decache arg of RegisterArgument(PReg pr) => addRealHint(pr, reg) | _ => ()) regArgs | addHints{instr=FunctionCall{regArgs, dest=PReg dreg, realDest, ...}, ...} = ( addRealHint(dreg, realDest); List.app (fn (arg, reg) => case decache arg of RegisterArgument(PReg pr) => addRealHint(pr, reg) | _ => ()) regArgs ) | addHints{instr=InitialiseMem{size=PReg sReg, addr=PReg aReg, init=PReg iReg}, ...} = (addRealHint(aReg, GenReg edi); addRealHint(iReg, GenReg eax); addRealHint(sReg, GenReg ecx)) | addHints{instr=JumpLoop{regArgs, ...}, ...} = let fun addRegArg (arg, PReg resReg) = case decache arg of RegisterArgument(PReg argReg) => addSourceAndDestinationHint {dst=resReg, src=argReg} | _ => () in List.app addRegArg regArgs end | addHints{instr=RaiseExceptionPacket{ packetReg=PReg preg }, ...} = addRealHint(preg, GenReg eax) | addHints{instr=BeginHandler{packetReg=PReg pReg, workReg=_}, ...} = (* The exception packet is in rax. *) addRealHint(pReg, GenReg eax) | addHints{instr=ReturnResultFromFunction { resultReg=PReg resReg, realReg, ... }, ...} = addRealHint(resReg, realReg) | addHints{instr=ArithmeticFunction{oper=SUB, resultReg=PReg resReg, operand1=PReg op1Reg, ...}, ...} = (* Can only be one way round. *) addSourceAndDestinationHint {dst=resReg, src=op1Reg} | addHints{instr=ArithmeticFunction{resultReg=PReg resReg, operand1=PReg op1Reg, operand2, ...}, ...} = ( addSourceAndDestinationHint {dst=resReg, src=op1Reg}; case decache operand2 of RegisterArgument(PReg op2Reg) => addSourceAndDestinationHint {dst=resReg, src=op2Reg} | _ => () ) | addHints{instr=CopyToCache{source=PReg sreg, dest=PReg dreg, ...}, ...} = addSourceAndDestinationHint {src=sreg, dst=dreg} | addHints{instr=UntagValue{source=PReg sReg, dest=PReg dReg, ...}, ...} = addSourceAndDestinationHint{src=sReg, dst=dReg} | addHints{instr=ShiftOperation{resultReg=PReg resReg, operand=PReg operReg, shiftAmount=IntegerConstant _, ...}, ...} = addSourceAndDestinationHint{dst=resReg, src=operReg} | addHints{instr=ShiftOperation{resultReg=PReg resReg, operand=PReg operReg, shiftAmount=RegisterArgument(PReg shiftReg), ...}, ...} = (addSourceAndDestinationHint{dst=resReg, src=operReg}; addRealHint(shiftReg, GenReg ecx)) | addHints{instr=Multiplication{resultReg=PReg resReg, operand1=PReg op1Reg, operand2, ...}, ...} = ( addSourceAndDestinationHint{dst=resReg, src=op1Reg}; case decache operand2 of RegisterArgument(PReg op2Reg) => addSourceAndDestinationHint {dst=resReg, src=op2Reg} | _ => () ) | addHints{instr=Division{dividend=PReg regDivid, quotient=PReg regQuot, remainder=PReg regRem, ...}, ...} = (addRealHint(regDivid, GenReg eax); addRealHint(regQuot, GenReg eax); addRealHint(regRem, GenReg edx)) | addHints{instr=CompareByteVectors{vec1Addr=PReg v1Reg, vec2Addr=PReg v2Reg, length=PReg lReg, ...}, ...} = (addRealHint(v1Reg, GenReg esi); addRealHint(v2Reg, GenReg edi); addRealHint(lReg, GenReg ecx)) | addHints{instr=BlockMove{srcAddr=PReg sReg, destAddr=PReg dReg, length=PReg lReg, ...}, ...} = (addRealHint(sReg, GenReg esi); addRealHint(dReg, GenReg edi); addRealHint(lReg, GenReg ecx)) | addHints{instr=X87FPGetCondition{dest=PReg dReg, ...}, 
...} = addRealHint(dReg, GenReg eax) | addHints{instr=X87FPArith{resultReg=PReg resReg, arg1=PReg op1Reg, ...}, ...} = addSourceAndDestinationHint{dst=resReg, src=op1Reg} | addHints{instr=X87FPUnaryOps{dest=PReg resReg, source=PReg op1Reg, ...}, ...} = addSourceAndDestinationHint{dst=resReg, src=op1Reg} | addHints{instr=SSE2FPBinary{resultReg=PReg resReg, arg1=PReg op1Reg, ...}, ...} = addSourceAndDestinationHint{dst=resReg, src=op1Reg} + + | addHints{instr=AtomicExchangeAndAdd{resultReg=PReg resReg, source=PReg op1Reg, ...}, ...} = + addSourceAndDestinationHint{dst=resReg, src=op1Reg} | addHints _ = () end val allocatedRegs = Array.array(maxPRegs, NONE: reg option) val failures = ref []: intSet list ref (* Find a real register for a preg. 1. If a register is already allocated use that. 2. Try the "preferred" register if one has been given. 3. Try the realHints value if there is one. 4. See if there is a "friend" that has an appropriate register 5. Look at all the registers and find one. *) fun findRegister(r, pref, regSet) = case Array.sub(allocatedRegs, r) of SOME reg => reg | NONE => let val {conflicts, realConflicts, ...} = Vector.sub(regStates, r) (* Find the registers we've already allocated that may conflict. *) val conflictingRegs = List.mapPartial(fn i => Array.sub(allocatedRegs, i)) (setToList conflicts) @ realConflicts fun isFree aReg = not (List.exists(fn i => i=aReg) conflictingRegs) fun tryAReg NONE = NONE | tryAReg (somePref as SOME prefReg) = if isFree prefReg then (Array.update(allocatedRegs, r, somePref); somePref) else NONE fun findAReg [] = ( (* This failed. We're going to have to spill something. *) failures := conflicts :: ! failures; hd regSet (* Return a register to satisfy everything. *) ) | findAReg (reg::regs) = if isFree reg then (Array.update(allocatedRegs, r, SOME reg); reg) else findAReg regs (* Search the sources and destinations to see if a register has already been allocated or there is a hint. *) fun findAFriend([], [], _) = NONE | findAFriend(aDest :: otherDests, sources, alreadySeen) = let val possReg = case Array.sub(allocatedRegs, aDest) of v as SOME _ => tryAReg v | NONE => tryAReg(Array.sub(realHints, aDest)) in case possReg of reg as SOME _ => reg | NONE => let (* Add the destinations of the destinations to the list if they don't conflict and haven't been seen. *) fun newFriend f = not(List.exists (fn n => n=f) alreadySeen) andalso not(member(f, conflicts)) val fOfF = List.filter newFriend (Array.sub(destinationRegs, aDest)) in findAFriend(otherDests @ fOfF, sources, aDest :: alreadySeen) end end | findAFriend([], aSrc :: otherSrcs, alreadySeen) = let val possReg = case Array.sub(allocatedRegs, aSrc) of v as SOME _ => tryAReg v | NONE => tryAReg(Array.sub(realHints, aSrc)) in case possReg of reg as SOME _ => reg | NONE => let (* Add the sources of the sources to the list if they don't conflict and haven't been seen. *) fun newFriend f = not(List.exists (fn n => n=f) alreadySeen) andalso not(member(f, conflicts)) val fOfF = List.filter newFriend (Array.sub(sourceRegs, aSrc)) in findAFriend([], otherSrcs @ fOfF, aSrc :: alreadySeen) end end (* See if there is a friend that has a register already or a hint. Friends are registers that don't conflict and can possibly avoid an extra move. 
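       For example, when an ArithmeticFunction has operand1 = x and result = y, addHints records
       x as a source of y and y as a destination of x; if x has already been given a real register
       that is free and does not conflict, the same register is chosen for y and no separate move
       is needed before the operation.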
*) (* fun findAFriend([], _) = NONE | findAFriend(friend :: tail, old) = let val possReg = case Array.sub(allocatedRegs, friend) of v as SOME _ => tryAReg v | NONE => tryAReg(Array.sub(realHints, friend)) in case possReg of reg as SOME _ => reg | NONE => let (* Add a friend of a friend to the list if we haven't already seen it and it doesn't conflict. *) fun newFriend f = not(List.exists (fn n => n=f) old) andalso not(List.exists (fn n => n=f) conflicts) val fOfF = List.filter newFriend (Array.sub(friends, friend)) in findAFriend(tail @ fOfF, friend :: old) end end*) in case tryAReg pref of SOME r => r | NONE => ( case tryAReg (Array.sub(realHints, r)) of SOME r => r | NONE => ( case findAFriend(Array.sub(destinationRegs, r), Array.sub(sourceRegs, r), []) of SOME r => r (* Look through the registers to find one that's free. *) | NONE => findAReg regSet ) ) end fun allocateRegister args = ignore(findRegister args) fun allocateGeneralReg r = allocateRegister(r, NONE, generalRegisters) and allocateFloatReg r = allocateRegister(r, NONE, floatingPtRegisters) fun allocateArgument(RegisterArgument(PReg r), regSet) = allocateRegister(r, NONE, regSet) | allocateArgument(MemoryLocation{base=PReg bReg, index, cache=NONE, ...}, _) = (allocateGeneralReg bReg; allocateArgIndex index) (* Unfortunately we still have to allocate a register for the base even if we're going to use the cache. That's because the conflict sets are based on the assumption that the registers are allocated at the last occurrence (first when working from the end back) and it uses getInstructionRegisters which in turn uses argRegs which returns both the base and the cache. GetConflictSets could use a different version but we also have to take account of save registers in e.g. AllocateMemoryOperation. If we don't allocate a register because it's not needed at some point it shouldn't be allocated for the save set. *) | allocateArgument(MemoryLocation{cache=SOME(PReg r), base=PReg bReg, index, ...}, regSet) = (allocateGeneralReg bReg; allocateArgIndex index; allocateRegister(r, NONE, regSet)) | allocateArgument(StackLocation{cache=SOME(PReg r), ...}, regSet) = allocateRegister(r, NONE, regSet) | allocateArgument _ = () and allocateArgGeneral arg = allocateArgument(arg, generalRegisters) and allocateArgFloat arg = allocateArgument(arg, floatingPtRegisters) and allocateArgIndex NoMemIndex = () | allocateArgIndex(MemIndex1(PReg r)) = allocateGeneralReg r | allocateArgIndex(MemIndex2(PReg r)) = allocateGeneralReg r | allocateArgIndex(MemIndex4(PReg r)) = allocateGeneralReg r | allocateArgIndex(MemIndex8(PReg r)) = allocateGeneralReg r | allocateArgIndex ObjectIndex = () (* Return the register part of a cached item. We must still, unfortunately, ensure that a register is allocated for base registers because we're assuming that a register is allocated on the last occurrence and this might be it. 
*) fun decache(StackLocation{cache=SOME r, ...}) = RegisterArgument r | decache(MemoryLocation{cache=SOME r, base=PReg bReg, ...}) = (allocateGeneralReg bReg; RegisterArgument r) | decache arg = arg val allocateFindRegister = findRegister fun registerAllocate({instr=LoadArgument{source, dest=PReg dreg, kind}, ...}) = let val regSet = case kind of MoveFloat => floatingPtRegisters | MoveDouble => floatingPtRegisters | _ => generalRegisters val realDestReg = findRegister(dreg, NONE, regSet) in (* We previously used decache here but that has the disadvantage that it may allocate the destination register as the base register resulting in it not being available as the cache register. *) case source of RegisterArgument(PReg sreg) => allocateRegister(sreg, SOME realDestReg, regSet) | StackLocation{cache=SOME(PReg sreg), ...} => allocateRegister(sreg, SOME realDestReg, regSet) | MemoryLocation{cache=SOME(PReg sreg), base=PReg bReg, ...} => ( (* Cached source. Allocate this first. *) allocateRegister(sreg, SOME realDestReg, regSet); (* We need to allocate a register but do it afterwards. *) allocateGeneralReg bReg ) | source => allocateArgument(source, regSet) end | registerAllocate({instr=StoreArgument{ source, base=PReg bReg, index, kind, ... }, ...}) = ( case (decache source, kind) of (RegisterArgument(PReg sReg), MoveByte) => if targetArch <> Native32Bit then (allocateArgGeneral source; allocateGeneralReg bReg; allocateArgIndex index) else (* This is complicated on X86/32. We can't use edi or esi for the store registers. Instead we reserve ecx (see special case in "identify") and use that if we have to. *) ( allocateRegister(sReg, SOME(GenReg ecx), generalRegisters); allocateGeneralReg bReg; allocateArgIndex index ) | _ => let val regSet = case kind of MoveFloat => floatingPtRegisters | MoveDouble => floatingPtRegisters | _ => generalRegisters in allocateArgument(source, regSet); allocateGeneralReg bReg; allocateArgIndex index end ) | registerAllocate{instr=LoadMemReg { dest=PReg pr, ...}, ...} = allocateGeneralReg pr | registerAllocate{instr=StoreMemReg { source=PReg pr, ...}, ...} = allocateGeneralReg pr | registerAllocate{instr=BeginFunction _, ...} = () (* Any registers that are referenced will have been allocated real registers. *) | registerAllocate({instr=TailRecursiveCall{regArgs=oRegArgs, stackArgs=oStackArgs, workReg=PReg wReg, ...}, ...}) = let val regArgs = List.map (fn (arg, reg) => (decache arg, reg)) oRegArgs and stackArgs = List.map(fn {src, stack } => {src=decache src, stack=stack}) oStackArgs fun allocateRegArg(argReg, GenReg _) = allocateArgGeneral argReg | allocateRegArg(argReg, XMMReg _) = allocateArgument(argReg, floatingPtRegisters) | allocateRegArg(_, FPReg _) = raise InternalError "allocateRegArg" (* Never used. *) in allocateGeneralReg wReg; List.app (allocateArgGeneral o #src) stackArgs; (* We've already hinted the arguments. *) List.app allocateRegArg regArgs end | registerAllocate({instr=FunctionCall{regArgs=oRegArgs, stackArgs=oStackArgs, dest=PReg dReg, realDest, saveRegs, ...}, ...}) = let val regArgs = List.map (fn (arg, reg) => (decache arg, reg)) oRegArgs and stackArgs = List.map decache oStackArgs fun allocateRegArg(argReg, GenReg _) = allocateArgGeneral argReg | allocateRegArg(argReg, XMMReg _) = allocateArgument(argReg, floatingPtRegisters) | allocateRegArg(_, FPReg _) = raise InternalError "allocateRegArg" (* Never used. *) in List.app(fn (PReg r) => allocateGeneralReg r) saveRegs; (* Result will be in rax/fp0/xmm0. 
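       realDest is the concrete register determined by the kind of result - rax for a general
       value, fp0 for an X87 floating point result, xmm0 for an SSE2 result - and passing the
       singleton list [realDest] below restricts the destination pseudo-register to exactly
       that register.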
*) allocateRegister(dReg, SOME realDest, [realDest]); List.app allocateArgGeneral stackArgs; (* We've already hinted the arguments. *) List.app allocateRegArg regArgs end | registerAllocate({instr=AllocateMemoryOperation{ dest=PReg dReg, saveRegs, ...}, ...}) = ( List.app(fn (PReg r) => allocateGeneralReg r) saveRegs; allocateGeneralReg dReg ) | registerAllocate({instr=AllocateMemoryVariable{size=PReg sReg, dest=PReg dReg, saveRegs}, ...}) = ( List.app(fn (PReg r) => allocateGeneralReg r) saveRegs; allocateGeneralReg dReg; allocateGeneralReg sReg ) | registerAllocate({instr=InitialiseMem{size=PReg sReg, addr=PReg aReg, init=PReg iReg}, ...}) = ( (* We are going to use rep stosl/q to set the memory. That requires the length to be in ecx, the initialiser to be in eax and the destination to be edi. *) allocateRegister(aReg, SOME(GenReg edi), generalRegisters); allocateRegister(iReg, SOME(GenReg eax), generalRegisters); allocateRegister(sReg, SOME(GenReg ecx), generalRegisters) ) | registerAllocate{instr=InitialisationComplete, ...} = () | registerAllocate{instr=BeginLoop, ...} = () | registerAllocate({instr=JumpLoop{regArgs, stackArgs, checkInterrupt, workReg}, ...}) = ( case workReg of SOME(PReg r) => allocateGeneralReg r | NONE => (); List.app (fn (src, _, _) => allocateArgGeneral src) stackArgs; List.app (fn (a, PReg r) => (allocateArgGeneral a; allocateGeneralReg r)) regArgs; case checkInterrupt of SOME regs => List.app(fn PReg r => allocateGeneralReg r) regs | NONE => () ) | registerAllocate({instr=RaiseExceptionPacket{ packetReg=PReg preg }, ...}) = (* The argument must be put into rax. *) allocateRegister(preg, SOME(GenReg eax), generalRegisters) | registerAllocate{instr=ReserveContainer _, ...} = () | registerAllocate({instr=IndexedCaseOperation{testReg=PReg tReg, workReg=PReg wReg}, ...}) = ( allocateRegister(tReg, NONE, generalRegisters); allocateRegister(wReg, NONE, generalRegisters) ) | registerAllocate({instr=LockMutable{addr=PReg pr}, ...}) = allocateRegister(pr, NONE, generalRegisters) | registerAllocate({instr=WordComparison{ arg1=PReg arg1Reg, arg2, ... }, ...}) = ( allocateRegister(arg1Reg, NONE, generalRegisters); allocateArgGeneral arg2 ) | registerAllocate({instr=CompareLiteral{ arg1, ... }, ...}) = allocateArgGeneral arg1 | registerAllocate({instr=CompareByteMem{ arg1={base=PReg bReg, index, ...}, ...}, ...}) = (allocateGeneralReg bReg; allocateArgIndex index) (* Set up an exception handler. *) | registerAllocate({instr=PushExceptionHandler{workReg=PReg hReg}, ...}) = allocateGeneralReg hReg (* Pop an exception handler at the end of a handled section. Executed if no exception has been raised. This removes items from the stack. *) | registerAllocate({instr=PopExceptionHandler{workReg=PReg wReg, ...}, ...}) = allocateGeneralReg wReg (* Start of a handler. Sets the address associated with PushExceptionHandler and provides a register for the packet.*) | registerAllocate({instr=BeginHandler{packetReg=PReg pReg, workReg=PReg wReg}, ...}) = ( (* The exception packet is in rax. *) allocateRegister(pReg, SOME(GenReg eax), generalRegisters); allocateGeneralReg wReg ) | registerAllocate({instr=ReturnResultFromFunction { resultReg=PReg resReg, realReg, ... 
}, ...}) = allocateRegister(resReg, SOME realReg, [realReg] (* It MUST be in this register *)) | registerAllocate{instr=ArithmeticFunction{oper=SUB, resultReg=PReg resReg, operand1=PReg op1Reg, operand2, ...}, ...} = (* Subtraction - Unlike the other arithmetic operations we can't put the second argument into the result register and then do the operation. *) let val realDestReg = findRegister(resReg, NONE, generalRegisters) (* Try to put the argument into the same register as the result. *) in allocateRegister(op1Reg, SOME realDestReg, generalRegisters); allocateArgGeneral operand2 end | registerAllocate({instr=ArithmeticFunction{resultReg=PReg resReg, operand1=PReg op1Reg, operand2, ...}, ...}) = let val realDestReg = findRegister(resReg, NONE, generalRegisters) val () = allocateRegister(op1Reg, SOME realDestReg, generalRegisters) in case decache operand2 of RegisterArgument(PReg op2Reg) => (* Arithmetic operation with both arguments as registers. These operations are all symmetric so we can try to put either argument into the result reg and then do the operation on the other arg. *) allocateRegister(op2Reg, SOME realDestReg, generalRegisters) | operand2 => allocateArgGeneral operand2 end | registerAllocate({instr=TestTagBit{arg, ...}, ...}) = allocateArgGeneral arg | registerAllocate({instr=PushValue {arg, ...}, ...}) = allocateArgGeneral arg | registerAllocate({instr=CopyToCache{source=PReg sreg, dest=PReg dreg, kind}, ...}) = let val regSet = case kind of MoveFloat => floatingPtRegisters | MoveDouble => floatingPtRegisters | _ => generalRegisters val realDestReg = findRegister(dreg, NONE, regSet) in (* Get the source register using the current destination as a preference. *) allocateRegister(sreg, SOME realDestReg, regSet) end | registerAllocate({instr=ResetStackPtr _, ...}) = () | registerAllocate({instr=StoreToStack{ source, ... }, ...}) = allocateArgument(source, generalRegisters) | registerAllocate({instr=TagValue{source=PReg srcReg, dest=PReg dReg, ...}, ...}) = ( (* Since we're using LEA to tag there's no cost to using a different reg. 
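       Tagging a short value n produces 2*n+1, which LEA can compute in a single instruction with
       a separate destination (roughly lea dst, [src+src+1]), so unlike the two-operand arithmetic
       instructions there is nothing to gain from steering source and destination into the same
       register.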
*) allocateRegister(dReg, NONE, generalRegisters); allocateRegister(srcReg, NONE, generalRegisters) ) | registerAllocate({instr=UntagValue{source=PReg sReg, dest=PReg dReg, cache, ...}, ...}) = let val regResult = findRegister(dReg, NONE, generalRegisters) val () = case cache of SOME(PReg cReg) => allocateRegister(cReg, SOME regResult, generalRegisters) | NONE => () in allocateRegister(sReg, SOME regResult, generalRegisters) end | registerAllocate({instr=LoadEffectiveAddress{base, index, dest=PReg dReg, ...}, ...}) = ( allocateGeneralReg dReg; case base of SOME(PReg br) => allocateGeneralReg br | _ => (); allocateArgIndex index ) | registerAllocate({instr=ShiftOperation{resultReg=PReg resReg, operand=PReg operReg, shiftAmount=IntegerConstant _, ...}, ...}) = let val realDestReg = findRegister(resReg, NONE, generalRegisters) in allocateRegister(operReg, SOME realDestReg, generalRegisters) end | registerAllocate({instr=ShiftOperation{resultReg=PReg resReg, operand=PReg operReg, shiftAmount=RegisterArgument(PReg shiftReg), ...}, ...}) = let val realDestReg = findRegister(resReg, NONE, generalRegisters) in allocateRegister(shiftReg, SOME(GenReg ecx), generalRegisters); allocateRegister(operReg, SOME realDestReg, generalRegisters) end | registerAllocate{instr=ShiftOperation _, ...} = raise InternalError "registerAllocate - ShiftOperation" | registerAllocate({instr= Multiplication{resultReg=PReg resReg, operand1=PReg op1Reg, operand2, ...}, ...}) = let val realDestReg = findRegister(resReg, NONE, generalRegisters) val () = allocateRegister(op1Reg, SOME realDestReg, generalRegisters) in case decache operand2 of RegisterArgument(PReg op2Reg) => (* Treat exactly the same as ArithmeticFunction. *) allocateRegister(op2Reg, SOME realDestReg, generalRegisters) | operand2 => allocateArgGeneral operand2 end | registerAllocate({instr=Division{dividend=PReg regDivid, divisor, quotient=PReg regQuot, remainder=PReg regRem, ...}, ...}) = ( (* Division is specific as to the registers. The dividend must be eax, quotient is eax and the remainder is edx. 
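       This mirrors the x86 DIV/IDIV instructions, which take the extended dividend in RDX:RAX and
       leave the quotient in RAX and the remainder in RDX, so all three pseudo-registers are given
       those real registers as their preferred allocation.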
*) allocateRegister(regDivid, SOME(GenReg eax), generalRegisters); allocateRegister(regQuot, SOME(GenReg eax), generalRegisters); allocateRegister(regRem, SOME(GenReg edx), generalRegisters); allocateArgGeneral divisor ) - | registerAllocate({instr=AtomicExchangeAndAdd{base=PReg bReg, source=PReg sReg}, ...}) = - (allocateGeneralReg sReg; allocateGeneralReg bReg) + | registerAllocate({instr=AtomicExchangeAndAdd{base=PReg bReg, source=PReg sReg, resultReg=PReg rReg}, ...}) = + (allocateGeneralReg sReg; allocateGeneralReg bReg; allocateGeneralReg rReg) | registerAllocate({instr=BoxValue{boxKind, source=PReg sReg, dest=PReg dReg, saveRegs}, ...}) = ( List.app(fn (PReg r) => allocateGeneralReg r) saveRegs; case boxKind of BoxLargeWord => allocateGeneralReg sReg | BoxX87Double => allocateFloatReg sReg | BoxX87Float => allocateFloatReg sReg | BoxSSE2Float => allocateFloatReg sReg | BoxSSE2Double => allocateFloatReg sReg; allocateGeneralReg dReg ) | registerAllocate({instr=CompareByteVectors{vec1Addr=PReg v1Reg, vec2Addr=PReg v2Reg, length=PReg lReg, ...}, ...}) = ( allocateRegister(v1Reg, SOME(GenReg esi), generalRegisters); allocateRegister(v2Reg, SOME(GenReg edi), generalRegisters); allocateRegister(lReg, SOME(GenReg ecx), generalRegisters) ) | registerAllocate({instr=BlockMove{srcAddr=PReg sReg, destAddr=PReg dReg, length=PReg lReg, ...}, ...}) = ( allocateRegister(sReg, SOME(GenReg esi), generalRegisters); allocateRegister(dReg, SOME(GenReg edi), generalRegisters); allocateRegister(lReg, SOME(GenReg ecx), generalRegisters) ) | registerAllocate{instr=X87Compare{arg1=PReg arg1Reg, arg2, ...}, ...} = (allocateRegister(arg1Reg, NONE, floatingPtRegisters); allocateArgFloat arg2) | registerAllocate{instr=SSE2Compare{arg1=PReg arg1Reg, arg2, ...}, ...} = (allocateRegister(arg1Reg, NONE, floatingPtRegisters); allocateArgFloat arg2) | registerAllocate({instr=X87FPGetCondition{dest=PReg dReg, ...}, ...}) = (* We can only use RAX here. 
*) allocateRegister(dReg, SOME(GenReg eax), generalRegisters) | registerAllocate({instr=X87FPArith{resultReg=PReg resReg, arg1=PReg op1Reg, arg2, ...}, ...}) = let val realDestReg = findRegister(resReg, NONE, floatingPtRegisters) in allocateRegister(op1Reg, SOME realDestReg, floatingPtRegisters); allocateArgFloat arg2 end | registerAllocate({instr=X87FPUnaryOps{dest=PReg resReg, source=PReg op1Reg, ...}, ...}) = let val realDestReg = findRegister(resReg, NONE, floatingPtRegisters) in allocateRegister(op1Reg, SOME realDestReg, floatingPtRegisters) end | registerAllocate({instr=X87Float{dest=PReg resReg, source}, ...}) = (allocateArgGeneral source; allocateRegister(resReg, NONE, floatingPtRegisters)) | registerAllocate({instr=SSE2Float{dest=PReg resReg, source}, ...}) = (allocateArgGeneral source; allocateRegister(resReg, NONE, floatingPtRegisters)) | registerAllocate({instr=SSE2FPUnary{resultReg=PReg resReg, source, ...}, ...}) = ( allocateRegister(resReg, NONE, floatingPtRegisters); allocateArgFloat source ) | registerAllocate({instr=SSE2FPBinary{resultReg=PReg resReg, arg1=PReg op1Reg, arg2, ...}, ...}) = let val realDestReg = findRegister(resReg, NONE, floatingPtRegisters) in allocateRegister(op1Reg, SOME realDestReg, floatingPtRegisters); allocateArgFloat arg2 end | registerAllocate({instr=TagFloat{dest=PReg resReg, source=PReg sReg, ...}, ...}) = ( allocateRegister(resReg, NONE, generalRegisters); allocateRegister(sReg, NONE, floatingPtRegisters) ) | registerAllocate({instr=UntagFloat{source, dest=PReg dReg, cache, ...}, ...}) = let val regResult = findRegister(dReg, NONE, floatingPtRegisters) val () = case cache of SOME(PReg cReg) => allocateRegister(cReg, SOME regResult, floatingPtRegisters) | NONE => () in allocateArgGeneral source end | registerAllocate({instr=GetSSE2ControlReg{dest=PReg destReg}, ...}) = allocateRegister(destReg, NONE, generalRegisters) | registerAllocate({instr=SetSSE2ControlReg{source=PReg srcReg}, ...}) = allocateRegister(srcReg, NONE, generalRegisters) | registerAllocate({instr=GetX87ControlReg{dest=PReg destReg}, ...}) = allocateRegister(destReg, NONE, generalRegisters) | registerAllocate({instr=SetX87ControlReg{source=PReg srcReg}, ...}) = allocateRegister(srcReg, NONE, generalRegisters) | registerAllocate({instr=X87RealToInt{source=PReg srcReg, dest=PReg destReg}, ...}) = ( allocateRegister(srcReg, NONE, floatingPtRegisters); allocateRegister(destReg, NONE, generalRegisters) ) | registerAllocate({instr=SSE2RealToInt{source, dest=PReg destReg, ...}, ...}) = ( allocateRegister(destReg, NONE, generalRegisters); allocateArgFloat source ) | registerAllocate({instr=SignExtend32To64{source, dest=PReg destReg, ...}, ...}) = ( allocateRegister(destReg, NONE, generalRegisters); allocateArgGeneral source ) | registerAllocate({instr=TouchArgument{source=PReg srcReg}, ...}) = allocateRegister(srcReg, NONE, generalRegisters) + + | registerAllocate({instr=PauseCPU, ...}) = () (* Depth-first scan. *) val visited = Array.array(numBlocks, false) fun processBlocks blockNo = if Array.sub(visited, blockNo) then () (* Done or currently being done. *) else let val () = Array.update(visited, blockNo, true) val ExtendedBasicBlock { flow, block, passThrough, exports, ...} = Vector.sub(blocks, blockNo) (* Add the hints for this block before the actual allocation of registers. *) val _ = List.app addHints block val () = (* Process the dependencies first. 
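(* Minimal sketch of the traversal pattern used by processBlocks, assuming blocks
   are numbered 0..numBlocks-1 and `successors` gives the control-flow successors
   of a block; like processBlocks it visits the successors before processing the
   block itself. *)
fun depthFirst (numBlocks, successors: int -> int list) processBlock start =
let
    val seen = Array.array (numBlocks, false)
    fun visit b =
        if Array.sub (seen, b) then ()
        else
        (
            Array.update (seen, b, true);
            List.app visit (successors b);   (* dependencies first *)
            processBlock b                   (* now this block *)
        )
in
    visit start
end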
*) case flow of ExitCode => () | Unconditional m => processBlocks m | Conditional {trueJump, falseJump, ...} => (processBlocks trueJump; processBlocks falseJump) | IndexedBr cases => List.app processBlocks cases | SetHandler{ handler, continue } => (processBlocks handler; processBlocks continue) | UnconditionalHandle _ => () | ConditionalHandle { continue, ...} => processBlocks continue (* Now this block. *) local (* We assume that anything used later will have been allocated a register. This is generally true except for a loop where the use may occur earlier. *) val exported = setToList passThrough @ setToList exports fun findAReg r = case Vector.sub(regProps, r) of RegPropStack _ => () | _ => ignore(allocateFindRegister(r, NONE, generalRegisters)) in val () = List.app findAReg exported end in List.foldr(fn (c, ()) => registerAllocate c) () block end in processBlocks 0; (* If the failures list is empty we succeeded. *) case !failures of [] => (* Return the allocation vector. If a register isn't used replace it with rax. *) AllocateSuccess(Vector.tabulate(maxPRegs, fn i => getOpt(Array.sub(allocatedRegs, i), GenReg eax))) (* Else we'll have to spill something. *) | l => AllocateFailure l end structure Sharing = struct type intSet = intSet and extendedBasicBlock = extendedBasicBlock and regProperty = regProperty and reg = reg and allocateResult = allocateResult end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86CODESIG.sml b/mlsource/MLCompiler/CodeTree/X86Code/X86CODESIG.sml index 7ee5c458..660f222d 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86CODESIG.sml +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86CODESIG.sml @@ -1,232 +1,233 @@ (* - Copyright David C. J. Matthews 2010, 2012, 2016-19 + Copyright David C. J. Matthews 2010, 2012, 2016-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) signature X86CODESIG = sig type machineWord = Address.machineWord type short = Address.short type address = Address.address type closureRef type code (* Registers. *) datatype genReg = GeneralReg of Word8.word * bool and fpReg = FloatingPtReg of Word8.word and xmmReg = SSE2Reg of Word8.word datatype reg = GenReg of genReg | FPReg of fpReg | XMMReg of xmmReg val is32bit: LargeInt.int -> bool val eax: genReg and ebx: genReg and ecx: genReg and edx: genReg and edi: genReg and esi: genReg and esp: genReg and ebp: genReg and rax: genReg and rbx: genReg and rcx: genReg and rdx: genReg and rdi: genReg and rsi: genReg and rsp: genReg and rbp: genReg and r8: genReg and r9: genReg and r10: genReg and r11: genReg and r12: genReg and r13: genReg and r14: genReg and r15: genReg and fp0: fpReg and fp1: fpReg and fp2: fpReg and fp3: fpReg and fp4: fpReg and fp5: fpReg and fp6: fpReg and fp7: fpReg and xmm0:xmmReg and xmm1:xmmReg and xmm2:xmmReg and xmm3:xmmReg and xmm4:xmmReg and xmm5:xmmReg and xmm6:xmmReg and xmm7:xmmReg (* For vector indexing we provide a numbering for the registers. 
*) val regs: int val regN: int -> reg val nReg: reg -> int val regRepr: reg -> string (* May be targeted at native 32-bit, native 64-bit or X86/64 with 32-bit words and addresses as object Ids. *) datatype targetArch = Native32Bit | Native64Bit | ObjectId32Bit val targetArch: targetArch type addrs val addrZero: addrs structure RegSet: sig eqtype regSet val singleton: reg -> regSet val allRegisters: regSet (* All registers: data, address, floating pt. *) val generalRegisters: regSet val floatingPtRegisters: regSet val sse2Registers: regSet val noRegisters: regSet val isAllRegs: regSet->bool val regSetUnion: regSet * regSet -> regSet val regSetIntersect: regSet * regSet -> regSet val listToSet: reg list -> regSet val setToList: regSet -> reg list val regSetMinus: regSet * regSet -> regSet val inSet: reg * regSet -> bool val cardinality: regSet -> int val regSetRepr: regSet -> string val oneOf: regSet -> reg end (* Size of operand. OpSize64 is only valid in 64-bit mode. *) datatype opSize = OpSize32 | OpSize64 datatype arithOp = ADD | OR (*|ADC | SBB*) | AND | SUB | XOR | CMP and shiftType = SHL | SHR | SAR and repOps = CMPS8 | MOVS8 | MOVS32 | STOS8 | STOS32 | MOVS64 | STOS64 and fpOps = FADD | FMUL | FCOM | FCOMP | FSUB | FSUBR | FDIV | FDIVR and fpUnaryOps = FABS | FCHS | FLD1 | FLDZ and branchOps = JO | JNO | JE | JNE | JL | JGE | JLE | JG | JB | JNB | JNA | JA | JP | JNP and sse2Operations = SSE2MoveDouble | SSE2MoveFloat | SSE2CompDouble | SSE2AddDouble | SSE2SubDouble | SSE2MulDouble | SSE2DivDouble | SSE2Xor | SSE2And | SSE2FloatToDouble | SSE2DoubleToFloat | SSE2CompSingle | SSE2AddSingle | SSE2SubSingle | SSE2MulSingle | SSE2DivSingle val invertTest: branchOps -> branchOps datatype label = Label of { labelNo: int } datatype indexType = NoIndex | Index1 of genReg | Index2 of genReg | Index4 of genReg | Index8 of genReg type memoryAddress = { base: genReg, offset: int, index: indexType } datatype 'reg regOrMemoryArg = RegisterArg of 'reg | MemoryArg of memoryAddress | NonAddressConstArg of LargeInt.int | AddressConstArg of machineWord datatype moveSize = Move64 | Move32 | Move8 | Move16 | Move32X64 | Move8X32 | Move8X64 | Move16X32 | Move16X64 and fpSize = SinglePrecision | DoublePrecision datatype trapEntries = StackOverflowCall | StackOverflowCallEx | HeapOverflowCall datatype operation = Move of { source: genReg regOrMemoryArg, destination: genReg regOrMemoryArg, moveSize: moveSize } | PushToStack of genReg regOrMemoryArg | PopR of genReg | ArithToGenReg of { opc: arithOp, output: genReg, source: genReg regOrMemoryArg, opSize: opSize } | ArithMemConst of { opc: arithOp, address: memoryAddress, source: LargeInt.int, opSize: opSize } | ArithMemLongConst of { opc: arithOp, address: memoryAddress, source: machineWord } | ArithByteMemConst of { opc: arithOp, address: memoryAddress, source: Word8.word } | ShiftConstant of { shiftType: shiftType, output: genReg, shift: Word8.word, opSize: opSize } | ShiftVariable of { shiftType: shiftType, output: genReg, opSize: opSize } (* Shift amount is in ecx *) | ConditionalBranch of { test: branchOps, label: label } | SetCondition of { output: genReg, test: branchOps } | LoadAddress of { output: genReg, offset: int, base: genReg option, index: indexType, opSize: opSize } | TestByteBits of { arg: genReg regOrMemoryArg, bits: Word8.word } | CallRTS of {rtsEntry: trapEntries, saveRegs: genReg list } | AllocStore of { size: int, output: genReg, saveRegs: genReg list } | AllocStoreVariable of { size: genReg, output: genReg, saveRegs: genReg list } | 
StoreInitialised | CallAddress of genReg regOrMemoryArg | JumpAddress of genReg regOrMemoryArg | ReturnFromFunction of int | RaiseException of { workReg: genReg } | UncondBranch of label | ResetStack of { numWords: int, preserveCC: bool } | JumpLabel of label | LoadLabelAddress of { label: label, output: genReg } | RepeatOperation of repOps | DivideAccR of {arg: genReg, isSigned: bool, opSize: opSize } | DivideAccM of {base: genReg, offset: int, isSigned: bool, opSize: opSize } | AtomicXAdd of {address: memoryAddress, output: genReg, opSize: opSize } | FPLoadFromMemory of { address: memoryAddress, precision: fpSize } | FPLoadFromFPReg of { source: fpReg, lastRef: bool } | FPLoadFromConst of { constant: machineWord, precision: fpSize } | FPStoreToFPReg of { output: fpReg, andPop: bool } | FPStoreToMemory of { address: memoryAddress, precision: fpSize, andPop: bool } | FPArithR of { opc: fpOps, source: fpReg } | FPArithConst of { opc: fpOps, source: machineWord, precision: fpSize } | FPArithMemory of { opc: fpOps, base: genReg, offset: int, precision: fpSize } | FPUnary of fpUnaryOps | FPStatusToEAX | FPLoadInt of { base: genReg, offset: int, opSize: opSize } | FPFree of fpReg | MultiplyR of { source: genReg regOrMemoryArg, output: genReg, opSize: opSize } | XMMArith of { opc: sse2Operations, source: xmmReg regOrMemoryArg, output: xmmReg } | XMMStoreToMemory of { toStore: xmmReg, address: memoryAddress, precision: fpSize } | XMMConvertFromInt of { source: genReg, output: xmmReg, opSize: opSize } | SignExtendForDivide of opSize | XChng of { reg: genReg, arg: genReg regOrMemoryArg, opSize: opSize } | Negative of { output: genReg, opSize: opSize } | JumpTable of { cases: label list, jumpSize: jumpSize ref } | IndexedJumpCalc of { addrReg: genReg, indexReg: genReg, jumpSize: jumpSize ref } | MoveXMMRegToGenReg of { source: xmmReg, output: genReg } | MoveGenRegToXMMReg of { source: genReg, output: xmmReg } | XMMShiftRight of { output: xmmReg, shift: Word8.word } | FPLoadCtrlWord of memoryAddress (* Load FP control word. *) | FPStoreCtrlWord of memoryAddress (* Store FP control word. *) | XMMLoadCSR of memoryAddress (* Load combined control/status word. *) | XMMStoreCSR of memoryAddress (* Store combined control/status word. *) | FPStoreInt of memoryAddress | XMMStoreInt of { source: xmmReg regOrMemoryArg, output: genReg, precision: fpSize, isTruncate: bool } | CondMove of { test: branchOps, output: genReg, source: genReg regOrMemoryArg, opSize: opSize } | LoadAbsolute of { destination: genReg, value: machineWord } + | PauseForSpinLock and jumpSize = JumpSize2 | JumpSize8 type operations = operation list val printOperation: operation * (string -> unit) -> unit val codeCreate: string * machineWord * Universal.universal list -> code (* makes the initial segment. *) (* Code generate operations and construct the final code. *) val generateCode: { ops: operations, code: code, labelCount: int, resultClosure: closureRef } -> unit val memRegLocalMPointer: int and memRegHandlerRegister: int and memRegLocalMbottom: int and memRegStackLimit: int and memRegExceptionPacket: int and memRegCStackPtr: int and memRegThreadSelf: int and memRegStackPtr: int and memRegSavedRbx: int (* Debugging controls and streams for optimiser. 
*) val lowLevelOptimise: code -> bool val printLowLevelCode: operation list * code -> unit structure Sharing: sig type code = code and reg = reg and genReg = genReg and fpReg = fpReg and addrs = addrs and operation = operation and regSet = RegSet.regSet and label = label and branchOps = branchOps and arithOp = arithOp and shiftType = shiftType and repOps = repOps and fpOps = fpOps and fpUnaryOps = fpUnaryOps and sse2Operations = sse2Operations and opSize = opSize and closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML index 8b6ca56c..e6ad38db 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86CodetreeToICode.ML @@ -1,4080 +1,4089 @@ (* - Copyright David C. J. Matthews 2016-20 + Copyright David C. J. Matthews 2016-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86CodetreeToICode( structure BACKENDTREE: BackendIntermediateCodeSig structure ICODE: ICodeSig structure DEBUG: DEBUG structure X86FOREIGN: FOREIGNCALLSIG structure ICODETRANSFORM: X86ICODETRANSFORMSIG structure CODE_ARRAY: CODEARRAYSIG sharing ICODE.Sharing = ICODETRANSFORM.Sharing = CODE_ARRAY.Sharing ): GENCODESIG = struct open BACKENDTREE open Address open ICODE open CODE_ARRAY exception InternalError = Misc.InternalError local val regs = case targetArch of Native32Bit => [eax, ebx] | Native64Bit => [eax, ebx, r8, r9, r10] | ObjectId32Bit => [eax, esi, r8, r9, r10] val fpResult = case targetArch of Native32Bit => FPReg fp0 | _ => XMMReg xmm0 val fpArgRegs = case targetArch of Native32Bit => [] | _ => [xmm0, xmm1, xmm2] in val generalArgRegs = List.map GenReg regs val floatingPtArgRegs = List.map XMMReg fpArgRegs fun resultReg GeneralType = GenReg eax | resultReg DoubleFloatType = fpResult | resultReg SingleFloatType = fpResult end (* tag a short constant *) fun tag c = 2 * c + 1 (* shift a short constant, but don't set tag bit *) fun semitag c = 2 * c (* Reverse a list and append the second. This is used a lot when converting between the reverse and forward list versions. e.g. 
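(* Worked example (sketch, kept separate from the definitions above): the ML
   integer n is represented as the machine word 2*n+1, so the bottom bit is always
   set for a tagged integer and clear for a word-aligned address. *)
local
    fun tagDemo c = 2 * c + 1          (* mirrors tag above *)
    and untagDemo w = (w - 1) div 2
in
    val seven = tagDemo 3              (* ML 3 is held as machine word 7 *)
    val three = untagDemo seven
end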
codeToICode and codeToICodeRev *) fun revApp([], l) = l | revApp(hd :: tl, l) = revApp(tl, hd :: l) datatype blockStruct = BlockSimple of x86ICode | BlockExit of x86ICode | BlockLabel of int | BlockFlow of controlFlow | BlockBegin of { regArgs: (preg * reg) list, stackArgs: stackLocn list } | BlockRaiseAndHandle of x86ICode * int | BlockOptionalHandle of {call: x86ICode, handler: int, label: int } local open RunCall val F_mutable_bytes = Word.fromLargeWord(Word8.toLargeWord(Word8.orb (F_mutable, F_bytes))) fun makeRealConst l = let val r = allocateByteMemory(0wx8 div bytesPerWord, F_mutable_bytes) fun setBytes([], _) = () | setBytes(hd::tl, n) = (storeByte(r, n, hd); setBytes(tl, n+0wx1)) val () = setBytes(l, 0w0) val () = clearMutableBit r in r end in (* These are floating point constants used to change and mask the sign bit. *) val realSignBit: machineWord = makeRealConst [0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx00, 0wx80] and realAbsMask: machineWord = makeRealConst [0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wxff, 0wx7f] and floatSignBit: machineWord = makeRealConst [0wx00, 0wx00, 0wx00, 0wx80, 0wx00, 0wx00, 0wx00, 0wx00] and floatAbsMask: machineWord = makeRealConst [0wxff, 0wxff, 0wxff, 0wx7f, 0wx00, 0wx00, 0wx00, 0wx00] end datatype commutative = Commutative | NonCommutative (* Check that a large-word constant looks right and get the value as a large int*) fun largeWordConstant value = if isShort value then raise InternalError "largeWordConstant: invalid" else let val addr = toAddress value in if length addr <> nativeWordSize div wordSize orelse flags addr <> F_bytes then raise InternalError "largeWordConstant: invalid" else (); LargeWord.toLargeInt(RunCall.unsafeCast addr) end fun codeFunctionToX86({body, localCount, name, argTypes, resultType=fnResultType, closure, ...}:bicLambdaForm, debugSwitches, resultClosure) = let (* Pseudo-registers are allocated sequentially and the properties added to the list. *) val pregCounter = ref 0 val pregPropList = ref [] fun newPReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropGeneral :: !pregPropList in PReg regNo end and newUReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropUntagged :: !pregPropList in PReg regNo end and newStackLoc size = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropStack size :: !pregPropList in StackLoc{size=size, rno=regNo} end and newMergeReg() = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropMultiple :: !pregPropList in PReg regNo end datatype locationValue = NoLocation | PregLocation of preg | ContainerLocation of { container: stackLocn, stackOffset: int } val locToPregArray = Array.array(localCount, NoLocation) val labelCounter = ref 1 (* Start at 1. Zero is used for the root. *) fun newLabel() = !labelCounter before labelCounter := !labelCounter + 1 val ccRefCounter = ref 0 fun newCCRef() = CcRef(!ccRefCounter) before ccRefCounter := !ccRefCounter + 1 fun constantAsArgument value = if isShort value then IntegerConstant(tag(Word.toLargeIntX(toShort value))) else AddressConstant value (* Create the branch condition from the test, isSigned and jumpOn values. (In)equality tests are the same for signed and unsigned values. 
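(* Why the isSigned flag matters (illustration only): the same bit pattern orders
   differently as a signed and as an unsigned value, which is why TestLess becomes
   JL for signed comparisons but JB (below) for unsigned ones. *)
val signedLess = ~1 < 1                                    (* true *)
val unsignedLess = Word.fromInt (~1) < Word.fromInt 1      (* false: the all-ones word is the largest *)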
*) local open BuiltIns in fun testAsBranch(TestEqual, _, true) = JE | testAsBranch(TestEqual, _, false) = JNE (* Signed tests *) | testAsBranch(TestLess, true, true) = JL | testAsBranch(TestLess, true, false) = JGE | testAsBranch(TestLessEqual, true, true) = JLE | testAsBranch(TestLessEqual, true, false) = JG | testAsBranch(TestGreater, true, true) = JG | testAsBranch(TestGreater, true, false) = JLE | testAsBranch(TestGreaterEqual, true, true) = JGE | testAsBranch(TestGreaterEqual, true, false) = JL (* Unsigned tests *) | testAsBranch(TestLess, false, true) = JB | testAsBranch(TestLess, false, false) = JNB | testAsBranch(TestLessEqual, false, true) = JNA | testAsBranch(TestLessEqual, false, false) = JA | testAsBranch(TestGreater, false, true) = JA | testAsBranch(TestGreater, false, false) = JNA | testAsBranch(TestGreaterEqual, false, true) = JNB | testAsBranch(TestGreaterEqual, false, false) = JB | testAsBranch(TestUnordered, _, _) = raise InternalError "TestUnordered" (* Switch the direction of a test if we turn c op x into x op c. *) fun leftRightTest TestEqual = TestEqual | leftRightTest TestLess = TestGreater | leftRightTest TestLessEqual = TestGreaterEqual | leftRightTest TestGreater = TestLess | leftRightTest TestGreaterEqual = TestLessEqual | leftRightTest TestUnordered = TestUnordered end (* Overflow check. This raises Overflow if the overflow bit is set in the cc. This generates a single block for the function unless there is a handler. As well as reducing the size of the code this also means that overflow checks are generally JO instructions to the end of the code. Since the default branch prediction is not to take forward jumps this should improve prefetching on the normal, non-overflow, path. *) fun checkOverflow ({currHandler=NONE, overflowBlock=ref(SOME overFlowLab), ...}) ccRef = (* It's already been set and there's no surrounding handler - use this. *) let val noOverflowLab = newLabel() in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel noOverflowLab ] end | checkOverflow ({currHandler=NONE, overflowBlock, ...}) ccRef = let (* *) val overFlowLab = newLabel() and noOverflowLab = newLabel() val packetReg = newPReg() val () = overflowBlock := SOME overFlowLab in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel overFlowLab, BlockSimple(LoadArgument{source=AddressConstant(toMachineWord(Overflow)), dest=packetReg, kind=movePolyWord}), BlockExit(RaiseExceptionPacket{packetReg=packetReg}), BlockLabel noOverflowLab ] end | checkOverflow ({currHandler=SOME h, ...}) ccRef = let val overFlowLab = newLabel() and noOverflowLab = newLabel() val packetReg = newPReg() in [ BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=overFlowLab, falseJump=noOverflowLab }), BlockLabel overFlowLab, BlockSimple(LoadArgument{source=AddressConstant(toMachineWord(Overflow)), dest=packetReg, kind=movePolyWord}), BlockRaiseAndHandle(RaiseExceptionPacket{packetReg=packetReg}, h), BlockLabel noOverflowLab ] end fun setAndRestoreRounding (rndMode, doWithRounding) = let open IEEEReal val savedRnd = newUReg() and setRnd = newUReg() in case fpMode of FPModeX87 => [BlockSimple(GetX87ControlReg{dest=savedRnd})] @ (* Set the appropriate bits in the control word. *) (case rndMode of TO_NEAREST => (* The bits need to be zero - just mask them. 
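(* Sketch of the shared-overflow-block pattern in checkOverflow above: the raising
   block is created at most once per function and every later overflow test just
   branches forward to it. newLabel is passed in here rather than being the
   compiler's own counter. *)
fun getOverflowLabel (cache: int option ref) (newLabel: unit -> int) =
    case !cache of
        SOME lab => (lab, false)       (* reuse the existing raise block *)
    |   NONE =>
        let val lab = newLabel()
        in cache := SOME lab; (lab, true) (* caller must emit the raise block *) end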
*) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32})] | TO_NEGINF => let val wrk = newUReg() in (* Mask the bits and set to 01 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x400, ccRef=newCCRef(), opSize=OpSize32})] end | TO_POSINF => let val wrk = newUReg() in (* Mask the bits and set to 10 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xf3ff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x800, ccRef=newCCRef(), opSize=OpSize32})] end | TO_ZERO => (* The bits need to be one - just set them. *) [BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xc00, ccRef=newCCRef(), opSize=OpSize32})]) @ [BlockSimple(SetX87ControlReg{source=setRnd})] @ doWithRounding() @ (* Restore the original rounding. *) [BlockSimple(SetX87ControlReg{source=savedRnd})] | FPModeSSE2 => [BlockSimple(GetSSE2ControlReg{dest=savedRnd})] @ (* Set the appropriate bits in the control word. *) (case rndMode of TO_NEAREST => (* The bits need to be zero - just mask them. *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32})] | TO_NEGINF => let val wrk = newUReg() in (* Mask the bits and set to 01 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x2000, ccRef=newCCRef(), opSize=OpSize32})] end | TO_POSINF => let val wrk = newUReg() in (* Mask the bits and set to 10 *) [BlockSimple( ArithmeticFunction{oper=AND, resultReg=wrk, operand1=savedRnd, operand2=IntegerConstant 0xffff9fff, ccRef=newCCRef(), opSize=OpSize32}), BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x4000, ccRef=newCCRef(), opSize=OpSize32})] end | TO_ZERO => (* The bits need to be one - just set them. *) [BlockSimple( ArithmeticFunction{oper=OR, resultReg=setRnd, operand1=savedRnd, operand2=IntegerConstant 0x6000, ccRef=newCCRef(), opSize=OpSize32})]) @ [BlockSimple(SetSSE2ControlReg{source=setRnd})] @ doWithRounding() @ [BlockSimple(SetSSE2ControlReg{source=savedRnd})] end (* Put a floating point value into a box or tag it so the value can be held in a general register. *) fun boxOrTagReal(srcReg, destReg, precision) = if precision = BuiltIns.PrecDouble orelse wordSize <> 0w8 then let open BuiltIns val boxFloat = case (fpMode, precision) of (FPModeX87, PrecDouble) => BoxX87Double | (FPModeX87, PrecSingle) => BoxX87Float | (FPModeSSE2, PrecDouble) => BoxSSE2Double | (FPModeSSE2, PrecSingle) => BoxSSE2Float in [BlockSimple(BoxValue{boxKind=boxFloat, source=srcReg, dest=destReg, saveRegs=[]})] end else [BlockSimple(TagFloat{source=srcReg, dest=destReg})] (* Indicate that the base address is actually an object index where appropriate. *) val memIndexOrObject = case targetArch of ObjectId32Bit => ObjectIndex | _ => NoMemIndex (* Generally we have an offset in words and no index register. 
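(* Sketch of the rounding-control encodings set up by setAndRestoreRounding above.
   Both the x87 control word and the SSE2 MXCSR hold the rounding mode in a
   two-bit field (00 to nearest, 01 toward negative infinity, 10 toward positive
   infinity, 11 toward zero); only the position of the field differs: bits 10-11
   in the x87 control word, bits 13-14 in the MXCSR. The default control values
   below are illustrative. *)
fun setRoundingField (ctrl, fieldShift, mode) =
    Word32.orb (Word32.andb (ctrl, Word32.notb (Word32.<< (0wx3, fieldShift))),
                Word32.<< (mode, fieldShift))
val x87RoundDown = setRoundingField (0wx037F, 0w10, 0wx1)   (* = 0wx077F *)
val sseRoundZero = setRoundingField (0wx1F80, 0w13, 0wx3)   (* = 0wx7F80 *)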
*) fun wordOffsetAddress(offset, baseReg: preg): argument = MemoryLocation{offset=offset*Word.toInt wordSize, base=baseReg, index=memIndexOrObject, cache=NONE} (* The large-word operations all work on the value within the box pointed at by the register. We generate all large-word operations using this even where the X86 instruction requires a register. This allows the next level to optimise cases of cascaded instructions and avoid creating boxes for intermediate values. *) fun wordAt reg = wordOffsetAddress(0, reg) val returnAddressEntry = newStackLoc 1 datatype argLoc = ArgInReg of { realReg: reg, argReg: preg } | ArgOnStack of { stackOffset: int, stackReg: stackLocn } (* Pseudo-regs for the result, the closure and the args that were passed in real regs. *) val resultTarget = newPReg() val closureRegAddr = newPReg() (* Create a map for the arguments indicating their register or stack location. *) local (* Select the appropriate argument register depending on the argument type. *) fun argTypesToArgEntries([], _, _, _) = ([], [], [], []) | argTypesToArgEntries(DoubleFloatType :: tl, gRegs, fpReg :: fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val pRegArg = newPReg() and uRegArg = newUReg() in (ArgInReg{realReg=fpReg, argReg=pRegArg} :: argTypes, boxOrTagReal(uRegArg, pRegArg, BuiltIns.PrecDouble) @ argCode, (uRegArg, fpReg) :: argRegs, stackArgs) end | argTypesToArgEntries(SingleFloatType :: tl, gRegs, fpReg :: fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val pRegArg = newPReg() and uRegArg = newUReg() in (ArgInReg{realReg=fpReg, argReg=pRegArg} :: argTypes, boxOrTagReal(uRegArg, pRegArg, BuiltIns.PrecSingle) @ argCode, (uRegArg, fpReg) :: argRegs, stackArgs) end | argTypesToArgEntries(_ :: tl, gReg :: gRegs, fpRegs, n) = (* This deals with general arguments but also with extra floating point arguments. They are boxed as usual. *) let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, gRegs, fpRegs, n-1) val argReg=newPReg() in (ArgInReg{realReg=gReg, argReg=argReg} :: argTypes, argCode, (argReg, gReg) :: argRegs, stackArgs) end | argTypesToArgEntries(_ :: tl, [], fpRegs, n) = let val (argTypes, argCode, argRegs, stackArgs) = argTypesToArgEntries(tl, [], fpRegs, n-1) val stackLoc = newStackLoc 1 in (ArgOnStack {stackOffset=n, stackReg = stackLoc } :: argTypes, argCode, argRegs, stackLoc :: stackArgs) end val (argEntries, argCode, argRegs, stackArguments) = argTypesToArgEntries(argTypes, generalArgRegs, floatingPtArgRegs, List.length argTypes) val clReg = case closure of [] => [] | _ => [(closureRegAddr, GenReg edx)] in val argumentVector = Vector.fromList argEntries (* Start code for the function. *) val beginInstructions = argCode @ [BlockBegin{regArgs=clReg @ argRegs, stackArgs=stackArguments @ [returnAddressEntry]}] (* The number of arguments on the stack. Needed in return instrs and tail calls. *) val currentStackArgs = List.length stackArguments end (* The return instruction. This can be added on to various tails but there is always one at the end anyway. 
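(* Sketch of the argument placement set up above: real-valued (double or single
   float) arguments take the next free floating-point argument register, other
   arguments take the next free general register, and anything left over goes on
   the stack; the closure itself is passed in edx. Register names are plain
   strings here purely for illustration. *)
fun placeArgs ([], _, _) = []
  | placeArgs ((name, true) :: rest, gRegs, f :: fRegs) =
        (name, f) :: placeArgs (rest, gRegs, fRegs)       (* floating point argument *)
  | placeArgs ((name, _) :: rest, g :: gRegs, fRegs) =
        (name, g) :: placeArgs (rest, gRegs, fRegs)       (* general, or boxed extra fp, argument *)
  | placeArgs ((name, _) :: rest, [], fRegs) =
        (name, "stack") :: placeArgs (rest, [], fRegs)
val demo =
    placeArgs ([("a", false), ("b", true), ("c", false)],
               ["rax", "rbx", "r8"], ["xmm0", "xmm1", "xmm2"])
(* demo = [("a", "rax"), ("b", "xmm0"), ("c", "rbx")] *)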
*) fun returnInstruction({stackPtr, ...}, target, tailCode) = let val (returnCode, resReg) = case fnResultType of GeneralType => ([], target) | DoubleFloatType => let val resReg = newUReg() in ([BlockSimple(LoadArgument{source=wordAt target, dest=resReg, kind=MoveDouble})], resReg) end | SingleFloatType => let val resReg = newUReg() val unpack = if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument target, dest=resReg, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt target, dest=resReg, kind=MoveFloat}) in ([unpack], resReg) end in BlockExit(ReturnResultFromFunction{resultReg=resReg, realReg=resultReg fnResultType, numStackArgs=currentStackArgs}) :: returnCode @ (if stackPtr <> 0 then BlockSimple(ResetStackPtr{numWords=stackPtr, preserveCC=false}) :: tailCode else tailCode) end (* This controls what codeAsArgument returns. Different instructions have different requirements. If an option is set to false the value is instead loaded into a new preg. "const32s" means that it will fit into 32-bits. Any constant satisfies that on X86/32 but on the X86/64 we don't allow addresses because we can't be sure whether they will fit or not. *) type allowedArgument = { anyConstant: bool, const32s: bool, memAddr: bool, existingPreg: bool } val allowInMemMove = (* We can move a 32-bit constant into memory but not a long constant. *) { anyConstant=false, const32s=true, memAddr=false, existingPreg=true } and allowInPReg = { anyConstant=false, const32s=false, memAddr=false, existingPreg=true } (* AllowDefer can be used to ensure that any side-effects are done before something else but otherwise we only evaluate afterwards. *) and allowDefer = { anyConstant=true, const32s=true, memAddr=true, existingPreg=true } datatype destination = SpecificPReg of preg | NoResult | Allowed of allowedArgument (* Context type. *) type context = { loopArgs: (preg list * int * int) option, stackPtr: int, currHandler: int option, overflowBlock: int option ref } (* If a preg has been provided, use that, otherwise generate a new one. *) fun asTarget(SpecificPReg preg) = preg | asTarget NoResult = newPReg() | asTarget(Allowed _) = newPReg() fun moveIfNotAllowed(NoResult, code, arg) = (code, arg, false) | moveIfNotAllowed(Allowed{anyConstant=true, ...}, code, arg as AddressConstant _) = (code, arg, false) | moveIfNotAllowed(Allowed{anyConstant=true, ...}, code, arg as IntegerConstant _) = (code, arg, false) | moveIfNotAllowed(dest as Allowed{const32s=true, ...}, code, arg as IntegerConstant value) = (* This is allowed if the value is within 32-bits *) if is32bit value then (code, arg, false) else moveToTarget(dest, code, arg) | moveIfNotAllowed(dest as Allowed{const32s=true, ...}, code, arg as AddressConstant _) = if targetArch = Native32Bit then (code, arg, false) (* We can store the address directly *) else moveToTarget(dest, code, arg) | moveIfNotAllowed(Allowed{existingPreg=true, ...}, code, arg as RegisterArgument(PReg _)) = (code, arg, false) | moveIfNotAllowed(Allowed{memAddr=true, ...}, code, arg as MemoryLocation _) = (code, arg, false) | moveIfNotAllowed(dest, code, arg) = moveToTarget(dest, code, arg) and moveToTarget(dest, code, arg) = let val target = asTarget dest val moveSize = case arg of AddressConstant _ => movePolyWord | MemoryLocation _ => movePolyWord | _ => moveNativeWord in (code @ [BlockSimple(LoadArgument{source=arg, dest=target, kind=moveSize})], RegisterArgument target, false) end (* Create a bool result from a test by returning true or false. 
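(* Sketch of the kind of check the const32s option relies on: an X86/64 immediate
   is sign-extended from 32 bits, so a constant can only be used directly if it
   lies in that range; address constants cannot be assumed to fit. This mirrors
   what is32bit is expected to test. *)
fun fitsInImm32 (value: LargeInt.int) =
    value >= ~0x80000000 andalso value <= 0x7FFFFFFF
val ok = fitsInImm32 12345               (* true *)
val tooBig = fitsInImm32 0x100000000     (* false *)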
*) fun makeBoolResultRev(condition, ccRef, target, testCode) = let val trueLab = newLabel() and falseLab = newLabel() and mergeLab = newLabel() val mergeReg = newMergeReg() in BlockSimple(LoadArgument{dest=target, source=RegisterArgument mergeReg, kind=Move32Bit}) :: BlockLabel mergeLab :: BlockFlow(Unconditional mergeLab) :: BlockSimple(LoadArgument{dest=mergeReg, source=IntegerConstant(tag 0), kind=Move32Bit}) :: BlockLabel falseLab :: BlockFlow(Unconditional mergeLab) :: BlockSimple(LoadArgument{dest=mergeReg, source=IntegerConstant(tag 1), kind=Move32Bit}) :: BlockLabel trueLab :: BlockFlow(Conditional{ ccRef=ccRef, condition=condition, trueJump=trueLab, falseJump=falseLab }) :: testCode end fun moveIfNotAllowedRev(NoResult, code, arg) = (code, arg, false) | moveIfNotAllowedRev(Allowed{anyConstant=true, ...}, code, arg as AddressConstant _) = (code, arg, false) | moveIfNotAllowedRev(Allowed{anyConstant=true, ...}, code, arg as IntegerConstant _) = (code, arg, false) | moveIfNotAllowedRev(dest as Allowed{const32s=true, ...}, code, arg as IntegerConstant value) = (* This is allowed if the value is within 32-bits *) if is32bit value then (code, arg, false) else moveToTargetRev(dest, code, arg) | moveIfNotAllowedRev(dest as Allowed{const32s=true, ...}, code, arg as AddressConstant _) = if targetArch = Native32Bit then (code, arg, false) else moveToTargetRev(dest, code, arg) | moveIfNotAllowedRev(Allowed{existingPreg=true, ...}, code, arg as RegisterArgument(PReg _)) = (code, arg, false) | moveIfNotAllowedRev(Allowed{memAddr=true, ...}, code, arg as MemoryLocation _) = (code, arg, false) | moveIfNotAllowedRev(dest, code, arg) = moveToTargetRev(dest, code, arg) and moveToTargetRev(dest, code, arg) = let val target = asTarget dest val moveSize = case arg of AddressConstant _ => movePolyWord | MemoryLocation _ => movePolyWord | _ => moveNativeWord in (BlockSimple(LoadArgument{source=arg, dest=target, kind=moveSize}) :: code, RegisterArgument target, false) end (* Use a move if there's no offset or index. We could use an add if there's no index. *) and loadAddress{base, offset=0, index=NoMemIndex, dest} = LoadArgument{source=RegisterArgument base, dest=dest, kind=movePolyWord} | loadAddress{base, offset, index, dest} = LoadEffectiveAddress{base=SOME base, offset=offset, dest=dest, index=index, opSize=nativeWordOpSize} and codeToICodeTarget(instr, context: context, isTail, target) = (* This is really for backwards compatibility. *) let val (code, _, _) = codeToICode(instr, context, isTail, SpecificPReg target) in code end and codeToPReg(instr, context) = let (* Many instructions require an argument in a register. If it's already in a register use that rather than creating a new one. *) val (code, result, _) = codeToICode(instr, context, false, Allowed allowInPReg) val preg = case result of RegisterArgument pr => pr | _ => raise InternalError "codeToPReg" in (code, preg) end and codeToPRegRev(instr, context, tailCode) = let (* Many instructions require an argument in a register. If it's already in a register use that rather than creating a new one. *) val (code, result, _) = codeToICodeRev(instr, context, false, Allowed allowInPReg, tailCode) val preg = case result of RegisterArgument pr => pr | _ => raise InternalError "codeToPRegRev" in (code, preg) end and codeToICode(instr, context, isTail, destination) = let val (code, dest, haveExited) = codeToICodeRev(instr, context, isTail, destination, []) in (List.rev code, dest, haveExited) end (* Main function to turn the codetree into ICode. 
Optimisation is generally left to later passes. This does detect tail recursion. This builds the result up in reverse order. There was an allocation hotspot in loadFields in the BICTuple case which was eliminated by building the list in reverse and then reversing the result. It seems better to build the list in reverse generally but for the moment there are too many special cases to do everything. *) and codeToICodeRev(BICNewenv (bindings, exp), context: context as {stackPtr=initialSp, ...} , isTail, destination, tailCode) = let (* Process a list of bindings. We need to accumulate the space used by any containers and reset the stack pointer at the end if necessary. *) fun doBindings([], context, tailCode) = (tailCode, context) | doBindings(BICDeclar{value=BICExtract(BICLoadLocal l), addr, ...} :: decs, context, tailCode) = let (* Giving a new name to an existing entry. This should have been removed at a higher level but it doesn't always seem to be. In particular we must treat this specially if it's a container. *) val original = Array.sub(locToPregArray, l) val () = Array.update(locToPregArray, addr, original) in doBindings(decs, context, tailCode) end | doBindings(BICDeclar{value, addr, ...} :: decs, context, tailCode) = let val (code, dest) = codeToPRegRev(value, context, tailCode) val () = Array.update(locToPregArray, addr, PregLocation dest) in doBindings(decs, context, code) end | doBindings(BICRecDecs [{lambda, addr, ...}] :: decs, context, tailCode) = (* We shouldn't have single entries in RecDecs but it seems to occur at the moment. *) let val dest = newPReg() val (code, _, _) = codeToICodeRev(BICLambda lambda, context, false, SpecificPReg dest, tailCode) val () = Array.update(locToPregArray, addr, PregLocation dest) in doBindings(decs, context, code) end | doBindings(BICRecDecs recDecs :: decs, context, tailCode) = let val destRegs = map (fn _ => newPReg()) recDecs (* First build the closures as mutable cells containing zeros. Set the entry in the address table to the register containing the address. *) fun makeClosure({lambda={closure, ...}, addr, ...}, dest, c) = let val () = Array.update(locToPregArray, addr, PregLocation dest) val sizeClosure = List.length closure + (if targetArch = ObjectId32Bit then 2 else 1) open Address fun clear n = if n = sizeClosure then [BlockSimple(AllocateMemoryOperation{size=sizeClosure, flags=if targetArch = ObjectId32Bit then Word8.orb(F_mutable, F_closure) else F_mutable, dest=dest, saveRegs=[]})] else (clear (n+1) @ [BlockSimple( StoreArgument{source=IntegerConstant(tag 0), base=dest, offset=n*Word.toInt wordSize, index=memIndexOrObject, kind=movePolyWord, isMutable=false})]) in c @ clear 0 @ [BlockSimple InitialisationComplete] end val allocClosures = ListPair.foldlEq makeClosure [] (recDecs, destRegs) fun setClosure({lambda as {closure, ...}, ...}, dest, l) = let val clResult = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, clResult) (* Basically the same as tuple except we load the address of the closure we've made. *) fun loadFields([], _) = [] | loadFields(f :: rest, n) = let val (code, source, _) = codeToICode(BICExtract f, context, false, Allowed allowInMemMove) val storeValue = [BlockSimple(StoreArgument{ source=source, base=dest, offset=n*Word.toInt wordSize, index=memIndexOrObject, kind=movePolyWord, isMutable=false })] in code @ storeValue @ loadFields(rest, n+1) end val setCodeAddress = if targetArch = ObjectId32Bit then let (* We can't get the code address until run time. 
*) val codeReg = newUReg() val closureReg = newPReg() in map BlockSimple [ LoadArgument{ source=AddressConstant(toMachineWord clResult), dest=closureReg, kind=movePolyWord}, LoadArgument{ source=MemoryLocation{offset=0, base=closureReg, index=ObjectIndex, cache=NONE}, dest=codeReg, kind=Move64Bit}, StoreArgument{ source=RegisterArgument codeReg, offset=0, base=dest, index=ObjectIndex, kind=moveNativeWord, isMutable=false} ] end else let val codeAddr = codeAddressFromClosure clResult val (code, source, _) = moveIfNotAllowed(Allowed allowInMemMove, [], AddressConstant codeAddr) in code @ [BlockSimple( StoreArgument{ source=source, base=dest, offset=0, index=NoMemIndex, kind=movePolyWord, isMutable=false })] end val setFields = setCodeAddress @ loadFields(closure, if targetArch = ObjectId32Bit then 2 else 1) in l @ setFields @ [BlockSimple(LockMutable{addr=dest})] end val setClosures = ListPair.foldlEq setClosure [] (recDecs, destRegs) val code = List.rev(allocClosures @ setClosures) in doBindings(decs, context, code @ tailCode) end | doBindings(BICNullBinding exp :: decs, context, tailCode) = let val (code, _, _) = codeToICodeRev(exp, context, false, NoResult, tailCode) (* And discard result. *) in doBindings(decs, context, code) end | doBindings(BICDecContainer{ addr, size } :: decs, {loopArgs, stackPtr, currHandler, overflowBlock}, tailCode) = let val containerReg = newStackLoc size val () = Array.update(locToPregArray, addr, ContainerLocation{container=containerReg, stackOffset=stackPtr+size}) in doBindings(decs, {loopArgs=loopArgs, stackPtr=stackPtr+size, currHandler=currHandler, overflowBlock=overflowBlock}, BlockSimple(ReserveContainer{size=size, container=containerReg}) :: tailCode) end val (codeBindings, resContext as {stackPtr=finalSp, ...}) = doBindings(bindings, context, tailCode) (* If we have had a container we'll need to reset the stack *) in if initialSp <> finalSp then let val _ = finalSp >= initialSp orelse raise InternalError "codeToICode - stack ptr" val bodyReg = newPReg() and resultReg = asTarget destination val (codeExp, result, haveExited) = codeToICodeRev(exp, resContext, isTail, SpecificPReg bodyReg, codeBindings) val afterAdjustSp = if haveExited then codeExp else BlockSimple(LoadArgument{source=result, dest=resultReg, kind=movePolyWord}) :: BlockSimple(ResetStackPtr{numWords=finalSp-initialSp, preserveCC=false}) :: codeExp in (afterAdjustSp, RegisterArgument resultReg, haveExited) end else codeToICodeRev(exp, resContext, isTail, destination, codeBindings) end | codeToICodeRev(BICConstnt(value, _), _, _, destination, tailCode) = moveIfNotAllowedRev(destination, tailCode, constantAsArgument value) | codeToICodeRev(BICExtract(BICLoadLocal l), {stackPtr, ...}, _, destination, tailCode) = ( case Array.sub(locToPregArray, l) of NoLocation => raise InternalError "codeToICodeRev - local unset" | PregLocation preg => moveIfNotAllowedRev(destination, tailCode, RegisterArgument preg) | ContainerLocation{container, stackOffset} => (* This always returns a ContainerAddr whatever the "allowed". *) (tailCode, ContainerAddr{container=container, stackOffset=stackPtr-stackOffset}, false) ) | codeToICodeRev(BICExtract(BICLoadArgument a), {stackPtr, ...}, _, destination, tailCode) = ( case Vector.sub(argumentVector, a) of ArgInReg{argReg, ...} => (* It was originally in a register. It's now in a preg. *) moveIfNotAllowedRev(destination, tailCode, RegisterArgument argReg) | ArgOnStack{stackOffset, stackReg} => (* Pushed before call. 
*) let val target = asTarget destination in (BlockSimple(LoadArgument{ source=StackLocation{wordOffset=stackOffset+stackPtr, container=stackReg, field=0, cache=NONE}, dest=target, kind=moveNativeWord}) :: tailCode, RegisterArgument target, false) end ) | codeToICodeRev(BICExtract(BICLoadClosure c), _, _, destination, tailCode) = let (* Add the number of words for the code address. This is 1 in native but 2 in 32-in-64. *) val offset = case targetArch of ObjectId32Bit => c+2 | _ => c+1 in if c >= List.length closure then raise InternalError "BICExtract: closure" else (); (* N.B. We need to add one to the closure entry because zero is the code address. *) moveIfNotAllowedRev(destination, tailCode, wordOffsetAddress(offset, closureRegAddr)) end | codeToICodeRev(BICExtract BICLoadRecursive, _, _, destination, tailCode) = (* If the closure is empty we must use the constant. We can't guarantee that the caller will actually load the closure register if it knows the closure is empty. *) moveIfNotAllowedRev(destination, tailCode, case closure of [] => AddressConstant(closureAsAddress resultClosure) | _ => RegisterArgument closureRegAddr) | codeToICodeRev(BICField{base, offset}, context, _, destination, tailCode) = let val (codeBase, baseEntry, _) = codeToICodeRev(base, context, false, Allowed allowInPReg, tailCode) in (* This should not be used with a container. *) case baseEntry of RegisterArgument baseR => moveIfNotAllowedRev(destination, codeBase, wordOffsetAddress(offset, baseR)) | _ => raise InternalError "codeToICodeRev-BICField" end | codeToICodeRev(BICLoadContainer{base, offset}, context, _, destination, tailCode) = let val (codeBase, baseEntry, _) = codeToICodeRev(base, context, false, Allowed allowInPReg, tailCode) val multiplier = Word.toInt(nativeWordSize div wordSize) in (* If this is a local container we extract the field. *) case baseEntry of RegisterArgument baseR => moveIfNotAllowedRev(destination, codeBase, wordOffsetAddress(offset*multiplier, baseR)) | ContainerAddr{container, stackOffset} => let val target = asTarget destination val finalOffset = stackOffset+offset val _ = finalOffset >= 0 orelse raise InternalError "offset" in (BlockSimple(LoadArgument{ source=StackLocation{wordOffset=finalOffset, container=container, field=offset, cache=NONE}, dest=target, kind=moveNativeWord}) :: tailCode, RegisterArgument target, false) end | _ => raise InternalError "codeToICodeRev-BICField" end | codeToICodeRev(BICEval{function, argList, resultType, ...}, context as { currHandler, ...}, isTail, destination, tailCode) = let val target = asTarget destination (* Create pregs for the closure and each argument. *) val clPReg = newPReg() (* If we have a constant closure we can go directly to the entry point. If the closure is a single word we don't need to load the closure register. *) val (functionCode, closureEntry, callKind) = case function of BICConstnt(addr, _) => let val addrAsAddr = toAddress addr (* If this is a closure we're still compiling we can't get the code address. However if this is directly recursive we can use the recursive convention. *) in if wordEq(closureAsAddress resultClosure, addr) then (tailCode, [], Recursive) else if flags addrAsAddr <> Address.F_words andalso flags addrAsAddr <> Address.F_closure then (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], FullCall) else if targetArch = ObjectId32Bit then (* We can't actually load the code address here. 
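(* Worked example of the closure slot arithmetic above (sketch): slot zero of a
   closure holds the code address, which occupies one word natively but two 32-bit
   words under ObjectId32Bit, so free variable c is found at word offset c+1 or
   c+2 respectively. *)
fun closureSlot isObjectId32 c = c + (if isObjectId32 then 2 else 1)
val nativeSlot0 = closureSlot false 0    (* = 1 *)
val obj32Slot0 = closureSlot true 0      (* = 2 *)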
*) let val addrLength = length addrAsAddr val _ = addrLength >= 0w1 orelse raise InternalError "BICEval address" val _ = flags addrAsAddr = Address.F_closure orelse raise InternalError "BICEval address not a closure" in if addrLength = 0w2 then (tailCode, [], ConstantCode addr) else (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], ConstantCode addr) end else (* Native 32 or 64-bits. *) let val addrLength = length addrAsAddr val _ = addrLength >= 0w1 orelse raise InternalError "BICEval address" val codeAddr = loadWord(addrAsAddr, 0w0) val _ = isCode (toAddress codeAddr) orelse raise InternalError "BICEval address not code" in if addrLength = 0w1 then (tailCode, [], ConstantCode codeAddr) else (BlockSimple(LoadArgument{source=AddressConstant addr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], ConstantCode codeAddr) end end | BICExtract BICLoadRecursive => ( (* If the closure is empty we don't need to load rdx *) case closure of [] => (tailCode, [], Recursive) | _ => (BlockSimple(LoadArgument {source=RegisterArgument closureRegAddr, dest=clPReg, kind=movePolyWord}) :: tailCode, [(RegisterArgument clPReg, GenReg edx)], Recursive) ) | function => (* General case. *) (#1 (codeToICodeRev(function, context, false, SpecificPReg clPReg, tailCode)), [(RegisterArgument clPReg, GenReg edx)], FullCall) (* Optimise arguments. We have to be careful with tail-recursive functions because they need to save any stack arguments that could be overwritten. This is complicated because we overwrite the stack before loading the register arguments. In some circumstances it could be safe but for the moment leave it. This should be safe in the new code-transform but not the old codeICode. Currently we don't allow memory arguments at all. There's the potential for problems later. Memory arguments could possibly lead to aliasing of the stack if the memory actually refers to a container on the stack. That would mess up the code that ensures that stack arguments are stored in the right order. *) (* We don't allow long constants in stack arguments to a tail-recursive call because we may use a memory move to set them. We also don't allow them in 32-in-64 because we can't push an address constant. *) val allowInStackArg = Allowed {anyConstant=not isTail andalso targetArch <> ObjectId32Bit, const32s=true, memAddr=false, existingPreg=not isTail } and allowInRegArg = Allowed {anyConstant=true, const32s=true, memAddr=false, existingPreg=not isTail } (* Load the first arguments into registers and the rest to the stack. *) fun loadArgs ([], _, _, tailCode) = (tailCode, [], []) | loadArgs ((arg, DoubleFloatType) :: args, gRegs, fpReg :: fpRegs, tailCode) = let (* Floating point register argument. *) val (c, r) = codeToPRegRev(arg, context, tailCode) val r1 = newUReg() val c1 = BlockSimple(LoadArgument{source=wordAt r, dest=r1, kind=MoveDouble}) :: c val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c1) in (code, (RegisterArgument r1, fpReg) :: regArgs, stackArgs) end | loadArgs ((arg, SingleFloatType) :: args, gRegs, fpReg :: fpRegs, tailCode) = let (* Floating point register argument. 
*) val (c, r) = codeToPRegRev(arg, context, tailCode) val r1 = newUReg() val c1 = if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument r, dest=r1, cache=NONE}) :: c else BlockSimple(LoadArgument{source=wordAt r, dest=r1, kind=MoveFloat}) :: c val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c1) in (code, (RegisterArgument r1, fpReg) :: regArgs, stackArgs) end | loadArgs ((arg, _) :: args, gReg::gRegs, fpRegs, tailCode) = let (* General register argument. *) val (c, r, _) = codeToICodeRev(arg, context, false, allowInRegArg, tailCode) val (code, regArgs, stackArgs) = loadArgs(args, gRegs, fpRegs, c) in (code, (r, gReg) :: regArgs, stackArgs) end | loadArgs ((arg, _) :: args, [], fpRegs, tailCode) = let (* Stack argument. *) val (c, r, _) = codeToICodeRev(arg, context, false, allowInStackArg, tailCode) val (code, regArgs, stackArgs) = loadArgs(args, [], fpRegs, c) in (code, regArgs, r :: stackArgs) end val (codeArgs, regArgs, stackArgs) = loadArgs(argList, generalArgRegs, floatingPtArgRegs, functionCode) (* If this is at the end of the function and the result types are the same we can use a tail-recursive call. *) val tailCall = isTail andalso resultType = fnResultType val callCode = if tailCall then let val {stackPtr, ...} = context (* The number of arguments currently on the stack. *) val currentStackArgCount = currentStackArgs val newStackArgCount = List.length stackArgs (* The offset of the first argument or the return address if there are no stack arguments. N.B. We actually have currentStackArgCount+1 items on the stack including the return address. Offsets can be negative. *) val stackOffset = stackPtr val firstArgumentAddr = currentStackArgCount fun makeStackArgs([], _) = [] | makeStackArgs(arg::args, offset) = {src=arg, stack=offset} :: makeStackArgs(args, offset-1) val stackArgs = makeStackArgs(stackArgs, firstArgumentAddr) (* The stack adjustment needed to compensate for any items that have been pushed and the differences in the number of arguments. May be positive or negative. This is also the destination address of the return address so when we enter the new function the return address will be the first item on the stack. *) val stackAdjust = firstArgumentAddr - newStackArgCount (* Add an entry for the return address to the stack arguments. *) val returnEntry = {src=StackLocation{wordOffset=stackPtr, container=returnAddressEntry, field=0, cache=NONE}, stack=stackAdjust} (* Because we're storing into the stack we may be overwriting values we want. If the source of any value is a stack location below the current stack pointer we load it except in the special case where the destination is the same as the source (which is often the case with the return address). 
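(* Worked example of the stackAdjust calculation above (sketch): with
   currentStackArgCount caller stack arguments and newStackArgCount callee stack
   arguments, the return address is stored at offset
   currentStackArgCount - newStackArgCount, which is also the net number of words
   by which the stack shrinks (negative means it grows). *)
fun tailCallAdjust {currentStackArgCount, newStackArgCount} =
    currentStackArgCount - newStackArgCount
val shrinksByTwo = tailCallAdjust {currentStackArgCount = 3, newStackArgCount = 1}   (* = 2 *)
val growsByOne = tailCallAdjust {currentStackArgCount = 0, newStackArgCount = 1}     (* = ~1 *)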
*) local fun loadArgs [] = ([], []) | loadArgs (arg :: rest) = let val (loadCode, loadedArgs) = loadArgs rest in case arg of {src as StackLocation{wordOffset, ...}, stack} => if wordOffset = stack+stackOffset (* Same location *) orelse stack+stackOffset < 0 (* Storing above current top of stack *) orelse stackOffset+wordOffset > ~ stackAdjust (* Above the last argument *) then (loadCode, arg :: loadedArgs) else let val preg = newPReg() in (BlockSimple(LoadArgument{source=src, dest=preg, kind=moveNativeWord}) :: loadCode, {src=RegisterArgument preg, stack=stack} :: loadedArgs) end | _ => (loadCode, arg :: loadedArgs) end in val (loadStackArgs, loadedStackArgs) = loadArgs(returnEntry :: stackArgs) end in BlockExit(TailRecursiveCall{regArgs=closureEntry @ regArgs, stackArgs=loadedStackArgs, stackAdjust = stackAdjust, currStackSize=stackOffset, callKind=callKind, workReg=newPReg()}) :: loadStackArgs @ codeArgs end else let val (moveResult, resReg) = case resultType of GeneralType => ([], target) | DoubleFloatType => let val fpRegDest = newUReg() in (boxOrTagReal(fpRegDest, target, BuiltIns.PrecDouble), fpRegDest) end | SingleFloatType => let val fpRegDest = newUReg() in (boxOrTagReal(fpRegDest, target, BuiltIns.PrecSingle), fpRegDest) end val call = FunctionCall{regArgs=closureEntry @ regArgs, stackArgs=stackArgs, dest=resReg, realDest=resultReg resultType, callKind=callKind, saveRegs=[]} val callBlock = case currHandler of NONE => BlockSimple call :: codeArgs | SOME h => BlockOptionalHandle{call=call, handler=h, label=newLabel()} :: codeArgs in moveResult @ callBlock end in (callCode, RegisterArgument target, tailCall (* We've exited if this was a tail jump *)) end | codeToICodeRev(BICNullary{oper=BuiltIns.GetCurrentThreadId}, _, _, destination, tailCode) = (* Get the ID of the current thread. *) let val target = asTarget destination in (BlockSimple(LoadMemReg{offset=memRegThreadSelf, dest=target, kind=movePolyWord}) :: tailCode, RegisterArgument target, false) end - | codeToICodeRev(BICNullary{oper=BuiltIns.CheckRTSException}, { currHandler, ...}, _, _, tailCode) = + | codeToICodeRev(BICNullary{oper=BuiltIns.CheckRTSException}, { currHandler, ...}, _, destination, tailCode) = let (* Raise an exception in ML if the last RTS call set the exception packet. *) val haveException = newLabel() and noException = newLabel() val ccRef = newCCRef() val testReg = newPReg() val raiseCode = RaiseExceptionPacket{packetReg=testReg} val code = BlockLabel noException :: (case currHandler of NONE => BlockExit raiseCode | SOME h => BlockRaiseAndHandle(raiseCode, h)) :: BlockLabel haveException :: BlockFlow(Conditional{ ccRef=ccRef, condition=JNE, trueJump=haveException, falseJump=noException }) :: BlockSimple(CompareLiteral{arg1=RegisterArgument testReg, arg2=tag 0, opSize=polyWordOpSize, ccRef=ccRef}) :: BlockSimple(LoadMemReg{offset=memRegExceptionPacket, dest=testReg, kind=movePolyWord}) :: tailCode in - (code, (* Unit result *) IntegerConstant(tag 0), false) + moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end + | codeToICodeRev(BICNullary{oper=BuiltIns.CPUPause}, _, _, destination, tailCode) = + (* Pause during spinlock phase of mutex locking. 
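(* Illustrative only: the kind of bounded spin loop for which the compiler emits
   the new pause operation before the thread falls back to blocking. tryLock,
   pause and block are stand-in parameters, not Poly/ML's actual mutex
   primitives. *)
fun spinThenBlock {tryLock: unit -> bool, pause: unit -> unit, block: unit -> unit} =
let
    fun spin 0 = block ()
      | spin n = if tryLock () then () else (pause (); spin (n - 1))
in
    spin 100    (* small, arbitrary spin count *)
end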
*) + moveIfNotAllowedRev(destination, BlockSimple PauseCPU :: tailCode, (* Unit result *) IntegerConstant(tag 0)) + | codeToICodeRev(BICUnary instr, context, isTail, destination, tailCode) = codeToICodeUnaryRev(instr, context, isTail, destination, tailCode) | codeToICodeRev(BICBinary instr, context, isTail, destination, tailCode) = codeToICodeBinaryRev(instr, context, isTail, destination, tailCode) | codeToICodeRev(BICArbitrary{oper, shortCond, arg1, arg2, longCall}, context, _, destination, tailCode) = let val startLong = newLabel() and resultLabel = newLabel() val target = asTarget destination val condResult = newMergeReg() (* Overflow check - if there's an overflow jump to the long precision case. *) fun jumpOnOverflow ccRef = let val noOverFlow = newLabel() in [BlockFlow(Conditional{ ccRef=ccRef, condition=JO, trueJump=startLong, falseJump=noOverFlow }), BlockLabel noOverFlow] end val (longCode, _, _) = codeToICode(longCall, context, false, SpecificPReg condResult) (* We could use a tail jump here if this is a tail. *) val (code, dest, haveExited) = ( (* Test the tag bits and skip to the long case if either is clear. *) List.rev(codeConditionRev(shortCond, context, false, startLong, [])) @ (* Try evaluating as fixed precision and jump if we get an overflow. *) codeFixedPrecisionArith(oper, arg1, arg2, context, condResult, jumpOnOverflow) @ (* If we haven't had an overflow jump to the result. *) [BlockFlow(Unconditional resultLabel), (* If we need to use the full long-precision call we come here. *) BlockLabel startLong] @ longCode @ [BlockLabel resultLabel, BlockSimple(LoadArgument{source=RegisterArgument condResult, dest=target, kind=movePolyWord})], RegisterArgument target, false) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICAllocateWordMemory instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeAllocate(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICLambda(lambda as { closure = [], ...}), _, _, destination, tailCode) = (* Empty closure - create a constant closure for any recursive calls. *) let val closure = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, closure) (* Return the closure itself as the value. *) in moveIfNotAllowedRev(destination, tailCode, AddressConstant(closureAsAddress closure)) end | codeToICodeRev(BICLambda(lambda as { closure, ...}), context, isTail, destination, tailCode) = (* Non-empty closure. Ignore stack closure option at the moment. *) let val closureRef = makeConstantClosure() val () = codeFunctionToX86(lambda, debugSwitches, closureRef) in if targetArch = ObjectId32Bit then let val target = asTarget destination val memAddr = newPReg() fun loadFields([], n, tlCode) = let val codeReg = newUReg() val closureReg = newPReg() in (* The code address occupies the first native word but we need to extract it at run-time. We don't currently have a way to have 64-bit constants. 
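           The closure cell built below on 32-in-64 therefore has this layout
           (offsets are poly-words):
               words 0-1 : the machine code address, one native 64-bit word, copied
                           at run time out of the constant closure created above
               words 2.. : the free variables of the function, one per word
           which is why loadFields starts storing the closure fields at offset 2.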
*) BlockSimple( StoreArgument{ source=RegisterArgument codeReg, offset=0, base=memAddr, index=ObjectIndex, kind=moveNativeWord, isMutable=false}) :: BlockSimple(LoadArgument{ source=MemoryLocation{offset=0, base=closureReg, index=ObjectIndex, cache=NONE}, dest=codeReg, kind=Move64Bit}) :: BlockSimple(LoadArgument{ source=AddressConstant(toMachineWord closureRef), dest=closureReg, kind=movePolyWord}) :: BlockSimple(AllocateMemoryOperation{size=n, flags=F_closure, dest=memAddr, saveRegs=[]}) :: tlCode end | loadFields(f :: rest, n, tlCode) = let (* Defer the evaluation if possible. We may have a constant that we can't move directly but it's better to load it after the allocation otherwise we will have to push the register if we need to GC. *) val (code1, source1, _) = codeToICodeRev(BICExtract f, context, false, Allowed allowDefer, tlCode) val restAndAlloc = loadFields(rest, n+1, code1) val (code2, source, _) = moveIfNotAllowedRev(Allowed allowInMemMove, restAndAlloc, source1) val storeValue = BlockSimple(StoreArgument{ source=source, offset=n*Word.toInt wordSize, base=memAddr, index=ObjectIndex, kind=movePolyWord, isMutable=false}) in storeValue :: code2 end val code = BlockSimple InitialisationComplete :: BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord}) :: loadFields(closure, 2, tailCode) in (code, RegisterArgument target, false) end (* Treat it as a tuple with the code as the first field. *) else codeToICodeRev(BICTuple(BICConstnt(codeAddressFromClosure closureRef, []) :: map BICExtract closure), context, isTail, destination, tailCode) end | codeToICodeRev(BICCond(test, thenPt, elsePt), context, isTail, NoResult, tailCode) = let (* If we don't want the result but are only evaluating for side-effects we may be able to optimise special cases. This was easier in the forward case but for now we don't bother and leave it to the lower levels. *) val startElse = newLabel() and skipElse = newLabel() val codeTest = codeConditionRev(test, context, false, startElse, tailCode) val (codeThen, _, _) = codeToICodeRev(thenPt, context, isTail, NoResult, codeTest) val (codeElse, _, _) = codeToICodeRev(elsePt, context, isTail, NoResult, BlockLabel startElse :: BlockFlow(Unconditional skipElse) :: codeThen) in (BlockLabel skipElse :: codeElse, (* Unit result *) IntegerConstant(tag 0), false) end | codeToICodeRev(BICCond(test, thenPt, elsePt), context, isTail, destination, tailCode) = let (* Because we may push the result onto the stack we have to create a new preg to hold the result and then copy that to the final result. *) (* If this is a tail each arm will exit separately and neither will return a result. *) val target = asTarget destination val condResult = newMergeReg() val thenTarget = if isTail then newPReg() else condResult val startElse = newLabel() val testCode = codeConditionRev(test, context, false, startElse, tailCode) (* Put the result in the target register. *) val (thenCode, _, thenExited) = codeToICodeRev(thenPt, context, isTail, SpecificPReg thenTarget, testCode) (* Add a jump round the else-part except that if this is a tail we return. The then-part could have exited e.g. with a raise or a loop. *) val (exitThen, thenLabel, elseTarget) = if thenExited then (thenCode, [], target (* Can use original target. 
*)) else if isTail then (returnInstruction(context, thenTarget, thenCode), [], newPReg()) else let val skipElse = newLabel() in (BlockFlow(Unconditional skipElse) :: thenCode, [BlockSimple(LoadArgument{source=RegisterArgument condResult, dest=target, kind=movePolyWord}), BlockLabel skipElse], condResult) end val (elseCode, _, elseExited) = codeToICodeRev(elsePt, context, isTail, SpecificPReg elseTarget, BlockLabel startElse :: exitThen) (* Add a return to the else-part if necessary so we will always exit on a tail. *) val exitElse = if isTail andalso not elseExited then returnInstruction(context, elseTarget, elseCode) else elseCode in (thenLabel @ exitElse, RegisterArgument target, isTail orelse thenExited andalso elseExited) end | codeToICodeRev(BICCase { cases, test, default, isExhaustive, firstIndex}, context, isTail, destination, tailCode) = let (* We have to create a new preg for the result in case we need to push it to the stack. *) val targetReg = newMergeReg() local val initialTestReg = newPReg() val (testCode, _, _) = codeToICodeRev(test, context, false, SpecificPReg initialTestReg, tailCode) (* Subtract the minimum value so the value we're testing is always in the range of (tagged) 0 to the maximum. It is possible to adjust the value when computing the index but that can lead to overflows during compilation if the minimum is very large or small. We can ignore overflow and allow values to wrap round. *) in val (testCode, testReg) = if firstIndex = 0w0 then (testCode, initialTestReg) else let val newTestReg = newPReg() val subtract = BlockSimple(ArithmeticFunction{oper=SUB, resultReg=newTestReg, operand1=initialTestReg, operand2=IntegerConstant(semitag(Word.toLargeInt firstIndex)), ccRef=newCCRef(), opSize=polyWordOpSize}) in (subtract :: testCode, newTestReg) end end val workReg = newPReg() (* Unless this is exhaustive we need to add a range check. *) val (rangeCheck, extraDefaults) = if isExhaustive then (testCode, []) else let val defLab1 = newLabel() val tReg1 = newPReg() val ccRef1 = newCCRef() (* Since we've subtracted any minimum we only have to check whether the value is greater (unsigned) than the maximum. *) val numberOfCases = LargeInt.fromInt(List.length cases) val continueLab = newLabel() val testCode2 = BlockLabel continueLab :: BlockFlow(Conditional{ccRef=ccRef1, condition=JNB, trueJump=defLab1, falseJump=continueLab}) :: BlockSimple(WordComparison{arg1=tReg1, arg2=IntegerConstant(tag numberOfCases), ccRef=ccRef1, opSize=polyWordOpSize}) :: BlockSimple(LoadArgument {source=RegisterArgument testReg, dest=tReg1, kind=movePolyWord}) :: testCode in (testCode2, [defLab1]) end (* Make a label for each item in the list. *) val codeLabels = map (fn _ => newLabel()) cases (* Create an exit label in case it's needed. *) val labelForExit = if isTail then ~1 (* Illegal label. *) else newLabel() (* Generate the code for each of the cases and the default. We need to put an unconditional branch after each to skip the other cases. *) fun codeCases (SOME c :: otherCases, startLabel :: otherLabels, tailCode) = let val caseTarget = if isTail then newPReg() else targetReg (* Put in the case with a jump to the end of the sequence. 
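               The overall sequence being assembled has roughly this shape for three
               populated alternatives (a sketch, not literal ICode):
                   <evaluate test>  <subtract firstIndex if it is non-zero>
                   <range check jumping to the default>      only if not exhaustive
                   IndexedCaseOperation / IndexedBr [L1, L2, L3]
                   L1: <case 1>   jump to exit
                   L2: <case 2>   jump to exit
                   L3: <case 3>   jump to exit
                   default: <default code>                   last, so no jump is needed
                   exit: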
*) val (codeThisCase, _, caseExited) = codeToICodeRev(c, context, isTail, SpecificPReg caseTarget, BlockLabel startLabel :: tailCode) val exitThisCase = if caseExited then codeThisCase else if isTail then returnInstruction(context, caseTarget, codeThisCase) else BlockFlow(Unconditional labelForExit) :: codeThisCase in codeCases(otherCases, otherLabels, exitThisCase) end | codeCases(NONE :: otherCases, _ :: otherLabels, tailCode) = codeCases(otherCases, otherLabels, tailCode) | codeCases ([], [], tailCode) = let (* We need to add labels for all the gaps we filled and also for a "default" label for the indexed-case instruction itself as well as any range checks. *) fun addDefault (startLabel, NONE, l) = BlockLabel startLabel :: l | addDefault (_, SOME _, l) = l fun asForward l = BlockLabel l val dLabs = map asForward extraDefaults @ tailCode val defLabels = ListPair.foldlEq addDefault dLabs (codeLabels, cases) val defaultTarget = if isTail then newPReg() else targetReg val (defaultCode, _, defaultExited) = codeToICodeRev(default, context, isTail, SpecificPReg defaultTarget, defLabels) in (* Put in the default. Because this is the last we don't need to jump round it. However if this is a tail and we haven't exited we put in a return. That way the case will always have exited if this is a tail. *) if isTail andalso not defaultExited then returnInstruction(context, defaultTarget, defaultCode) else defaultCode end | codeCases _ = raise InternalError "codeCases: mismatch" val codedCases = codeCases(cases, codeLabels, BlockFlow(IndexedBr codeLabels) :: BlockSimple(IndexedCaseOperation{testReg=testReg, workReg=workReg}) :: rangeCheck) (* We can now copy to the target. If we need to push the result this load will be converted into a push. *) val target = asTarget destination val copyToTarget = if isTail then codedCases else BlockSimple(LoadArgument{source=RegisterArgument targetReg, dest=target, kind=movePolyWord}) :: BlockLabel labelForExit :: codedCases in (copyToTarget, RegisterArgument target, isTail (* We have always exited on a tail. *)) end | codeToICodeRev(BICBeginLoop {loop, arguments}, context as { stackPtr, currHandler, overflowBlock, ...}, isTail, destination, tailCode) = let val target = asTarget destination fun codeArgs ([], tailCode) = ([], tailCode) | codeArgs (({value, addr}, _) :: rest, tailCode) = let val pr = newPReg() val () = Array.update(locToPregArray, addr, PregLocation pr) val (code, _, _) = codeToICodeRev(value, context, false, SpecificPReg pr, tailCode) val (pregs, othercode) = codeArgs(rest, code) in (pr::pregs, othercode) end val (loopRegs, argCode) = codeArgs(arguments, tailCode) val loopLabel = newLabel() val (loopBody, _, loopExited) = codeToICodeRev(loop, {loopArgs=SOME (loopRegs, loopLabel, stackPtr), stackPtr=stackPtr, currHandler=currHandler, overflowBlock=overflowBlock }, isTail, SpecificPReg target, BlockLabel loopLabel :: BlockSimple BeginLoop :: argCode) in (loopBody, RegisterArgument target, loopExited) end | codeToICodeRev(BICLoop args, context as {loopArgs=SOME (loopRegs, loopLabel, loopSp), stackPtr, currHandler, ...}, _, destination, tailCode) = let val target = asTarget destination (* Registers to receive the evaluated arguments. We can't put the values into the loop variables yet because the values could depend on the current values of the loop variables. 
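           For example, a tail-recursive call such as gcd (n, m mod n), if it has
           been turned into a loop over (m, n), needs the old value of n to become
           the new m; assigning the loop registers one at a time would lose it, so
           all the argument values are computed into fresh registers first and only
           then transferred by the JumpLoop below.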
*) val argPRegs = map(fn _ => newPReg()) args val codeArgs = ListPair.foldlEq(fn ((arg, _), pr, l) => #1 (codeToICodeRev(arg, context, false, SpecificPReg pr, l))) tailCode (args, argPRegs) val jumpArgs = ListPair.mapEq(fn (s, l) => (RegisterArgument s, l)) (argPRegs, loopRegs) (* If we've allocated a container in the loop we have to remove it before jumping back. *) val stackReset = if loopSp = stackPtr then codeArgs else BlockSimple(ResetStackPtr{numWords=stackPtr-loopSp, preserveCC=false}) :: codeArgs val jumpLoop = JumpLoop{regArgs=jumpArgs, stackArgs=[], checkInterrupt=SOME[], workReg=NONE} (* "checkInterrupt" could result in a Interrupt exception so we treat this like a function call. *) val code = case currHandler of NONE => BlockFlow(Unconditional loopLabel) :: BlockSimple jumpLoop :: stackReset | SOME h => BlockOptionalHandle{call=jumpLoop, handler=h, label=loopLabel} :: stackReset in (code, RegisterArgument target, true) end | codeToICodeRev(BICLoop _, {loopArgs=NONE, ...}, _, _, _) = raise InternalError "BICLoop without BICBeginLoop" | codeToICodeRev(BICRaise exc, context as { currHandler, ...}, _, destination, tailCode) = let val packetReg = newPReg() val (code, _, _) = codeToICodeRev(exc, context, false, SpecificPReg packetReg, tailCode) val raiseCode = RaiseExceptionPacket{packetReg=packetReg} val block = case currHandler of NONE => BlockExit raiseCode | SOME h => BlockRaiseAndHandle(raiseCode, h) in (block :: code, RegisterArgument(asTarget destination), true (* Always exits *)) end | codeToICodeRev(BICHandle{exp, handler, exPacketAddr}, context as { stackPtr, loopArgs, overflowBlock, ... }, isTail, destination, tailCode) = let (* As with BICCond and BICCase we need to create a new register for the result in case we need to push it to the stack. *) val handleResult = newMergeReg() val handlerLab = newLabel() and startHandling = newLabel() val (bodyTarget, handlerTarget) = if isTail then (newPReg(), newPReg()) else (handleResult, handleResult) (* TODO: Even if we don't actually want a result we force one in here by using "asTarget". *) (* The expression cannot be treated as a tail because the handler has to be removed after. It may "exit" if it has raised an unconditional exception. If it has we mustn't generate a PopExceptionHandler because there won't be any result for resultReg. We need to add two words to the stack to account for the items pushed by PushExceptionHandler. We create an instruction to push the handler followed by a block fork to the start of the code and, potentially the handler, then a label to start the code that the handler is in effect for. *) val initialCode = BlockLabel startHandling :: BlockFlow(SetHandler{handler=handlerLab, continue=startHandling}) :: BlockSimple(PushExceptionHandler{workReg=newPReg()}) :: tailCode val (expCode, _, expExit) = codeToICodeRev(exp, {stackPtr=stackPtr+2, loopArgs=loopArgs, currHandler=SOME handlerLab, overflowBlock=overflowBlock}, false (* Not tail *), SpecificPReg bodyTarget, initialCode) (* If this is the tail we can replace the jump at the end of the handled code with returns. If the handler has exited we don't need a return there. Otherwise we need to add an unconditional jump to skip the handler. *) val (atExpEnd, skipExpLabel) = case (isTail, expExit) of (true, true) => (* Tail and exited. *) (expCode, NONE) | (true, false) => (* Tail and not exited. *) (returnInstruction(context, bodyTarget, BlockSimple(PopExceptionHandler{workReg=newPReg()}) :: expCode), NONE) | (false, true) => (* Not tail but exited. 
*) (expCode, NONE) | (false, false) => let val skipHandler = newLabel() in (BlockFlow(Unconditional skipHandler) :: BlockSimple(PopExceptionHandler{workReg=newPReg()}) :: expCode, SOME skipHandler) end (* Make a register to hold the exception packet and put eax into it. *) val packetAddr = newPReg() val () = Array.update(locToPregArray, exPacketAddr, PregLocation packetAddr) val (handleCode, _, handleExit) = codeToICodeRev(handler, context, isTail, SpecificPReg handlerTarget, BlockSimple(BeginHandler{workReg=newPReg(), packetReg=packetAddr}) :: BlockLabel handlerLab :: atExpEnd) val target = asTarget destination val afterHandler = case (isTail, handleExit) of (true, true) => (* Tail and exited. *) handleCode | (true, false) => (* Tail and not exited. *) returnInstruction(context, handlerTarget, handleCode) | (false, _) => (* Not tail. *) handleCode val addLabel = case skipExpLabel of SOME lab => BlockLabel lab:: afterHandler | NONE => afterHandler in (BlockSimple(LoadArgument{source=RegisterArgument handleResult, dest=target, kind=movePolyWord}) :: addLabel, RegisterArgument target, isTail) end | codeToICodeRev(BICTuple fields, context, _, destination, tailCode) = let (* TODO: This is a relic of the old fall-back code-generator. It required the result of a tuple to be at the top of the stack. It should be changed. *) val target = asTarget destination (* Actually we want this. *) val memAddr = newPReg() fun loadFields([], n, tlCode) = BlockSimple(AllocateMemoryOperation{size=n, flags=0w0, dest=memAddr, saveRegs=[]}) :: tlCode | loadFields(f :: rest, n, tlCode) = let (* Defer the evaluation if possible. We may have a constant that we can't move directly but it's better to load it after the allocation otherwise we will have to push the register if we need to GC. *) val (code1, source1, _) = codeToICodeRev(f, context, false, Allowed allowDefer, tlCode) val restAndAlloc = loadFields(rest, n+1, code1) val (code2, source, _) = moveIfNotAllowedRev(Allowed allowInMemMove, restAndAlloc, source1) val storeValue = BlockSimple(StoreArgument{ source=source, offset=n*Word.toInt wordSize, base=memAddr, index=memIndexOrObject, kind=movePolyWord, isMutable=false}) in storeValue :: code2 end val code = BlockSimple InitialisationComplete :: BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord}) :: loadFields(fields, 0, tailCode) in (code, RegisterArgument target, false) end (* Copy the source tuple into the container. There are important special cases for both the source tuple and the container. If the source tuple is a BICTuple we have the fields and can store them without creating a tuple on the heap. If the destination is a local container we can store directly into the stack. 
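       The filter selects which fields of the source are actually stored and the
       destination words are packed.  For example, assuming filter = [true, false, true]
       (an illustration of copyField/copyContainer below, not real input): source
       field 0 is stored in container word 0, field 1 is skipped (although in the
       BICTuple case it is still evaluated for its side-effects), and field 2 is
       stored in container word 1.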
*) | codeToICodeRev(BICSetContainer{container, tuple, filter}, context as {stackPtr, ...}, _, destination, tailCode) = let local fun createStore containerReg (source, destWord) = StoreArgument{source=source, offset=destWord*Word.toInt nativeWordSize, base=containerReg, index=NoMemIndex, kind=moveNativeWord, isMutable=false} in val findContainer = case container of BICExtract(BICLoadLocal l) => ( case Array.sub(locToPregArray, l) of ContainerLocation{container, stackOffset} => let fun storeToStack(source, destWord) = StoreToStack{source=source, container=container, field=destWord, stackOffset=stackPtr-stackOffset+destWord} in SOME storeToStack end | _ => NONE ) | _ => NONE val (codeContainer, storeInstr) = case findContainer of SOME storeToStack => (tailCode, storeToStack) | NONE => let val containerTarget = newPReg() val (codeContainer, _, _) = codeToICodeRev(container, context, false, SpecificPReg containerTarget, tailCode) in (codeContainer, createStore containerTarget) end end val filterLength = BoolVector.length filter val code = case tuple of BICTuple cl => let (* In theory it's possible that the tuple could contain fields that are not used but nevertheless need to be evaluated for their side-effects. Create all the fields and push to the stack. *) fun codeField(arg, (regs, tailCode)) = let val (c, r, _) = codeToICodeRev(arg, context, false, Allowed allowInMemMove, tailCode) in (r :: regs, c) end val (pregsRev, codeFields) = List.foldl codeField ([], codeContainer) cl val pregs = List.rev pregsRev fun copyField(srcReg, (sourceWord, destWord, tailCode)) = if sourceWord < filterLength andalso BoolVector.sub(filter, sourceWord) then (sourceWord+1, destWord+1, BlockSimple(storeInstr(srcReg, destWord)) :: tailCode) else (sourceWord+1, destWord, tailCode) val (_, _, resultCode) = List.foldl copyField (0, 0, codeFields) pregs in resultCode end | tuple => let (* Copy a heap tuple. It is possible that this is another container in which case we must load the fields directly. We mustn't load its address and then copy because loading the address would be the last reference and might cause the container to be reused prematurely. *) val findContainer = case tuple of BICExtract(BICLoadLocal l) => ( case Array.sub(locToPregArray, l) of ContainerLocation{container, stackOffset} => let fun getAddr sourceWord = StackLocation{wordOffset=stackPtr-stackOffset+sourceWord, container=container, field=sourceWord, cache=NONE} in SOME getAddr end | _ => NONE ) | _ => NONE val (codeTuple, loadField) = case findContainer of SOME getAddr => (codeContainer, getAddr) | NONE => let val tupleTarget = newPReg() val (codeTuple, _, _) = codeToICodeRev(tuple, context, false, SpecificPReg tupleTarget, codeContainer) fun loadField sourceWord = wordOffsetAddress(sourceWord, tupleTarget) in (codeTuple, loadField) end fun copyContainer(sourceWord, destWord, tailCode) = if sourceWord = filterLength then tailCode else if BoolVector.sub(filter, sourceWord) then let val loadReg = newPReg() val code = BlockSimple(storeInstr(RegisterArgument loadReg, destWord)) :: BlockSimple(LoadArgument{source=loadField sourceWord, dest=loadReg, kind=movePolyWord}) :: tailCode in copyContainer(sourceWord+1, destWord+1, code) end else copyContainer(sourceWord+1, destWord, tailCode) in copyContainer(0, 0, codeTuple) end in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeRev(BICTagTest{test, tag=tagValue, ...}, context, _, destination, tailCode) = (* Check the "tag" word of a union (datatype). N.B. 
Not the same as testing the tag bit of a word. *) let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (testCode, tagArg, _) = codeToICodeRev(test, context, false, Allowed memOrReg, tailCode) val target = asTarget destination in (makeBoolResultRev(JE, ccRef, target, (* Use CompareLiteral because the tag must fit in 32-bits. *) BlockSimple(CompareLiteral{arg1=tagArg, arg2=tag(Word.toLargeInt tagValue), opSize=polyWordOpSize, ccRef=ccRef}) :: testCode), RegisterArgument target, false) end | codeToICodeRev(BICLoadOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeLoad(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICStoreOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeStore(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end | codeToICodeRev(BICBlockOperation ({kind=BlockOpEqualByte, sourceLeft, destRight, length}), context, _, destination, tailCode) = let val vec1Reg = newUReg() and vec2Reg = newUReg() val ccRef = newCCRef() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddressRev(sourceLeft, true, context, tailCode) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddressRev(destRight, true, context, leftCode) val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToRegRev(length, false (* unsigned *), context, rightCode) val target = asTarget destination val code = makeBoolResultRev(JE, ccRef, target, BlockSimple(CompareByteVectors{ vec1Addr=vec1Reg, vec2Addr=vec2Reg, length=lengthArg, ccRef=ccRef }) :: lengthUntag @ BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg}) :: rightUntag @ BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg}) :: leftUntag @ lengthCode) in (code, RegisterArgument target, false) end | codeToICodeRev(BICBlockOperation instr, context, isTail, destination, tailCode) = let val (code, dest, haveExited) = codeToICodeBlock(instr, context, isTail, destination) in (revApp(code, tailCode), dest, haveExited) end and codeToICodeUnaryRev({oper=BuiltIns.NotBoolean, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val ccRef = newCCRef() val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} val (argCode, testDest, _) = codeToICodeRev(arg1, context, false, allow, tailCode) in (* Test the argument and return a boolean result. If either the argument is a condition or the result is used in a test this will be better than using XOR. *) (makeBoolResultRev(JNE, ccRef, target, BlockSimple(CompareLiteral{arg1=testDest, arg2=tag 1, opSize=polyWordOpSize, ccRef=ccRef}) :: argCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.IsTaggedValue, arg1}, context, _, destination, tailCode) = let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (testCode, testResult, _) = codeToICodeRev(arg1, context, false, Allowed memOrReg, tailCode) (* Test the tag bit. This sets the zero bit if the value is untagged. 
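           Short (tagged) values are represented as 2*n+1, so their bottom bit is
           always 1, while heap addresses are word aligned and have a bottom bit of 0.
           For instance the tagged integer 5 is represented as 11, so testing the
           bottom bit gives a non-zero result and the JNE below produces true; any
           boxed value gives zero and produces false.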
*) val target = asTarget destination in (makeBoolResultRev(JNE, ccRef, target, BlockSimple(TestTagBit{arg=testResult, ccRef=ccRef}) :: testCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.MemoryCellLength, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val argReg1 = newUReg() and argReg2 = newUReg() and argReg3 = newUReg() (* These are untagged until the tag is put in. *) and ccRef1 = newCCRef() and ccRef2 = newCCRef() and ccRef3 = newCCRef() (* Get the length of a memory cell (heap object). We need to mask out the top byte containing the flags and to tag the result. The mask is 56 bits on 64-bit which won't fit in an inline constant. Since we have to shift it anyway we might as well do this by shifts. *) val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=argReg3, operand2=IntegerConstant 1, ccRef=ccRef3, opSize=polyWordOpSize}) :: BlockSimple(ShiftOperation{shift=SHR, resultReg=argReg3, operand=argReg2, shiftAmount=IntegerConstant 7 (* 8-tagshift*), ccRef=ccRef2, opSize=polyWordOpSize }) :: BlockSimple(ShiftOperation{shift=SHL, resultReg=argReg2, operand=argReg1, shiftAmount=IntegerConstant 8, ccRef=ccRef1, opSize=polyWordOpSize }) :: BlockSimple(LoadArgument{source=wordOffsetAddress(~1, addrReg), dest=argReg1, kind=movePolyWord}) :: argCode, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.MemoryCellFlags, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val argReg1 = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(TagValue{ source=argReg1, dest=target, isSigned=false, opSize=OpSize32 }) :: BlockSimple(LoadArgument{source=MemoryLocation{offset= ~1, base=addrReg, index=memIndexOrObject, cache=NONE}, dest=argReg1, kind=MoveByte}) :: argCode, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.ClearMutableFlag, arg1}, context, _, destination, tailCode) = let val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) in moveIfNotAllowedRev(destination, BlockSimple(LockMutable{addr=addrReg}) :: argCode, (* Unit result *) IntegerConstant(tag 0)) end - | codeToICodeUnaryRev({oper=BuiltIns.AtomicIncrement, arg1}, context, _, destination, tailCode) = - let - val target = asTarget destination - val incrReg = newUReg() - val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) - val code = - (* We want the result to be the new value but we've returned the old value. 
*) - BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=incrReg, operand2=IntegerConstant(semitag 1), - ccRef=newCCRef(), opSize=polyWordOpSize}) :: - BlockSimple(AtomicExchangeAndAdd{ base=addrReg, source=incrReg }) :: - BlockSimple(LoadArgument{source=IntegerConstant(semitag 1), dest=incrReg, kind=movePolyWord}) :: - argCode - in - (code, RegisterArgument target, false) - end - - | codeToICodeUnaryRev({oper=BuiltIns.AtomicDecrement, arg1}, context, _, destination, tailCode) = - let - val target = asTarget destination - val incrReg = newUReg() - val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) - val code = - BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=incrReg, operand2=IntegerConstant(semitag 1), - ccRef=newCCRef(), opSize=polyWordOpSize}) :: - BlockSimple(AtomicExchangeAndAdd{ base=addrReg, source=incrReg }) :: - BlockSimple(LoadArgument{source=IntegerConstant(semitag ~1), dest=incrReg, kind=movePolyWord}) :: - argCode - in - (code, RegisterArgument target, false) - end - | codeToICodeUnaryRev({oper=BuiltIns.AtomicReset, arg1}, context, _, destination, tailCode) = let (* This is needed only for the interpreted version where we have a single real mutex to interlock atomic increment and decrement. We have to use the same mutex to interlock clearing a mutex. On the X86 we use hardware locking and the hardware guarantees that an assignment of a word will be atomic. *) val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) (* Store tagged 0 in the mutex. This is the unlocked value. *) val code = BlockSimple(StoreArgument{source=IntegerConstant(tag 0), base=addrReg, index=memIndexOrObject, offset=0, kind=movePolyWord, isMutable=true}) :: argCode in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeUnaryRev({oper=BuiltIns.LongWordToTagged, arg1}, context, _, destination, tailCode) = let (* This is exactly the same as StringLengthWord at the moment. TODO: introduce a new ICode entry so that the next stage can optimise longword operations. *) val target = asTarget destination val argReg1 = newUReg() val (argCode, addrReg) = codeToPRegRev(arg1, context, tailCode) val code = BlockSimple(TagValue{ source=argReg1, dest=target, isSigned=false, opSize=polyWordOpSize }) :: (* Use movePolyWord even on 32-in-64 since we're producing a 32-bit value anyway. 
*) BlockSimple(LoadArgument{source=wordAt addrReg, dest=argReg1, kind=movePolyWord}) :: argCode in (code, RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.SignedToLongWord, arg1}, context, _, destination, tailCode) = let val addrReg = newPReg() and untagArg = newUReg() val (argCode, argReg1) = codeToPRegRev(arg1, context, tailCode) val (signExtend, sxReg) = case targetArch of ObjectId32Bit => let val sReg = newUReg() in ([BlockSimple(SignExtend32To64{source=RegisterArgument argReg1, dest=sReg})], sReg) end | _ => ([], argReg1) val code = BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untagArg, dest=addrReg, saveRegs=[]}) :: BlockSimple(UntagValue{source=sxReg, dest=untagArg, isSigned=true, cache=NONE, opSize=nativeWordOpSize}) :: signExtend @ argCode in moveIfNotAllowedRev(destination, code, RegisterArgument addrReg) end | codeToICodeUnaryRev({oper=BuiltIns.UnsignedToLongWord, arg1}, context, _, destination, tailCode) = let val addrReg = newPReg() and untagArg = newUReg() val (argCode, argReg1) = codeToPRegRev(arg1, context, tailCode) val code = BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untagArg, dest=addrReg, saveRegs=[]}) :: (* We can just use a polyWord operation to untag the unsigned value. *) BlockSimple(UntagValue{source=argReg1, dest=untagArg, isSigned=false, cache=NONE, opSize=polyWordOpSize}) :: argCode in moveIfNotAllowedRev(destination, code, RegisterArgument addrReg) end | codeToICodeUnaryRev({oper=BuiltIns.RealNeg precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val fpRegSrc = newUReg() and fpRegDest = newUReg() and sse2ConstReg = newUReg() (* The SSE2 code uses an SSE2 logical operation to flip the sign bit. This requires the values to be loaded into registers first because the logical operations require 128-bit operands. *) val (argCode, aReg1) = codeToPReg(arg1, context) (* Double precision values are always boxed and single precision values if they won't fit in a word. Otherwise we can using tagging. *) open BuiltIns val load = if precision = PrecDouble then BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveDouble}) else if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpRegSrc, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveFloat}) val code = case fpMode of FPModeX87 => [BlockSimple(X87FPUnaryOps{ fpOp=FCHS, dest=fpRegDest, source=fpRegSrc})] | FPModeSSE2 => let (* In single precision mode the sign bit is in the low 32-bits. There may be a better way to load it. 
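               The negation itself is an XOR with a constant whose only set bit is the
               IEEE sign bit.  A worked double-precision example of the SSE2 xor
               performed below:
                   sign mask :  0x8000000000000000
                   1.5       =  0x3FF8000000000000   xor mask ->  0xBFF8000000000000  = ~1.5
               Flipping that single bit negates the value without touching the exponent
               or the mantissa.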
*) val signBit = if precision = PrecDouble then realSignBit else floatSignBit in [BlockSimple(LoadArgument{source=AddressConstant signBit, dest=sse2ConstReg, kind=MoveDouble}), BlockSimple(SSE2FPBinary{opc=SSE2BXor, resultReg=fpRegDest, arg1=fpRegSrc, arg2=RegisterArgument sse2ConstReg})] end val result = boxOrTagReal(fpRegDest, target, precision) in (revApp(argCode @ load :: code @ result, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealAbs precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val fpRegSrc = newUReg() and fpRegDest = newUReg() and sse2ConstReg = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) open BuiltIns val load = if precision = PrecDouble then BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveDouble}) else if wordSize = 0w8 then BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpRegSrc, cache=NONE}) else BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpRegSrc, kind=MoveFloat}) val code = case fpMode of FPModeX87 => [BlockSimple(X87FPUnaryOps{ fpOp=FABS, dest=fpRegDest, source=fpRegSrc})] | FPModeSSE2 => let val mask = if precision = PrecDouble then realAbsMask else floatAbsMask in [BlockSimple(LoadArgument{source=AddressConstant mask, dest=sse2ConstReg, kind=MoveDouble}), BlockSimple(SSE2FPBinary{opc=SSE2BAnd, resultReg=fpRegDest, arg1=fpRegSrc, arg2=RegisterArgument sse2ConstReg})] end val result = boxOrTagReal(fpRegDest, target, precision) in (revApp(argCode @ load :: code @ result, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealFixedInt precision, arg1}, context, _, destination, tailCode) = let val target = asTarget destination val untagReg = newUReg() and fpReg = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) val floatOp = case fpMode of FPModeX87 => X87Float | FPModeSSE2 => SSE2Float val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double val _ = precision = BuiltIns.PrecDouble orelse raise InternalError "RealFixedInt - single" val code = argCode @ [BlockSimple(UntagValue{source=aReg1, dest=untagReg, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(floatOp{ dest=fpReg, source=RegisterArgument untagReg}), BlockSimple(BoxValue{boxKind=boxFloat, source=fpReg, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.FloatToDouble, arg1}, context, _, destination, tailCode) = let (* Convert a single precision floating point value to double precision. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* MoveFloat always converts from single to double-precision. 
*) val unboxOrUntag = case (fpMode, wordSize) of (FPModeX87, _) => [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg2, kind=MoveFloat})] | (FPModeSSE2, 0w4) => [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveFloat}), BlockSimple(SSE2FPUnary{opc=SSE2UFloatToDouble, resultReg=fpReg2, source=RegisterArgument fpReg})] | (FPModeSSE2, _) => [BlockSimple(UntagFloat{source=RegisterArgument aReg1, dest=fpReg, cache=NONE}), BlockSimple(SSE2FPUnary{opc=SSE2UFloatToDouble, resultReg=fpReg2, source=RegisterArgument fpReg})] val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double val code = argCode @ unboxOrUntag @ [BlockSimple(BoxValue{boxKind=boxFloat, source=fpReg2, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.DoubleToFloat NONE, arg1}, context, _, destination, tailCode) = let (* Convert a double precision value to a single precision using the current rounding mode. This is simpler than setting the rounding mode and then restoring it. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* In 32-bit mode we need to box the float. In 64-bit mode we can tag it. *) val boxOrTag = case fpMode of FPModeX87 => [BlockSimple(BoxValue{boxKind=BoxX87Float, source=fpReg, dest=target, saveRegs=[]})] | FPModeSSE2 => BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=fpReg2, source=RegisterArgument fpReg}) :: boxOrTagReal(fpReg2, target, BuiltIns.PrecSingle) val code = argCode @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveDouble})] @ boxOrTag in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.DoubleToFloat (SOME rndMode), arg1}, context, _, destination, tailCode) = let (* Convert a double precision value to a single precision. The rounding mode is passed in explicitly. *) val target = asTarget destination val fpReg = newUReg() and fpReg2 = newUReg() val (argCode, aReg1) = codeToPReg(arg1, context) (* In 32-bit mode we need to box the float. In 64-bit mode we can tag it. *) (* We need to save the rounding mode before we change it and restore it afterwards. *) open IEEEReal fun doConversion() = case fpMode of FPModeX87 => (* Convert the value using the appropriate rounding. *) [BlockSimple(BoxValue{boxKind=BoxX87Float, source=fpReg, dest=target, saveRegs=[]})] | FPModeSSE2 => BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=fpReg2, source=RegisterArgument fpReg}) :: boxOrTagReal(fpReg2, target, BuiltIns.PrecSingle) val code = argCode @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=fpReg, kind=MoveDouble})] @ setAndRestoreRounding(rndMode, doConversion) in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.RealToInt(precision, rndMode), arg1}, context, _, destination, tailCode) = let val target = asTarget destination val chkOverflow = newCCRef() val convResult = newUReg() and wrkReg2 = newUReg() (* Convert a floating point value to an integer. We need to raise overflow if the result is out of range. We first convert the value to 32/64 bits then tag it. An overflow can happen either because the real number does not fit in 32/64 bits or if it is not a 31/63 bit value. Fortunately, if the first conversion fails the result is a value that causes an overflow when we try it shift it so the check for overflow only needs to happen there. 
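       Concretely, if the conversion produces the machine integer i the code below
       computes
           i  -- shift left 1 -->  2*i        overflow flag checked here
              -- add 1        -->  2*i + 1    the tagged result
       so a real that converts to something outside the 31/63-bit short-integer
       range fails at the shift and the overflow check raises the exception.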
There is an SSE2 instruction that implements truncation (round to zero) directly but in other cases we need to set the rounding mode. *) val doConvert = case (fpMode, precision) of (FPModeX87, _) => let val fpReg = newUReg() val (argCode, aReg) = codeToPReg(arg1, context) fun doConvert() = [BlockSimple(X87RealToInt{source=fpReg, dest=convResult })] in argCode @ [BlockSimple(LoadArgument{source=wordAt aReg, dest=fpReg, kind=MoveDouble})] @ setAndRestoreRounding(rndMode, doConvert) end | (FPModeSSE2, BuiltIns.PrecDouble) => let val (argCode, argReg) = codeToPReg(arg1, context) fun doConvert() = [BlockSimple( SSE2RealToInt{source=wordAt argReg, dest=convResult, isDouble=true, isTruncate = rndMode = IEEEReal.TO_ZERO }) ] in argCode @ ( case rndMode of IEEEReal.TO_ZERO => doConvert() | _ => setAndRestoreRounding(rndMode, doConvert)) end | (FPModeSSE2, BuiltIns.PrecSingle) => let val (argCode, aReg) = codeToPReg(arg1, context) val fpReg = newUReg() fun doConvert() = [BlockSimple( SSE2RealToInt{source=RegisterArgument fpReg, dest=convResult, isDouble=false, isTruncate = rndMode = IEEEReal.TO_ZERO })] in argCode @ [BlockSimple(UntagFloat{source=RegisterArgument aReg, dest=fpReg, cache=NONE})] @ ( case rndMode of IEEEReal.TO_ZERO => doConvert() | _ => setAndRestoreRounding(rndMode, doConvert) ) end val checkAndTag = BlockSimple(ShiftOperation{ shift=SHL, resultReg=wrkReg2, operand=convResult, shiftAmount=IntegerConstant 1, ccRef=chkOverflow, opSize=polyWordOpSize}) :: checkOverflow context chkOverflow @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=wrkReg2, operand2=IntegerConstant 1, ccRef = newCCRef(), opSize=polyWordOpSize})] in (revApp(doConvert @ checkAndTag, tailCode), RegisterArgument target, false) end | codeToICodeUnaryRev({oper=BuiltIns.TouchAddress, arg1}, context, _, destination, tailCode) = let (* Put the value in a register. This is not entirely necessary but ensures that if the value is a constant the constant will be included in the code. *) val (argCode, aReg) = codeToPRegRev(arg1, context, tailCode) in moveIfNotAllowedRev(destination, BlockSimple(TouchArgument{source=aReg}) :: argCode, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeUnaryRev({oper=BuiltIns.AllocCStack, arg1}, context, _, destination, tailCode) = (* Allocate space on the C stack. Assumes that the argument has already been aligned. *) let val target = asTarget destination val (argCode, untaggedArg) = case arg1 of BICConstnt(value, _) => (tailCode, IntegerConstant(Word.toLargeInt(toShort value)) (* Leave untagged *)) | _ => let val (argCode, aReg) = codeToPRegRev(arg1, context, tailCode) val arg1Untagged = newUReg() in ( BlockSimple(UntagValue{source=aReg, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}) :: argCode, RegisterArgument arg1Untagged ) end val argReg1 = newUReg() and resReg1 = newUReg() val code = BlockSimple(BoxValue{boxKind=BoxLargeWord, source=resReg1, dest=target, saveRegs=[]}) :: BlockSimple(StoreMemReg{offset=memRegCStackPtr, source=resReg1, kind=moveNativeWord}) :: BlockSimple(ArithmeticFunction{oper=SUB, resultReg=resReg1, operand1=argReg1, operand2=untaggedArg, ccRef=newCCRef(), opSize=nativeWordOpSize}) :: BlockSimple(LoadMemReg{offset=memRegCStackPtr, dest=argReg1, kind=moveNativeWord}) :: argCode in (code, RegisterArgument target, false) end and codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test, isSigned}, arg1, arg2=BICConstnt(arg2Value, _)}, context, _, destination, tailCode) = let (* Comparisons. 
Because this is also used for pointer equality and even for exception matching it is perfectly possible that the argument could be an address. The higher levels used to generate this for pointer equality. *) val ccRef = newCCRef() val comparison = (* If the argument is a tagged value that will fit in 32-bits we can use the literal version. Use toLargeIntX here because the value will be sign-extended even if we're actually doing an unsigned comparison. *) if isShort arg2Value andalso is32bit(tag(Word.toLargeIntX(toShort arg2Value))) then let val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} in (* We're often comparing with a character or a string length field that has to be untagged. In that case we can avoid loading it into a register and untagging it by doing the comparison directly. *) case arg1 of BICLoadOperation{kind=LoadStoreUntaggedUnsigned, address} => let val (codeBaseIndex, codeUntag, memLoc) = codeAddressRev(address, false, context, tailCode) val literal = Word.toLargeIntX(toShort arg2Value) in BlockSimple(CompareLiteral{arg1=MemoryLocation memLoc, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: codeUntag @ codeBaseIndex end | BICLoadOperation{kind=LoadStoreMLByte _, address} => let val (codeBaseIndex, codeUntag, {base, index, offset, ...}) = codeAddressRev(address, true, context, tailCode) val _ = toShort arg2Value >= 0w0 andalso toShort arg2Value < 0w256 orelse raise InternalError "Compare byte not a byte" val literal = Word8.fromLargeWord(Word.toLargeWord(toShort arg2Value)) in BlockSimple(CompareByteMem{arg1={base=base, index=index, offset=offset}, arg2=literal, ccRef=ccRef}) :: codeUntag @ codeBaseIndex end | BICUnary({oper=BuiltIns.MemoryCellFlags, arg1}) => (* This occurs particularly in arbitrary precision comparisons. *) let val (baseCode, baseReg) = codeToPRegRev(arg1, context, tailCode) val _ = toShort arg2Value >= 0w0 andalso toShort arg2Value < 0w256 orelse raise InternalError "Compare memory cell not a byte" val literal = Word8.fromLargeWord(Word.toLargeWord(toShort arg2Value)) in BlockSimple(CompareByteMem{arg1={base=baseReg, index=memIndexOrObject, offset= ~1}, arg2=literal, ccRef=ccRef}) :: baseCode end | _ => let (* TODO: We could include rarer cases of tagging by looking at the code and seeing if it's a TagValue. *) val (testCode, testDest, _) = codeToICodeRev(arg1, context, false, allow, tailCode) val literal = tag(Word.toLargeIntX(toShort arg2Value)) in BlockSimple(CompareLiteral{arg1=testDest, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: testCode end end else (* Addresses or larger values. We need to use a register comparison. *) let val (testCode, testReg) = codeToPRegRev(arg1, context, tailCode) val arg2Arg = constantAsArgument arg2Value in BlockSimple(WordComparison{arg1=testReg, arg2=arg2Arg, ccRef=ccRef, opSize=polyWordOpSize}) :: testCode end val target = asTarget destination in (makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, comparison), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test, isSigned}, arg1=BICConstnt(arg1Value, _), arg2}, context, _, destination, tailCode) = let (* If we have the constant first we need to reverse the test so the first argument is a register. 
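       For example (source-level illustration only), a comparison 5 < x is coded as
       x > 5: the operands are swapped so that the register operand comes first and
       leftRightTest supplies the test that is correct for the swapped operands.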
*) val ccRef = newCCRef() val comparison = if isShort arg1Value andalso is32bit(tag(Word.toLargeIntX(toShort arg1Value))) then let val allow = Allowed {anyConstant=false, const32s=false, memAddr=true, existingPreg=true} val (testCode, testDest, _) = codeToICodeRev(arg2, context, false, allow, tailCode) val literal = tag(Word.toLargeIntX(toShort arg1Value)) in BlockSimple(CompareLiteral{arg1=testDest, arg2=literal, opSize=polyWordOpSize, ccRef=ccRef}) :: testCode end else (* Addresses or larger values. We need to use a register comparison. *) let val (testCode, testReg) = codeToPRegRev(arg2, context, tailCode) val arg1Arg = constantAsArgument arg1Value in BlockSimple(WordComparison{arg1=testReg, arg2=arg1Arg, ccRef=ccRef, opSize=polyWordOpSize}) :: testCode end val target = asTarget destination in (makeBoolResultRev(testAsBranch(leftRightTest test, isSigned, true), ccRef, target, comparison), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordComparison {test, isSigned}, arg1, arg2}, context, _, destination, tailCode) = let val ccRef = newCCRef() val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (arg1Code, arg1Result, _) = codeToICodeRev(arg1, context, false, Allowed memOrReg, tailCode) val (arg2Code, arg2Result, _) = codeToICodeRev(arg2, context, false, Allowed memOrReg, arg1Code) val target = asTarget destination val code = case (arg1Result, arg2Result) of (RegisterArgument arg1Reg, arg2Result) => makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg1Reg, arg2=arg2Result, ccRef=ccRef, opSize=polyWordOpSize}) :: arg2Code) | (arg1Result, RegisterArgument arg2Reg) => (* The second argument is in a register - switch the sense of the test. *) makeBoolResultRev(testAsBranch(leftRightTest test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg2Reg, arg2=arg1Result, ccRef=ccRef, opSize=polyWordOpSize}) :: arg2Code) | (arg1Result, arg2Result) => let (* Have to load an argument - pick the first. *) val arg1Reg = newPReg() in makeBoolResultRev(testAsBranch(test, isSigned, true), ccRef, target, BlockSimple(WordComparison{arg1=arg1Reg, arg2=arg2Result, ccRef=ccRef, opSize=polyWordOpSize}) :: BlockSimple(LoadArgument{source=arg1Result, dest=arg1Reg, kind=movePolyWord}) :: arg2Code) end in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.PointerEq, arg1, arg2}, context, isTail, destination, tailCode) = (* Equality of general values which can include pointers. This can be treated exactly as a word equality. It has to be analysed differently for indexed cases. *) codeToICodeBinaryRev({oper=BuiltIns.WordComparison{test=BuiltIns.TestEqual, isSigned=false}, arg1=arg1, arg2=arg2}, context, isTail, destination, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.FixedPrecisionArith oper, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val code = codeFixedPrecisionArith(oper, arg1, arg2, context, target, checkOverflow context) in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. N.B. it is possible to have type-incorrect values in dead code. i.e. code that will never be executed because of a run-time check. 
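       The arithmetic relied on here, as a worked example with the 2*n+1 tagged
       representation: to add the constant 3 to a tagged value 2*a+1 we add
       semitag 3 = 6, because (2*a+1) + 6 = 2*(a+3) + 1, which is already the
       correctly tagged sum, so no separate retagging step is needed.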
*) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef = newCCRef(), opSize=polyWordOpSize}) :: arg1Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg2Code, aReg2) = codeToPRegRev(arg2, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg2, operand2=IntegerConstant constVal, ccRef = newCCRef(), opSize=polyWordOpSize}) :: arg2Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithAdd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) (* Use LEA to do the addition since we're not concerned with overflow. This is shorter than subtracting the tag and adding the values and also moves the result into the appropriate register. *) val code = arg1Code @ arg2Code @ [BlockSimple(LoadEffectiveAddress{base=SOME aReg1, offset= ~1, index=MemIndex1 aReg2, dest=target, opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithSub, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPRegRev(arg1, context, tailCode) in (BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=polyWordOpSize}) :: arg1Code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithSub, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val aReg3 = newPReg() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Do the subtraction and add in the tag bit. This could be reordered if we have cascaded operations since we don't need to check for overflow. 
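           With two tagged operands the tag bits cancel: (2*a+1) - (2*b+1) = 2*(a-b),
           so unlike the constant case above a final add of 1 is needed to restore
           the tag and give 2*(a-b)+1.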
*) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=RegisterArgument aReg2, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = codeMultiplyConstantWordRev(arg1, context, destination, if isShort value then toShort value else 0w0, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = codeMultiplyConstantWordRev(arg2, context, destination, if isShort value then toShort value else 0w0, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMult, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() and resUntagged = newUReg() val code = arg1Code @ arg2Code @ (* Shift one argument and subtract the tag from the other. It's possible this could be reordered if we have a value that is already untagged. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=arg2Untagged, operand1=aReg2, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=arg1Untagged, operand2=RegisterArgument arg2Untagged, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithDiv, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() val code = arg1Code @ arg2Code @ (* Shift both of the arguments to remove the tags. We don't test for zero here - that's done explicitly. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = false, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=quotient, dest=target, isSigned=false, opSize=polyWordOpSize })] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith BuiltIns.ArithMod, arg1, arg2}, context, _, destination, tailCode) = let (* Identical to Quot except that the result is the remainder. 
*) val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() val code = arg1Code @ arg2Code @ (* Shift both of the arguments to remove the tags. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = false, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=remainder, dest=target, isSigned=false, opSize=polyWordOpSize })] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordArith _, ...}, _, _, _, _) = raise InternalError "codeToICodeNonRev: WordArith - unimplemented operation" | codeToICodeBinaryRev({oper=BuiltIns.WordLogical logOp, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) (* Use a semitagged value for XOR. This preserves the tag bit. Use toLargeIntX here because the operations will sign-extend 32-bit values. *) val constVal = if isShort value then (case logOp of BuiltIns.LogicalXor => semitag | _ => tag) (Word.toLargeIntX(toShort value)) else 0 val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constVal <= 0xffffffff andalso constVal >= 0 then OpSize32 else polyWordOpSize val code = arg1Code @ [BlockSimple(ArithmeticFunction{oper=oper, resultReg=target, operand1=arg1Reg, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=opSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical logOp, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, arg2Reg) = codeToPReg(arg2, context) (* Use a semitagged value for XOR. This preserves the tag bit. *) val constVal = if isShort value then (case logOp of BuiltIns.LogicalXor => semitag | _ => tag) (Word.toLargeIntX(toShort value)) else 0 val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constVal <= 0xffffffff andalso constVal >= 0 then OpSize32 else polyWordOpSize val code = arg2Code @ [BlockSimple(ArithmeticFunction{oper=oper, resultReg=target, operand1=arg2Reg, operand2=IntegerConstant constVal, ccRef=newCCRef(), opSize=opSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalOr, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Or-ing preserves the tag bit. 
*) [BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalAnd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val code = arg1Code @ arg2Code @ (* Since they're both tagged the result will be tagged. *) [BlockSimple(ArithmeticFunction{oper=AND, resultReg=target, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordLogical BuiltIns.LogicalXor, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val (arg2Code, arg2Reg) = codeToPReg(arg2, context) val aReg3 = newPReg() val code = arg1Code @ arg2Code @ (* We need to restore the tag bit after the operation. *) [BlockSimple(ArithmeticFunction{oper=XOR, resultReg=aReg3, operand1=arg1Reg, operand2=RegisterArgument arg2Reg, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=OR, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.WordShift BuiltIns.ShiftLeft, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = (* Use the general case multiplication code. This will use a shift except for small values. It does detect special cases such as multiplication by 4 and 8 which can be implemented with LEA. *) codeMultiplyConstantWordRev(arg1, context, destination, if isShort value then Word.<<(0w1, toShort value) else 0w1, tailCode) | codeToICodeBinaryRev({oper=BuiltIns.WordShift shift, arg1, arg2}, context, _, destination, tailCode) = (* N.B. X86 shifts of greater than the word length mask the higher bits. That isn't what ML wants but that is dealt with at a higher level *) let open BuiltIns val target = asTarget destination (* Load the value into an untagged register. If this is a left shift we need to clear the tag bit. We don't need to do that for right shifts. *) val argRegUntagged = newUReg() val arg1Code = case arg1 of BICConstnt(value, _) => let (* Remove the tag bit. This isn't required for right shifts. *) val cnstntVal = if isShort value then semitag(Word.toLargeInt(toShort value)) else 1 in [BlockSimple(LoadArgument{source=IntegerConstant cnstntVal, dest=argRegUntagged, kind=movePolyWord})] end | _ => let val (arg1Code, arg1Reg) = codeToPReg(arg1, context) val removeTag = case shift of ShiftLeft => ArithmeticFunction{oper=SUB, resultReg=argRegUntagged, operand1=arg1Reg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize} | _ => LoadArgument{source=RegisterArgument arg1Reg, dest=argRegUntagged, kind=movePolyWord} in arg1Code @ [BlockSimple removeTag] end (* The shift amount can usefully be a constant. 
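      (* Illustrative identity for the left-shift case, assuming t(x) = 2*x + 1: clearing the
         tag first gives (t(a) - 1) << n = 2*(a << n), and OR-ing the tag back in afterwards
         yields t(a << n).  For right shifts the tag bit simply falls off the bottom end, so
         only the final OR is needed. *)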
*) val (arg2Code, untag2Code, arg2Arg) = codeAsUntaggedByte(arg2, false, context) val resRegUntagged = newUReg() val shiftOp = case shift of ShiftLeft => SHL | ShiftRightLogical => SHR | ShiftRightArithmetic => SAR val code = arg1Code @ arg2Code @ untag2Code @ [BlockSimple(ShiftOperation{ shift=shiftOp, resultReg=resRegUntagged, operand=argRegUntagged, shiftAmount=arg2Arg, ccRef=newCCRef(), opSize=polyWordOpSize }), (* Set the tag by ORing it in. This will work whether or not a right shift has shifted a 1 into this position. *) BlockSimple( ArithmeticFunction{oper=OR, resultReg=target, operand1=resRegUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.AllocateByteMemory, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val sizeReg = newPReg() and baseReg = newPReg() val sizeCode = codeToICodeTarget(arg1, context, false, sizeReg) val (flagsCode, flagUntag, flagArg) = codeAsUntaggedByte(arg2, false, context) val code =sizeCode @ flagsCode @ [BlockSimple(AllocateMemoryVariable{size=sizeReg, dest=baseReg, saveRegs=[]})] @ flagUntag @ [BlockSimple(StoreArgument{ source=flagArg, base=baseReg, offset= ~1, index=memIndexOrObject, kind=MoveByte, isMutable=false}), BlockSimple InitialisationComplete, BlockSimple(LoadArgument{ source=RegisterArgument baseReg, dest=target, kind=movePolyWord})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordComparison test, arg1, arg2}, context, _, destination, tailCode) = let val ccRef = newCCRef() val (arg1Code, arg1Reg) = codeToPRegRev(arg1, context, tailCode) (* In X64 we can extract the word from a constant and do the comparison directly. That can't be done in X86/32 because the value isn't tagged and might look like an address. The RTS scans for comparisons with inline constant addresses. *) val (arg2Code, arg2Operand) = if targetArch <> Native32Bit then (* Native 64-bit or 32-in-64. 
*) ( case arg2 of BICConstnt(value, _) => (arg1Code, IntegerConstant(largeWordConstant value)) | _ => let val (code, reg) = codeToPRegRev(arg2, context, arg1Code) in (code, wordAt reg) end ) else let val (code, reg) = codeToPRegRev(arg2, context, arg1Code) in (code, wordAt reg) end val argReg = newUReg() val target = asTarget destination val code = makeBoolResultRev(testAsBranch(test, false, true), ccRef, target, BlockSimple(WordComparison{arg1=argReg, arg2=arg2Operand, ccRef=ccRef, opSize=nativeWordOpSize}) :: BlockSimple(LoadArgument{source=wordAt arg1Reg, dest=argReg, kind=moveNativeWord}) :: arg2Code) in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code =arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code = arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg2, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithAdd, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithSub, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val code = arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument 
target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithSub, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithMult, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val resValue = newUReg() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val argReg1 = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg1, kind=moveNativeWord}), BlockSimple(Multiplication{resultReg=resValue, operand1=argReg1, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=resValue, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithDiv, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val quotient = newUReg() and remainder = newUReg() val dividendReg = newUReg() and divisorReg = newUReg() val code = arg1Code @ arg2Code @ (* We don't test for zero here - that's done explicitly. *) [BlockSimple(LoadArgument{source=wordAt aReg1, dest=dividendReg, kind=moveNativeWord}), BlockSimple(LoadArgument{source=wordAt aReg2, dest=divisorReg, kind=moveNativeWord}), BlockSimple(Division { isSigned = false, dividend=dividendReg, divisor=RegisterArgument divisorReg, quotient=quotient, remainder=remainder, opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=quotient, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith BuiltIns.ArithMod, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val quotient = newUReg() and remainder = newUReg() val dividendReg = newUReg() and divisorReg = newUReg() val code = arg1Code @ arg2Code @ (* We don't test for zero here - that's done explicitly. 
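      (* Note (illustrative): LargeWord.word values are boxed, so each of these cases follows
         the same pattern - load the native-sized word from the cell addressed by the argument
         register (wordAt), perform the operation at nativeWordOpSize, and then allocate a
         fresh box for the result with BoxValue. *)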
*) [BlockSimple(LoadArgument{source=wordAt aReg1, dest=dividendReg, kind=moveNativeWord}), BlockSimple(LoadArgument{source=wordAt aReg2, dest=divisorReg, kind=moveNativeWord}), BlockSimple(Division { isSigned = false, dividend=dividendReg, divisor=RegisterArgument divisorReg, quotient=quotient, remainder=remainder, opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=remainder, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordArith _, ...}, _, _, _, _) = raise InternalError "codeToICodeNonRev: LargeWordArith - unimplemented operation" | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. *) val opSize = if logOp = BuiltIns.LogicalAnd andalso constantValue <= 0xffffffff andalso constantValue >= 0 then OpSize32 else nativeWordOpSize val code = arg1Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=opSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1=BICConstnt(value, _), arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val constantValue = largeWordConstant value val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR (* If we AND with a value that fits in 32-bits we can use a 32-bit operation. 
*) val opSize = if logOp = BuiltIns.LogicalAnd andalso constantValue <= 0xffffffff andalso constantValue >= 0 then OpSize32 else nativeWordOpSize val code = arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg2, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=IntegerConstant constantValue, ccRef=newCCRef(), opSize=opSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordLogical logOp, arg1, arg2}, context, _, destination, tailCode) = let val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) val aReg3 = newUReg() val argReg = newUReg() val oper = case logOp of BuiltIns.LogicalOr => OR | BuiltIns.LogicalAnd => AND | BuiltIns.LogicalXor => XOR val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord}), BlockSimple(ArithmeticFunction{oper=oper, resultReg=aReg3, operand1=argReg, operand2=wordAt aReg2, ccRef=newCCRef(), opSize=nativeWordOpSize}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.LargeWordShift shift, arg1, arg2}, context, _, destination, tailCode) = (* The shift is always a Word.word value i.e. tagged. There is a check at the higher level that the shift does not exceed 32/64 bits. *) let open BuiltIns val target = asTarget destination val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, untag2Code, arg2Arg) = codeAsUntaggedByte(arg2, false, context) val aReg3 = newUReg() val shiftOp = case shift of ShiftLeft => SHL | ShiftRightLogical => SHR | ShiftRightArithmetic => SAR val argReg = newUReg() val code = arg1Code @ arg2Code @ [BlockSimple(LoadArgument{source=wordAt aReg1, dest=argReg, kind=moveNativeWord})] @ untag2Code @ [BlockSimple(ShiftOperation{ shift=shiftOp, resultReg=aReg3, operand=argReg, shiftAmount=arg2Arg, ccRef=newCCRef(), opSize=nativeWordOpSize }), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=aReg3, dest=target, saveRegs=[]})] in (revApp(code, tailCode), RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealArith(fpOpPrec as (fpOp, fpPrec)), arg1, arg2}, context, _, destination, tailCode) = let open BuiltIns val commutative = case fpOp of ArithSub => NonCommutative | ArithDiv => NonCommutative | ArithAdd => Commutative | ArithMult => Commutative | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" val (argCodeRev, fpRegSrc, arg2Value) = codeFPBinaryArgsRev(arg1, arg2, fpPrec, commutative, context, []) val argCode = List.rev argCodeRev val target = asTarget destination val fpRegDest = newUReg() val arith = case fpMode of FPModeX87 => let val fpOp = case fpOp of ArithAdd => FADD | ArithSub => FSUB | ArithMult => FMUL | ArithDiv => FDIV | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" val isDouble = case fpPrec of PrecSingle => false | PrecDouble => true in [BlockSimple(X87FPArith{ opc=fpOp, resultReg=fpRegDest, arg1=fpRegSrc, arg2=arg2Value, isDouble=isDouble})] end | FPModeSSE2 => let val fpOp = case fpOpPrec of (ArithAdd, PrecSingle) => SSE2BAddSingle | (ArithSub, PrecSingle) => SSE2BSubSingle | (ArithMult, PrecSingle) => SSE2BMulSingle | (ArithDiv, PrecSingle) => 
SSE2BDivSingle | (ArithAdd, PrecDouble) => SSE2BAddDouble | (ArithSub, PrecDouble) => SSE2BSubDouble | (ArithMult, PrecDouble) => SSE2BMulDouble | (ArithDiv, PrecDouble) => SSE2BDivDouble | _ => raise InternalError "codeToICodeNonRev: RealArith - unimplemented operation" in [BlockSimple(SSE2FPBinary{ opc=fpOp, resultReg=fpRegDest, arg1=fpRegSrc, arg2=arg2Value})] end (* Box or tag the result. *) val result = boxOrTagReal(fpRegDest, target, fpPrec) in (revApp(argCode @ arith @ result, tailCode), RegisterArgument target, false) end (* Floating point comparison. This is complicated because we have different instruction sequences for SSE2 and X87. We also have to get the handling of unordered (NaN) values right. All the tests are treated as false if either argument is a NaN. To combine that test with the other tests we sometimes have to reverse the comparison. *) | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(BuiltIns.TestEqual, precision), arg1, arg2}, context, _, destination, tailCode) = let (* Get the arguments. It's commutative. *) val (arg2Code, fpReg, arg2Val) = codeFPBinaryArgsRev(arg1, arg2, precision, Commutative, context, tailCode) val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testReg1 = newUReg() and testReg2 = newUReg() and testReg3 = newUReg() (* If this is X87 we get the condition into RAX and test it there. If it is SSE2 we have to treat the unordered result (parity set) specially. *) val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=XOR, resultReg=testReg3, operand1=testReg2, operand2=IntegerConstant 0x4000, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant 0x4400, ccRef=newCCRef(), opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) | FPModeSSE2 => let val noParityLabel = newLabel() val resultLabel = newLabel() val falseLabel = newLabel() val trueLabel = newLabel() val mergeReg = newMergeReg() in BlockSimple(LoadArgument{ source=RegisterArgument mergeReg, dest=target, kind=Move32Bit }) :: BlockLabel resultLabel :: BlockFlow(Unconditional resultLabel) :: (* Result is false if parity is set i.e. unordered or if unequal. *) BlockSimple(LoadArgument{ source=IntegerConstant(tag 0), dest=mergeReg, kind=Move32Bit }) :: BlockLabel falseLabel :: BlockFlow(Unconditional resultLabel) :: (* Result is true if it's ordered and equal. *) BlockSimple(LoadArgument{ source=IntegerConstant(tag 1), dest=mergeReg, kind=Move32Bit }) :: BlockLabel trueLabel :: (* Not unordered - test the equality *) BlockFlow(Conditional{ccRef=ccRef1, condition=JE, trueJump=trueLabel, falseJump=falseLabel}) :: BlockLabel noParityLabel :: (* Go to falseLabel if unordered and therefore not equal. *) BlockFlow(Conditional{ccRef=ccRef1, condition=JP, trueJump=falseLabel, falseJump=noParityLabel}) :: BlockSimple(SSE2Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code end in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(BuiltIns.TestUnordered, precision), arg1, arg2}, context, _, destination, tailCode) = let (* The unordered test is really included because it is easy to implement and is the simplest way of implementing isNan. *) (* Get the arguments. It's commutative. 
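      (* Note on the NaN handling above, assuming the usual SSE2 flag conventions: a compare
         involving a NaN reports "unordered" by setting the parity flag, so the JP test routes
         it to the false result before JE is consulted.  Without that test NaN = NaN would
         wrongly return true, whereas e.g. Real.==(nan, nan) must be false. *)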
*) val (arg2Code, fpReg, arg2Val) = codeFPBinaryArgsRev(arg1, arg2, precision, Commutative, context, tailCode) val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testReg1 = newUReg() and testReg2 = newUReg() and testReg3 = newUReg() (* If this is X87 we get the condition into RAX and test it there. If it is SSE2 we have to treat the unordered result (parity set) specially. *) val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => (* And with 0x4500. We have to use XOR rather than CMP to avoid having an untagged constant comparison. *) makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=XOR, resultReg=testReg3, operand1=testReg2, operand2=IntegerConstant 0x4500, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant 0x4500, ccRef=newCCRef(), opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) | FPModeSSE2 => makeBoolResultRev(JP, ccRef1, target, BlockSimple(SSE2Compare{arg1=fpReg, arg2=arg2Val, ccRef=ccRef1, isDouble = isDouble}) :: arg2Code) in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.RealComparison(comparison, precision), arg1, arg2}, context, _, destination, tailCode) = let (* Ordered comparisons are complicated because they are all defined to be false if either argument is a NaN. We have two different tests for a > b and a >= b and implement a < b and a <= b by changing the order of the arguments. *) val (arg1Code, arg1Value) = codeFPArgument(arg1, precision, context, tailCode) val (arg2Code, arg2Value) = codeFPArgument(arg2, precision, context, arg1Code) val (regArg, opArg, isGeq) = case comparison of BuiltIns.TestGreater => (arg1Value, arg2Value, false) | BuiltIns.TestLess => (arg2Value, arg1Value, false) (* Reversed: a<b is b>a. *) | BuiltIns.TestGreaterEqual => (arg1Value, arg2Value, true) | BuiltIns.TestLessEqual => (arg2Value, arg1Value, true) (* Reversed: a<=b is b>=a. *) | _ => raise InternalError "RealComparison: unimplemented operation" (* Load the first operand into a register. 
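      (* Note on the ordered comparisons above, assuming the usual COMISD/UCOMISD flag
         conventions: an unordered result sets the carry flag (as well as ZF and PF), so
         JA (for >) and JNB (for >=) are both false whenever either operand is a NaN, as
         required.  The < and <= cases are handled by swapping the operands rather than by
         using JB/JBE, which would wrongly succeed on NaNs. *)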
*) val (fpReg, loadCode) = case regArg of RegisterArgument fpReg => (fpReg, arg2Code) | regArg => let val fpReg = newUReg() val moveOp = case precision of BuiltIns.PrecDouble => MoveDouble | BuiltIns.PrecSingle => MoveFloat in (fpReg, BlockSimple(LoadArgument{source=regArg, dest=fpReg, kind=moveOp}) :: arg2Code) end val isDouble = precision = BuiltIns.PrecDouble val target = asTarget destination val code = case fpMode of FPModeX87 => let val testReg1 = newUReg() and testReg2 = newUReg() val ccRef1 = newCCRef() and ccRef2 = newCCRef() val testBits = if isGeq then 0x500 else 0x4500 in makeBoolResultRev(JE, ccRef2, target, BlockSimple(ArithmeticFunction{ oper=AND, resultReg=testReg2, operand1=testReg1, operand2=IntegerConstant testBits, ccRef=ccRef2, opSize=OpSize32 }) :: BlockSimple(X87FPGetCondition { ccRef=ccRef1, dest=testReg1 }) :: BlockSimple(X87Compare{arg1=fpReg, arg2=opArg, ccRef=ccRef1, isDouble = isDouble}) :: loadCode) end | FPModeSSE2 => let val ccRef1 = newCCRef() val condition = if isGeq then JNB (* >=, <= *) else JA (* >, < *) in makeBoolResultRev(condition, ccRef1, target, BlockSimple(SSE2Compare{arg1=fpReg, arg2=opArg, ccRef=ccRef1, isDouble = isDouble}) :: loadCode) end in (code, RegisterArgument target, false) end | codeToICodeBinaryRev({oper=BuiltIns.FreeCStack, arg1, arg2}, context, _, destination, tailCode) = (* Free space on the C stack by storing the address in the argument into the "memory register". This is a binary operation that takes the base address and the size. The base address isn't used in this version. *) let val (arg2Code, untaggedLength) = case arg2 of BICConstnt(value, _) => (tailCode, IntegerConstant(Word.toLargeInt(toShort value)) (* Leave untagged *)) | _ => let val (arg2Code, lengthReg) = codeToPRegRev(arg2, context, tailCode) val lengthUntagged = newUReg() in ( BlockSimple(UntagValue{source=lengthReg, dest=lengthUntagged, isSigned=false, cache=NONE, opSize=polyWordOpSize}) :: arg2Code, RegisterArgument lengthUntagged ) end (* Evaluate the first argument for side-effects but discard it. *) val (arg1Code, _, _) = codeToICodeRev(arg1, context, false, Allowed allowDefer, arg2Code) val addrReg = newUReg() and resAddrReg = newUReg() val code = BlockSimple(StoreMemReg{offset=memRegCStackPtr, source=resAddrReg, kind=moveNativeWord}) :: BlockSimple(ArithmeticFunction{oper=ADD, resultReg=resAddrReg, operand1=addrReg, operand2=untaggedLength, ccRef=newCCRef(), opSize=nativeWordOpSize}) :: BlockSimple(LoadMemReg{offset=memRegCStackPtr, dest=addrReg, kind=moveNativeWord}) :: arg1Code in moveIfNotAllowedRev(destination, code, (* Unit result *) IntegerConstant(tag 0)) end + + | codeToICodeBinaryRev({oper=BuiltIns.AtomicExchangeAdd, arg1, arg2=BICConstnt(value, _)}, context, _, destination, tailCode) = + let + (* The value to be added is always a constant. *) + val target = asTarget destination + val incrReg = newUReg() + val (arg1Code, addrReg) = codeToPRegRev(arg1, context, tailCode) + (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. 
*) + val constVal = + if isShort value + then semitag(Word.toLargeIntX(toShort value)) + else 0 + val code = + BlockSimple(AtomicExchangeAndAdd{ base=addrReg, source=incrReg, resultReg=target }) :: + BlockSimple(LoadArgument{source=IntegerConstant constVal, dest=incrReg, kind=movePolyWord}) :: + arg1Code + in + (code, RegisterArgument target, false) + end + + | codeToICodeBinaryRev({oper=BuiltIns.AtomicExchangeAdd, arg1, arg2}, context, _, destination, tailCode) = + let + (* This is only needed if the function is returned as a value. *) + val target = asTarget destination + val incrReg = newUReg() + val (arg1Code, addrReg) = codeToPRegRev(arg1, context, tailCode) + val (arg2Code, arg2Reg) = codeToPRegRev(arg2, context, arg1Code) + val code = + BlockSimple(AtomicExchangeAndAdd{ base=addrReg, source=incrReg, resultReg=target }) :: + (* Subtract the tag from the increment before the operation *) + BlockSimple(ArithmeticFunction{oper=SUB, resultReg=incrReg, operand1=arg2Reg, operand2=IntegerConstant 1, + ccRef=newCCRef(), opSize=polyWordOpSize}) :: + arg2Code + in + (code, RegisterArgument target, false) + end (* Multiply tagged word by a constant. We're not concerned with overflow so it's possible to use various short cuts. *) and codeMultiplyConstantWordRev(arg, context, destination, multiplier, tailCode) = let val target = asTarget destination val (argCode, aReg) = codeToPReg(arg, context) val doMultiply = case multiplier of 0w0 => [BlockSimple(LoadArgument{source=IntegerConstant 1, dest=target, kind=movePolyWord})] | 0w1 => [BlockSimple(LoadArgument{source=RegisterArgument aReg, dest=target, kind=movePolyWord})] | 0w2 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~1, index=MemIndex1 aReg, dest=target, opSize=polyWordOpSize})] | 0w3 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~2, index=MemIndex2 aReg, dest=target, opSize=polyWordOpSize})] | 0w4 => [BlockSimple(LoadEffectiveAddress{base=NONE, offset= ~3, index=MemIndex4 aReg, dest=target, opSize=polyWordOpSize})] | 0w5 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~4, index=MemIndex4 aReg, dest=target, opSize=polyWordOpSize})] | 0w8 => [BlockSimple(LoadEffectiveAddress{base=NONE, offset= ~7, index=MemIndex8 aReg, dest=target, opSize=polyWordOpSize})] | 0w9 => [BlockSimple(LoadEffectiveAddress{base=SOME aReg, offset= ~8, index=MemIndex8 aReg, dest=target, opSize=polyWordOpSize})] | _ => let val tReg = newUReg() val tagCorrection = Word.toLargeInt multiplier - 1 fun getPower2 n = let fun p2 (n, l) = if n = 0w1 then SOME l else if Word.andb(n, 0w1) = 0w1 then NONE else p2(Word.>>(n, 0w1), l+0w1) in if n = 0w0 then NONE else p2(n,0w0) end val multiply = case getPower2 multiplier of SOME power => (* Shift it including the tag. *) BlockSimple(ShiftOperation{ shift=SHL, resultReg=tReg, operand=aReg, shiftAmount=IntegerConstant(Word.toLargeInt power), ccRef=newCCRef(), opSize=polyWordOpSize }) | NONE => (* Multiply including the tag. 
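      (* Worked example of the constant-multiply correction, assuming t(x) = 2*x + 1:
         m * t(a) - (m - 1) = 2*m*a + 1 = t(m*a), hence tagCorrection = m - 1.  The
         power-of-two case is the same identity with the multiplication done as a shift,
         e.g. m = 4: (t(a) << 2) - 3 = 8*a + 4 - 3 = t(4*a). *)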
*) BlockSimple(Multiplication{resultReg=tReg, operand1=aReg, operand2=IntegerConstant(Word.toLargeInt multiplier), ccRef=newCCRef(), opSize=polyWordOpSize}) in [multiply, BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=tReg, operand2=IntegerConstant tagCorrection, ccRef=newCCRef(), opSize=polyWordOpSize})] end in (revApp(argCode @ doMultiply, tailCode), RegisterArgument target, false) end and codeToICodeAllocate({numWords as BICConstnt(length, _), flags as BICConstnt(flagValue, _), initial}, context, _, destination) = (* Constant length and flags is used for ref. We could handle other cases. *) if isShort length andalso isShort flagValue andalso toShort length = 0w1 then let val target = asTarget destination (* Force a different register. *) val vecLength = Word.toInt(toShort length) val flagByte = Word8.fromLargeWord(Word.toLargeWord(toShort flagValue)) val memAddr = newPReg() and valueReg = newPReg() fun initialise n = BlockSimple(StoreArgument{ source=RegisterArgument valueReg, offset=n*Word.toInt wordSize, base=memAddr, index=memIndexOrObject, kind=movePolyWord, isMutable=false}) val code = codeToICodeTarget(initial, context, false, valueReg) @ [BlockSimple(AllocateMemoryOperation{size=vecLength, flags=flagByte, dest=memAddr, saveRegs=[]})] @ List.tabulate(vecLength, initialise) @ [BlockSimple InitialisationComplete, BlockSimple(LoadArgument{source=RegisterArgument memAddr, dest=target, kind=movePolyWord})] in (code, RegisterArgument target, false) end else (* If it's longer use the full run-time form. *) allocateMemoryVariable(numWords, flags, initial, context, destination) | codeToICodeAllocate({numWords, flags, initial}, context, _, destination) = allocateMemoryVariable(numWords, flags, initial, context, destination) and codeToICodeLoad({kind=LoadStoreMLWord _, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, false, context) in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument {source=MemoryLocation memLoc, dest=target, kind=movePolyWord})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreMLByte _, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, true, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveByte}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC8, address}, context, _, destination) = let (* Load a byte from C memory. This is almost exactly the same as LoadStoreMLByte except that the base address is a LargeWord.word value. *) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w1, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveByte}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC16, address}, context, _, destination) = let (* Load a 16-bit value from C memory. 
*) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w2, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move16Bit}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize32})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC32, address}, context, _, destination) = let (* Load a 32-bit value from C memory. If this is 64-bit mode we can tag it but if this is 32-bit mode we need to box it. *) val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w4, context) val untaggedResReg = newUReg() val boxTagCode = if targetArch = Native64Bit then BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=OpSize64 (* It becomes 33 bits *)}) else BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untaggedResReg, dest=target, saveRegs=[]}) in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move32Bit}), boxTagCode], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreC64, address}, context, _, destination) = let (* Load a 64-bit value from C memory. This is only allowed in 64-bit mode. The result is a boxed value. *) val _ = targetArch <> Native32Bit orelse raise InternalError "codeToICodeNonRev: BICLoadOperation LoadStoreC64 in 32-bit" val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w8, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=Move64Bit}), BlockSimple(BoxValue{boxKind=BoxLargeWord, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreCFloat, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w4, context) val untaggedResReg = newUReg() val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double (* We need to convert the float into a double. 
*) val loadArg = case fpMode of FPModeX87 => BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveFloat}) | FPModeSSE2 => BlockSimple(SSE2FPUnary { source=MemoryLocation memLoc, resultReg=untaggedResReg, opc=SSE2UFloatToDouble}) in (codeBaseIndex @ codeUntag @ [loadArg, BlockSimple(BoxValue{boxKind=boxFloat, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreCDouble, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeCAddress(address, 0w8, context) val untaggedResReg = newUReg() val boxFloat = case fpMode of FPModeX87 => BoxX87Double | FPModeSSE2 => BoxSSE2Double in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=MoveDouble}), BlockSimple(BoxValue{boxKind=boxFloat, source=untaggedResReg, dest=target, saveRegs=[]})], RegisterArgument target, false) end | codeToICodeLoad({kind=LoadStoreUntaggedUnsigned, address}, context, _, destination) = let val target = asTarget destination val (codeBaseIndex, codeUntag, memLoc) = codeAddress(address, false, context) val untaggedResReg = newUReg() in (codeBaseIndex @ codeUntag @ [BlockSimple(LoadArgument { source=MemoryLocation memLoc, dest=untaggedResReg, kind=movePolyWord}), BlockSimple(TagValue {source=untaggedResReg, dest=target, isSigned=false, opSize=polyWordOpSize})], RegisterArgument target, false) end and codeToICodeStore({kind=LoadStoreMLWord _, address, value}, context, _, destination) = let val (sourceCode, source, _) = codeToICode(value, context, false, Allowed allowInMemMove) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, false, context) val code = codeBaseIndex @ sourceCode @ codeUntag @ [BlockSimple(StoreArgument {source=source, base=base, offset=offset, index=index, kind=movePolyWord, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreMLByte _, address, value}, context, _, destination) = let val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, true, context) (* We have to untag the value to store. *) val (valueCode, untagValue, valueArg) = codeAsUntaggedByte(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=valueArg, base=base, offset=offset, index=index, kind=MoveByte, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC8, address, value}, context, _, destination) = let (* Store a byte to C memory. Almost exactly the same as LoadStoreMLByte. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w1, context) val (valueCode, untagValue, valueArg) = codeAsUntaggedByte(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=valueArg, base=base, offset=offset, index=index, kind=MoveByte, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC16, address, value}, context, _, destination) = let (* Store a 16-bit value to C memory. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w2, context) (* We don't currently implement 16-bit constant moves so this must always be in a reg. 
*) val (valueCode, untagValue, valueArg) = codeAsUntaggedToReg(value, false, context) val code = codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueArg, base=base, offset=offset, index=index, kind=Move16Bit, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC32, address, value}, context, _, destination) = (* Store a 32-bit value. If this is 64-bit mode we untag it but if this is 32-bit mode we unbox it. *) let val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w4, context) val code = if targetArch = Native64Bit then let (* We don't currently implement 32-bit constant moves so this must always be in a reg. *) val (valueCode, untagValue, valueArg) = codeAsUntaggedToReg(value, false, context) in codeBaseIndex @ valueCode @ untagValue @ codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueArg, base=base, offset=offset, index=index, kind=Move32Bit, isMutable=true})] end else let val (valueCode, valueReg) = codeToPReg(value, context) val valueReg1 = newUReg() in codeBaseIndex @ valueCode @ BlockSimple(LoadArgument{source=wordAt valueReg, dest=valueReg1, kind=Move32Bit}) :: codeUntag @ [BlockSimple(StoreArgument {source=RegisterArgument valueReg1, base=base, offset=offset, index=index, kind=Move32Bit, isMutable=true})] end in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreC64, address, value}, context, _, destination) = let (* Store a 64-bit value. *) val _ = targetArch <> Native32Bit orelse raise InternalError "codeToICodeNonRev: BICStoreOperation LoadStoreC64 in 32-bit" val (valueCode, valueReg) = codeToPReg(value, context) val valueReg1 = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w8, context) val code = codeBaseIndex @ valueCode @ codeUntag @ [BlockSimple(LoadArgument{source=wordAt valueReg, dest=valueReg1, kind=Move64Bit}), BlockSimple(StoreArgument {source=RegisterArgument valueReg1, base=base, offset=offset, index=index, kind=Move64Bit, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreCFloat, address, value}, context, _, destination) = let val floatReg = newUReg() and float2Reg = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w4, context) val (valueCode, valueReg) = codeToPReg(value, context) (* If we're using an SSE2 reg we have to convert it from double to single precision. 
*) val (storeReg, cvtCode) = case fpMode of FPModeSSE2 => (float2Reg, [BlockSimple(SSE2FPUnary{opc=SSE2UDoubleToFloat, resultReg=float2Reg, source=RegisterArgument floatReg})]) | FPModeX87 => (floatReg, []) val code = codeBaseIndex @ valueCode @ codeUntag @ BlockSimple(LoadArgument{source=wordAt valueReg, dest=floatReg, kind=MoveDouble}) :: cvtCode @ [BlockSimple(StoreArgument {source=RegisterArgument storeReg, base=base, offset=offset, index=index, kind=MoveFloat, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreCDouble, address, value}, context, _, destination) = let val floatReg = newUReg() val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeCAddress(address, 0w8, context) val (valueCode, valueReg) = codeToPReg(value, context) val code = codeBaseIndex @ valueCode @ codeUntag @ [BlockSimple(LoadArgument{source=wordAt valueReg, dest=floatReg, kind=MoveDouble}), BlockSimple(StoreArgument {source=RegisterArgument floatReg, base=base, offset=offset, index=index, kind=MoveDouble, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | codeToICodeStore({kind=LoadStoreUntaggedUnsigned, address, value}, context, _, destination) = let (* We have to untag the value to store. *) val (codeBaseIndex, codeUntag, {base, offset, index, ...}) = codeAddress(address, false, context) (* See if it's a constant. This is frequently used to set the last word of a string to zero. *) (* We have to be a bit more careful on the X86. We use moves to store constants that can include addresses. To avoid problems we only use a move if the value is zero or odd and so looks like a tagged value. *) val storeAble = case value of BICConstnt(value, _) => if not(isShort value) then NONE else let val ival = Word.toLargeIntX(toShort value) in if targetArch = Native64Bit then if is32bit ival then SOME ival else NONE else if ival = 0 orelse ival mod 2 = 1 then SOME ival else NONE end | _ => NONE val (storeVal, valCode) = case storeAble of SOME value => (IntegerConstant value (* Leave untagged *), []) | NONE => let val valueReg = newPReg() and valueReg1 = newUReg() in (RegisterArgument valueReg1, codeToICodeTarget(value, context, false, valueReg) @ [BlockSimple(UntagValue{dest=valueReg1, source=valueReg, isSigned=false, cache=NONE, opSize=polyWordOpSize})]) end val code = codeBaseIndex @ valCode @ codeUntag @ [BlockSimple(StoreArgument {source=storeVal, base=base, offset=offset, index=index, kind=movePolyWord, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end and codeToICodeBlock({kind=BlockOpCompareByte, sourceLeft, destRight, length}, context, _, destination) = let (* This is effectively a big-endian comparison since we compare the bytes until we find an inequality. 
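      (* Note (illustrative): the result is delivered as a tagged three-way comparison,
         t(~1) if the left vector compares less, t(0) if all compared bytes are equal and
         t(1) if it compares greater.  The byte comparisons are unsigned, so for equal-length
         strings the ordering agrees with String.compare. *)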
*) val target = asTarget destination val mergeResult = newMergeReg() val vec1Reg = newUReg() and vec2Reg = newUReg() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddress(sourceLeft, true, context) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddress(destRight, true, context) val ccRef = newCCRef() val labLess = newLabel() and labGreater = newLabel() and exitLab = newLabel() val labNotLess = newLabel() and labNotGreater = newLabel() val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToReg(length, false (* unsigned *), context) val code = leftCode @ rightCode @ lengthCode @ leftUntag @ [BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg})] @ rightUntag @ [BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg})] @ lengthUntag @ [BlockSimple(CompareByteVectors{ vec1Addr=vec1Reg, vec2Addr=vec2Reg, length=lengthArg, ccRef=ccRef }), (* N.B. These are unsigned comparisons. *) BlockFlow(Conditional{ ccRef=ccRef, condition=JB, trueJump=labLess, falseJump=labNotLess }), BlockLabel labNotLess, BlockFlow(Conditional{ ccRef=ccRef, condition=JA, trueJump=labGreater, falseJump=labNotGreater }), BlockLabel labNotGreater, BlockSimple(LoadArgument{ source=IntegerConstant(tag 0), dest=mergeResult, kind=movePolyWord }), BlockFlow(Unconditional exitLab), BlockLabel labLess, BlockSimple(LoadArgument{ source=IntegerConstant(tag ~1), dest=mergeResult, kind=movePolyWord }), BlockFlow(Unconditional exitLab), BlockLabel labGreater, BlockSimple(LoadArgument{ source=IntegerConstant(tag 1), dest=mergeResult, kind=movePolyWord }), BlockLabel exitLab, BlockSimple(LoadArgument{ source=RegisterArgument mergeResult, dest=target, kind=movePolyWord })] in (code, RegisterArgument target, false) end | codeToICodeBlock({kind=BlockOpMove {isByteMove}, sourceLeft, destRight, length}, context, _, destination) = let (* Moves of 4 or 8 bytes can be done as word moves provided the alignment is correct. Although this will work for strings it is really to handle moves between SysWord and volatileRef in Foreign.Memory. Moves of 1, 2 or 3 bytes or words are converted into a sequence of byte or word moves. 
*) val isWordMove = case (isByteMove, length) of (true, BICConstnt(l, _)) => if not (isShort l) orelse (toShort l <> 0w4 andalso toShort l <> nativeWordSize) then NONE else let val leng = Word.toInt(toShort l) val moveKind = if toShort l = nativeWordSize then moveNativeWord else Move32Bit val isLeftAligned = case sourceLeft of {index=NONE, offset:int, ...} => offset mod leng = 0 | _ => false val isRightAligned = case destRight of {index=NONE, offset: int, ...} => offset mod leng = 0 | _ => false in if isLeftAligned andalso isRightAligned then SOME moveKind else NONE end | _ => NONE in case isWordMove of SOME moveKind => let val (leftCode, leftUntag, leftMem) = codeAddress(sourceLeft, isByteMove, context) val (rightCode, rightUntag, {base, offset, index, ...}) = codeAddress(destRight, isByteMove, context) val untaggedResReg = newUReg() val code = leftCode @ rightCode @ leftUntag @ rightUntag @ [BlockSimple(LoadArgument { source=MemoryLocation leftMem, dest=untaggedResReg, kind=moveKind}), BlockSimple(StoreArgument {source=RegisterArgument untaggedResReg, base=base, offset=offset, index=index, kind=moveKind, isMutable=true})] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end | _ => let val vec1Reg = newUReg() and vec2Reg = newUReg() val (leftCode, leftUntag, {base=leftBase, offset=leftOffset, index=leftIndex, ...}) = codeAddress(sourceLeft, isByteMove, context) val (rightCode, rightUntag, {base=rightBase, offset=rightOffset, index=rightIndex, ...}) = codeAddress(destRight, isByteMove, context) val (lengthCode, lengthUntag, lengthArg) = codeAsUntaggedToReg(length, false (* unsigned *), context) val code = leftCode @ rightCode @ lengthCode @ leftUntag @ [BlockSimple(loadAddress{base=leftBase, offset=leftOffset, index=leftIndex, dest=vec1Reg})] @ rightUntag @ [BlockSimple(loadAddress{base=rightBase, offset=rightOffset, index=rightIndex, dest=vec2Reg})] @ lengthUntag @ [BlockSimple(BlockMove{ srcAddr=vec1Reg, destAddr=vec2Reg, length=lengthArg, isByteMove=isByteMove })] in moveIfNotAllowed(destination, code, (* Unit result *) IntegerConstant(tag 0)) end end | codeToICodeBlock({kind=BlockOpEqualByte, ...}, _, _, _) = (* TODO: Move the code from codeToICodeRev. However, that is already reversed. *) raise InternalError "codeToICodeBlock - BlockOpEqualByte" (* Already done *) and codeConditionRev(condition, context, jumpOn, jumpLabel, tailCode) = (* General case. Load the value into a register and compare it with 1 (true) *) let val ccRef = newCCRef() val (testCode, testReg) = codeToPRegRev(condition, context, tailCode) val noJumpLabel = newLabel() in BlockLabel noJumpLabel :: BlockFlow(Conditional{ccRef=ccRef, condition=if jumpOn then JE else JNE, trueJump=jumpLabel, falseJump=noJumpLabel}) :: BlockSimple(CompareLiteral{arg1=RegisterArgument testReg, arg2=tag 1, opSize=OpSize32, ccRef=ccRef}) :: testCode end (* The fixed precision functions are also used for arbitrary precision but instead of raising Overflow we need to jump to the code that handles the long format. *) and codeFixedPrecisionArith(BuiltIns.ArithAdd, arg1, BICConstnt(value, _), context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. This should always be a tagged value if the type is correct. However it's possible for it not to be if we have an arbitrary precision value. There will be a run-time check that the value is short and so this code will never be executed. 
It will generally be edited out by the higher level but we can't rely on that. Because it's never executed we can just put in zero. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPReg(arg1, context) in arg1Code @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithAdd, BICConstnt(value, _), arg2, context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg2Code @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg2, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithAdd, arg1, arg2, context, target, onOverflow) = let val aReg3 = newPReg() and ccRef = newCCRef() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg1Code @ arg2Code @ (* Subtract the tag bit from the second argument, do the addition and check for overflow. *) (* TODO: We should really do the detagging in the transform phase. It can make a better choice of the argument if one of the arguments is already untagged or if we have a constant argument. *) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=RegisterArgument aReg2, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end (* Subtraction. We can handle the special case of the second argument being a constant but not the first. *) | codeFixedPrecisionArith(BuiltIns.ArithSub, arg1, BICConstnt(value, _), context, target, onOverflow) = let val ccRef = newCCRef() (* If the argument is a constant we can subtract the tag beforehand. Check for short - see comment above. *) val constVal = if isShort value then semitag(Word.toLargeIntX(toShort value)) else 0 val (arg1Code, aReg1) = codeToPReg(arg1, context) in arg1Code @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=target, operand1=aReg1, operand2=IntegerConstant constVal, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef end | codeFixedPrecisionArith(BuiltIns.ArithSub, arg1, arg2, context, target, onOverflow) = let val aReg3 = newPReg() val ccRef = newCCRef() val (arg1Code, aReg1) = codeToPReg(arg1, context) val (arg2Code, aReg2) = codeToPReg(arg2, context) in arg1Code @ arg2Code @ (* Do the subtraction, test for overflow and afterwards add in the tag bit. *) [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=aReg3, operand1=aReg1, operand2=RegisterArgument aReg2, ccRef=ccRef, opSize=polyWordOpSize})] @ onOverflow ccRef @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=aReg3, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, arg1, BICConstnt(value, _), context, target, onOverflow) = let val aReg = newPReg() and argUntagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* Is it better to untag the constant or the register argument? 
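      (* Worked example for the fixed-precision cases above, assuming t(x) = 2*x + 1: adding
         a semitagged constant, t(a) + 2*c = t(a + c), or first subtracting the tag from one
         operand, (t(a) - 1) + t(b) = t(a + b), keeps the result tagged, and the machine
         overflow flag tested by onOverflow is set exactly when the fixed-precision result is
         out of range. *)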
*) val constVal = if isShort value then Word.toLargeIntX(toShort value) else 0 in codeToICodeTarget(arg1, context, false, aReg) @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=argUntagged, operand1=aReg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=argUntagged, operand2=IntegerConstant constVal, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, BICConstnt(value, _), arg2, context, target, onOverflow) = let val aReg = newPReg() and argUntagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* Is it better to untag the constant or the register argument? *) val constVal = if isShort value then Word.toLargeIntX(toShort value) else 0 in codeToICodeTarget(arg2, context, false, aReg) @ [BlockSimple(ArithmeticFunction{oper=SUB, resultReg=argUntagged, operand1=aReg, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=argUntagged, operand2=IntegerConstant constVal, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithMult, arg1, arg2, context, target, onOverflow) = let val aReg1 = newPReg() and aReg2 = newPReg() and arg1Untagged = newUReg() and arg2Untagged = newUReg() and resUntagged = newUReg() val mulCC = newCCRef() (* This is almost the same as the word operation except we use a signed shift and check for overflow. *) in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift one argument and subtract the tag from the other. It's possible this could be reordered if we have a value that is already untagged. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true (* Signed shift here. *), cache=NONE, opSize=polyWordOpSize}), BlockSimple(ArithmeticFunction{oper=SUB, resultReg=arg2Untagged, operand1=aReg2, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize}), BlockSimple(Multiplication{resultReg=resUntagged, operand1=arg1Untagged, operand2=RegisterArgument arg2Untagged, ccRef=mulCC, opSize=polyWordOpSize} )] @ onOverflow mulCC @ [BlockSimple(ArithmeticFunction{oper=ADD, resultReg=target, operand1=resUntagged, operand2=IntegerConstant 1, ccRef=newCCRef(), opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithQuot, arg1, arg2, context, target, _) = let val aReg1 = newPReg() and aReg2 = newPReg() val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift both of the arguments to remove the tags. We don't test for zero here - that's done explicitly. 
*) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = true, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=quotient, dest=target, isSigned=true, opSize=polyWordOpSize})] end | codeFixedPrecisionArith(BuiltIns.ArithRem, arg1, arg2, context, target, _) = let (* Identical to Quot except that the result is the remainder. *) val aReg1 = newPReg() and aReg2 = newPReg() val arg1Untagged = newUReg() and arg2Untagged = newUReg() val quotient = newUReg() and remainder = newUReg() in codeToICodeTarget(arg1, context, false, aReg1) @ codeToICodeTarget(arg2, context, false, aReg2) @ (* Shift both of the arguments to remove the tags. *) [BlockSimple(UntagValue{source=aReg1, dest=arg1Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(UntagValue{source=aReg2, dest=arg2Untagged, isSigned=true, cache=NONE, opSize=polyWordOpSize}), BlockSimple(Division { isSigned = true, dividend=arg1Untagged, divisor=RegisterArgument arg2Untagged, quotient=quotient, remainder=remainder, opSize=polyWordOpSize }), BlockSimple(TagValue { source=remainder, dest=target, isSigned=true, opSize=polyWordOpSize})] end | codeFixedPrecisionArith(_, _, _, _, _, _) = raise InternalError "codeToICode: FixedPrecisionArith - unimplemented operation" (* Generate code for floating point arguments where one of the arguments must be in a register. If the first argument is in a register use that, if the second is in a register and it's commutative use that otherwise load the first argument into a register. *) and codeFPBinaryArgsRev(arg1, arg2, precision, commutative, context, tailCode) = let val (arg1Code, arg1Value) = codeFPArgument(arg1, precision, context, tailCode) val (arg2Code, arg2Value) = codeFPArgument(arg2, precision, context, arg1Code) in case (arg1Value, arg2Value, commutative) of (RegisterArgument fpReg, _, _) => (arg2Code, fpReg, arg2Value) | (_, RegisterArgument fpReg, Commutative) => (arg2Code, fpReg, arg1Value) | (arg1Val, _, _) => let val fpReg = newUReg() val moveOp = case precision of BuiltIns.PrecDouble => MoveDouble | BuiltIns.PrecSingle => MoveFloat in (BlockSimple(LoadArgument{source=arg1Val, dest=fpReg, kind=moveOp}) :: arg2Code, fpReg, arg2Value) end end (* Generate code to evaluate a floating point argument. The aim of this code is to avoid the overhead of untagging a short-precision floating point value in memory. *) and codeFPArgument(BICConstnt(value, _), _, _, tailCode) = let val argVal = (* Single precision constants in 64-bit mode are represented by the value shifted left 32 bits. A word is shifted left one bit so the result is 0w31. *) if isShort value then IntegerConstant(Word.toLargeInt(Word.>>(toShort value, 0w31))) else AddressConstant value in (tailCode, argVal) end | codeFPArgument(arg, precision, context, tailCode) = ( case (precision, wordSize) of (BuiltIns.PrecSingle, 0w8) => (* If this is a single precision value and the word size is 8 the values are tagged. If it is memory we can load the value directly from the high-order word. 
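      (* Sketch of the representation assumed here: in 64-bit mode a single-precision real is
         held in the upper 32 bits of a tagged word, with bit 0 as the tag.  toShort strips
         the tag bit, leaving the float bits in positions 31..62, so shifting right by 0w31
         (the constant case above) or loading the word at offset+4 (little-endian) recovers
         the raw 32-bit float. *)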
*) let val memOrReg = { anyConstant=false, const32s=false, memAddr=true, existingPreg=true } val (code, result, _) = codeToICodeRev(arg, context, false, Allowed memOrReg, tailCode) in case result of RegisterArgument argReg => let val fpReg = newUReg() in (BlockSimple(UntagFloat{source=RegisterArgument argReg, dest=fpReg, cache=NONE}) :: code, RegisterArgument fpReg) end | MemoryLocation{offset, base, index, ...} => (code, MemoryLocation{offset=offset+4, base=base, index=index, cache=NONE}) | _ => raise InternalError "codeFPArgument" end | _ => (* Otherwise the value is boxed. *) let val (argCode, argReg) = codeToPRegRev(arg, context, tailCode) in (argCode, wordAt argReg) end ) (* Code an address. The index is optional. *) and codeAddressRev({base, index=SOME index, offset}, true (* byte move *), context, tailCode) = let (* Byte address with index. The index needs to be untagged. *) val indexReg1 = newUReg() val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) val (codeIndex, indexReg) = codeToPRegRev(index, context, codeBase) val untagCode = [BlockSimple(UntagValue{dest=indexReg1, source=indexReg, isSigned=false, cache=NONE, opSize=polyWordOpSize})] val (codeLoadAddr, realBase) = if targetArch = ObjectId32Bit then let val addrReg = newUReg() in ([BlockSimple(LoadEffectiveAddress{ base=SOME baseReg, offset=0, index=ObjectIndex, dest=addrReg, opSize=nativeWordOpSize})], addrReg) end else ([], baseReg) val memResult = {base=realBase, offset=offset, index=MemIndex1 indexReg1, cache=NONE} in (codeLoadAddr @ codeIndex, untagCode, memResult) end | codeAddressRev({base, index=SOME index, offset}, false (* word move *), context, tailCode) = let (* Word address with index. We can avoid untagging the index by adjusting the multiplier and offset *) val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) val (codeIndex, indexReg) = codeToPRegRev(index, context, codeBase) val (codeLoadAddr, realBase) = if targetArch = ObjectId32Bit then let val addrReg = newUReg() in ([BlockSimple(LoadEffectiveAddress{ base=SOME baseReg, offset=0, index=ObjectIndex, dest=addrReg, opSize=nativeWordOpSize})], addrReg) end else ([], baseReg) val memResult = if wordSize = 0w8 then {base=realBase, offset=offset-4, index=MemIndex4 indexReg, cache=NONE} else {base=realBase, offset=offset-2, index=MemIndex2 indexReg, cache=NONE} in (codeLoadAddr @ codeIndex, [], memResult) end | codeAddressRev({base, index=NONE, offset}, _, context, tailCode) = let val (codeBase, baseReg) = codeToPRegRev(base, context, tailCode) val memResult = {offset=offset, base=baseReg, index=memIndexOrObject, cache=NONE} in (codeBase, [], memResult) end and codeAddress(addr, isByte, context) = let val (code, untag, res) = codeAddressRev(addr, isByte, context, []) in (List.rev code, untag, res) end (* C-memory operations are slightly different. The base address is a LargeWord.word value. The index is a byte index so may have to be untagged. *) and codeCAddress({base, index=SOME index, offset}, 0w1, context) = let (* Byte address with index. The index needs to be untagged. *) val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) and (codeIndex, indexReg) = codeToPReg(index, context) (* The index needs to untagged and, if necessary, sign-extended to the native word size. 
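(* Illustrative note on the index handling above and below: a tagged index i = 2*k + 1
   can often be used without untagging by halving the hardware scale and adjusting the
   offset, e.g. for 8-byte words
       base + (offset - 4) + 4*(2*k + 1)  =  base + offset + 8*k
   which is why the word-access cases subtract half the element size from the offset
   instead of emitting an UntagValue.  A byte index has no spare scale factor, so here
   it really is untagged (and on 32-in-64 first sign-extended to the native word size). *)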
*) val (untagCode, sxReg) = if targetArch = ObjectId32Bit then let val sReg1 = newUReg() and sReg2 = newUReg() in ([BlockSimple(SignExtend32To64{dest=sReg1, source=RegisterArgument indexReg}), BlockSimple(UntagValue{dest=sReg2, source=sReg1, isSigned=true, cache=NONE, opSize=nativeWordOpSize})], sReg2) end else let val sReg = newUReg() in ([BlockSimple(UntagValue{dest=sReg, source=indexReg, isSigned=true, cache=NONE, opSize=nativeWordOpSize})], sReg) end val loadCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] val memResult = {base=untaggedBaseReg, offset=offset, index=MemIndex1 sxReg, cache=NONE} in (codeBase @ codeIndex, loadCode @ untagCode, memResult) end | codeCAddress({base, index=SOME index, offset}, size, context) = let (* Non-byte address with index. By using an appropriate multiplier we can avoid having to untag the index. *) val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) and (codeIndex, indexReg) = codeToPReg(index, context) (* The index is signed i.e. negative index values are legal. We don't have to do anything special on the native code versions but on 32-in-64 we need to sign extend. *) val (untagCode, sxReg) = if targetArch = ObjectId32Bit then let val sReg = newUReg() in ([BlockSimple(SignExtend32To64{source=RegisterArgument indexReg, dest=sReg})], sReg) end else ([], indexReg) val loadCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] val memResult = case size of 0w2 => {base=untaggedBaseReg, offset=offset-1, index=MemIndex1 sxReg, cache=NONE} | 0w4 => {base=untaggedBaseReg, offset=offset-2, index=MemIndex2 sxReg, cache=NONE} | 0w8 => {base=untaggedBaseReg, offset=offset-4, index=MemIndex4 sxReg, cache=NONE} | _ => raise InternalError "codeCAddress: unknown size" in (codeBase @ codeIndex, loadCode @ untagCode, memResult) end | codeCAddress({base, index=NONE, offset}, _, context) = let val untaggedBaseReg = newUReg() val (codeBase, baseReg) = codeToPReg(base, context) val untagCode = [BlockSimple(LoadArgument{source=wordAt baseReg, dest=untaggedBaseReg, kind=moveNativeWord})] val memResult = {offset=offset, base=untaggedBaseReg, index=NoMemIndex, cache=NONE} in (codeBase, untagCode, memResult) end (* Return an untagged value. If we have a constant just return it. Otherwise return the code to evaluate the argument, the code to untag it and the reference to the untagged register. *) and codeAsUntaggedToRegRev(BICConstnt(value, _), isSigned, _, tailCode) = let (* Should always be short except for unreachable code. *) val untagReg = newUReg() val cval = if isShort value then toShort value else 0w0 val cArg = IntegerConstant(if isSigned then Word.toLargeIntX cval else Word.toLargeInt cval) (* Don't tag *) val untag = [BlockSimple(LoadArgument{source=cArg, dest=untagReg, kind=movePolyWord})] in (tailCode, untag, untagReg) (* Don't tag. *) end | codeAsUntaggedToRegRev(arg, isSigned, context, tailCode) = let val untagReg = newUReg() val (code, srcReg) = codeToPRegRev(arg, context, tailCode) val untag = [BlockSimple(UntagValue{source=srcReg, dest=untagReg, isSigned=isSigned, cache=NONE, opSize=polyWordOpSize})] in (code, untag, untagReg) end and codeAsUntaggedToReg(arg, isSigned, context) = let val (code, untag, untagReg) = codeAsUntaggedToRegRev(arg, isSigned, context, []) in (List.rev code, untag, untagReg) end (* Return the argument as an untagged value. 
We separate evaluating the argument from untagging because we may have to evaluate other arguments and that could involve a function call and we can't save the value to the stack after we've untagged it. Currently this is only used for byte values but we may have to be careful if we use it for word values on the X86. Moving an untagged value into a register might look like loading a constant address. *) and codeAsUntaggedByte(BICConstnt(value, _), isSigned, _) = let val cval = if isShort value then toShort value else 0w0 val cArg = IntegerConstant(if isSigned then Word.toLargeIntX cval else Word.toLargeInt cval) (* Don't tag *) in ([], [], cArg) end | codeAsUntaggedByte(arg, isSigned, context) = let val untagReg = newUReg() val (code, argReg) = codeToPReg(arg, context) val untag = [BlockSimple(UntagValue{source=argReg, dest=untagReg, isSigned=isSigned, cache=NONE, opSize=OpSize32})] in (code, untag, RegisterArgument untagReg) end (* Allocate memory. This is used both for true variable length cells and also for longer constant length cells. *) and allocateMemoryVariable(numWords, flags, initial, context, destination) = let val target = asTarget destination (* With the exception of flagReg all these registers are modified by the code. So, we have to copy the size value into a new register. *) val sizeReg = newPReg() and initReg = newPReg() val sizeReg2 = newPReg() val untagSizeReg = newUReg() and initAddrReg = newPReg() and allocReg = newPReg() val sizeCode = codeToICodeTarget(numWords, context, false, sizeReg) and (flagsCode, flagUntag, flagArg) = codeAsUntaggedByte(flags, false, context) (* We're better off deferring the initialiser if possible. If the value is a constant we don't have to save it. *) val (initCode, initResult, _) = codeToICode(initial, context, false, Allowed allowDefer) in (sizeCode @ flagsCode @ initCode @ [(* We need to copy the size here because AllocateMemoryVariable modifies the size in order to store the length word. This is unfortunate especially as we're going to untag it anyway. *) BlockSimple(LoadArgument{source=RegisterArgument sizeReg, dest=sizeReg2, kind=movePolyWord}), BlockSimple(AllocateMemoryVariable{size=sizeReg, dest=allocReg, saveRegs=[]})] @ flagUntag @ [BlockSimple(StoreArgument{ source=flagArg, base=allocReg, offset= ~1, index=memIndexOrObject, kind=MoveByte, isMutable=false}), (* We need to copy the address here because InitialiseMem modifies all its arguments. *) BlockSimple( if targetArch = ObjectId32Bit then LoadEffectiveAddress{ base=SOME allocReg, offset=0, index=ObjectIndex, dest=initAddrReg, opSize=nativeWordOpSize} else LoadArgument{source=RegisterArgument allocReg, dest=initAddrReg, kind=movePolyWord}), BlockSimple(UntagValue{source=sizeReg2, dest=untagSizeReg, isSigned=false, cache=NONE, opSize=polyWordOpSize}), BlockSimple(LoadArgument{source=initResult, dest=initReg, kind=movePolyWord}), BlockSimple(InitialiseMem{size=untagSizeReg, init=initReg, addr=initAddrReg}), BlockSimple InitialisationComplete, BlockSimple(LoadArgument{source=RegisterArgument allocReg, dest=target, kind=movePolyWord})], RegisterArgument target, false) end (*Turn the codetree structure into icode. *) val bodyContext = {loopArgs=NONE, stackPtr=0, currHandler=NONE, overflowBlock=ref NONE} val (bodyCode, _, bodyExited) = codeToICodeRev(body, bodyContext, true, SpecificPReg resultTarget, beginInstructions) val icode = if bodyExited then bodyCode else returnInstruction(bodyContext, resultTarget, bodyCode) (* Turn the icode list into basic blocks. 
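(* Illustrative sketch of the splitting below on a small reversed list, using hypothetical
   instructions i1..i3 and label 1:
       [BlockExit ret, BlockSimple i3, BlockLabel 1,
        BlockFlow(Unconditional 1), BlockSimple i2, BlockSimple i1, BlockBegin args]
   yields block 1 = [i3, ret] with flow ExitCode, created when BlockLabel 1 is reached,
   and block 0 = [BeginFunction args, i1, i2] with flow Unconditional 1.  Each BlockLabel
   closes the block accumulated so far; BlockFlow and BlockExit restart the accumulation
   and supply the flow recorded for the block being built. *)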
The input list is in reverse so as part of this we reverse the list. *) local val resArray = Array.array(!labelCounter, BasicBlock{ block=[], flow=ExitCode }) fun createEntry (blockNo, block, flow) = Array.update(resArray, blockNo, BasicBlock{ block=block, flow=flow}) fun splitCode([], _, _) = (* End of code. We should have had a BeginFunction. *) raise InternalError "splitCode - no begin" | splitCode(BlockBegin args :: _, sinceLabel, flow) = (* Final instruction. Create the initial block and exit. *) createEntry(0, BeginFunction args ::sinceLabel, flow) | splitCode(BlockSimple instr :: rest, sinceLabel, flow) = splitCode(rest, instr :: sinceLabel, flow) | splitCode(BlockLabel label :: rest, sinceLabel, flow) = (* Label - finish this block and start another. *) ( createEntry(label, sinceLabel, flow); (* Default to a jump to this label. That is used if we have assumed a drop-through. *) splitCode(rest, [], Unconditional label) ) | splitCode(BlockExit instr :: rest, _, _) = splitCode(rest, [instr], ExitCode) | splitCode(BlockFlow flow :: rest, _, _) = splitCode(rest, [], flow) | splitCode(BlockRaiseAndHandle(instr, handler) :: rest, _, _) = splitCode(rest, [instr], UnconditionalHandle handler) | splitCode(BlockOptionalHandle{call, handler, label} :: rest, sinceLabel, flow) = let (* A function call within a handler. This could go to the handler but if there is no exception will go to the next instruction. Also includes JumpLoop since the stack check could result in an Interrupt exception. *) in createEntry(label, sinceLabel, flow); splitCode(rest, [call], ConditionalHandle{handler=handler, continue=label}) end in val () = splitCode(icode, [], ExitCode) val resultVector = Array.vector resArray end open ICODETRANSFORM val pregProperties = Vector.fromList(List.rev(! pregPropList)) in codeICodeFunctionToX86{blocks = resultVector, functionName = name, pregProps = pregProperties, ccCount= ! ccRefCounter, debugSwitches = debugSwitches, resultClosure = resultClosure} end fun gencodeLambda(lambda, debugSwitches, closure) = let open DEBUG Universal (*val debugSwitches = [tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), tagInject assemblyCodeTag true] @ debugSwitches*) in codeFunctionToX86(lambda, debugSwitches, closure) end structure Foreign = X86FOREIGN structure Sharing = struct type backendIC = backendIC and bicLoadForm = bicLoadForm and argumentType = argumentType and closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86ICode.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86ICode.ML index c5093c1f..a2861608 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86ICode.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86ICode.ML @@ -1,945 +1,952 @@ (* - Copyright David C. J. Matthews 2016-19 + Copyright David C. J. Matthews 2016-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86ICode( structure X86CODE: X86CODESIG ): ICodeSig = struct open X86CODE open Address datatype fpMode = FPModeSSE2 | FPModeX87 (* For the moment use SSE2 only on X86/64. Not all 32-bit processors support SSE2. *) val fpMode: fpMode = case targetArch of Native32Bit => FPModeX87 | _ => FPModeSSE2 val (polyWordOpSize, nativeWordOpSize) = case targetArch of Native32Bit => (OpSize32, OpSize32) | Native64Bit => (OpSize64, OpSize64) | ObjectId32Bit => (OpSize32, OpSize64) datatype preg = PReg of int (* A pseudo-register - an abstract register. *) (* A location on the stack. May be more than word if this is a container or a handler entry. *) datatype stackLocn = StackLoc of {size: int, rno: int } (* This combines pregKind and stackLocn. *) datatype regProperty = RegPropGeneral (* A general register. *) | RegPropUntagged (* An untagged general register. *) | RegPropStack of int (* A stack location or container. *) | RegPropCacheTagged | RegPropCacheUntagged | RegPropMultiple (* The result of a conditional or case. May be defined at multiple points. *) datatype argument = RegisterArgument of preg | AddressConstant of machineWord (* A constant that is an address. *) | IntegerConstant of LargeInt.int (* A non-address constant. Will usually be shifted and tagged. *) | MemoryLocation of { base: preg, offset: int, index: memoryIndex, cache: preg option } (* A memory location. Could be the stack. *) (* Offset on the stack. The container is the stack location identifier, the field is an offset in a container. cache is an optional cache register. *) | StackLocation of { wordOffset: int, container: stackLocn, field: int, cache: preg option } (* Address of a container. *) | ContainerAddr of { container: stackLocn, stackOffset: int } (* Generally this indicates the index register if present. For 32-in-64 the "index" may be ObjectIndex in which case the base is actually an object index. *) and memoryIndex = NoMemIndex | MemIndex1 of preg | MemIndex2 of preg | MemIndex4 of preg | MemIndex8 of preg | ObjectIndex (* Kinds of moves. Move32Bit - 32-bit loads and stores Move64Bit - 64-bit loads and stores MoveByte - When loading, load a byte and zero extend. Move16Bit - Used for C-memory loads and stores. Zero extends on load. MoveFloat - Load and store a single-precision value MoveDouble - Load and store a double-precision value. *) datatype moveKind = MoveByte | Move16Bit | Move32Bit | Move64Bit | MoveFloat | MoveDouble (* The reference to a condition code. *) and ccRef = CcRef of int val (movePolyWord, moveNativeWord) = case targetArch of Native32Bit => (Move32Bit, Move32Bit) | Native64Bit => (Move64Bit, Move64Bit) | ObjectId32Bit => (Move32Bit, Move64Bit) datatype boxKind = BoxLargeWord | BoxSSE2Double | BoxSSE2Float | BoxX87Double | BoxX87Float (* Size of operand. OpSize64 is only valid in 64-bit mode. *) datatype opSize = datatype opSize datatype sse2UnaryOps = SSE2UDoubleToFloat | SSE2UFloatToDouble and sse2BinaryOps = SSE2BAddDouble | SSE2BSubDouble | SSE2BMulDouble | SSE2BDivDouble | SSE2BXor | SSE2BAnd | SSE2BAddSingle | SSE2BSubSingle | SSE2BMulSingle | SSE2BDivSingle datatype callKinds = Recursive | ConstantCode of machineWord | FullCall datatype x86ICode = (* Move a value into a register. *) LoadArgument of { source: argument, dest: preg, kind: moveKind } (* Store a value into memory. 
The source will usually be a register but could be a constant depending on the value. If isMutable is true we're assigning to a ref and we need to flush the memory cache. *) | StoreArgument of { source: argument, base: preg, offset: int, index: memoryIndex, kind: moveKind, isMutable: bool } (* Load an entry from the "memory registers". Used for ThreadSelf and AllocCStack. *) | LoadMemReg of { offset: int, dest: preg, kind: moveKind } (* Store a value into an entry in the "memory registers". Used for AllocCStack/FreeCStack. *) | StoreMemReg of { offset: int, source: preg, kind: moveKind } (* Start of function. Set the register arguments. stackArgs is the list of stack arguments. The last entry is the return address. If the function has a real closure regArgs includes the closure register (rdx). *) | BeginFunction of { regArgs: (preg * reg) list, stackArgs: stackLocn list } (* Call a function. If the code address is a constant it is passed here. Otherwise the address is obtained by indirecting through rdx which has been loaded as one of the argument registers. The result is stored in the destination register. *) | FunctionCall of { callKind: callKinds, regArgs: (argument * reg) list, stackArgs: argument list, dest: preg, realDest: reg, saveRegs: preg list} (* Jump to a tail-recursive function. This is similar to FunctionCall but complicated for stack arguments because the stack and the return address need to be overwritten. stackAdjust is the number of words to remove (positive) or add (negative) to the stack before the call. currStackSize contains the number of items currently on the stack. *) | TailRecursiveCall of { callKind: callKinds, regArgs: (argument * reg) list, stackArgs: {src: argument, stack: int} list, stackAdjust: int, currStackSize: int, workReg: preg } (* Allocate a fixed sized piece of memory. The size is the number of words required. This sets the length word including the flags bits. saveRegs is the list of registers that need to be saved if we need to do a garbage collection. *) | AllocateMemoryOperation of { size: int, flags: Word8.word, dest: preg, saveRegs: preg list } (* Allocate a piece of memory whose size is not known at compile-time. The size argument is the number of words. *) | AllocateMemoryVariable of { size: preg, dest: preg, saveRegs: preg list } (* Initialise a piece of memory. N.B. The size is an untagged value containing the number of words. This uses REP STOSL/Q so addr must be rdi, size must be rcx and init must be rax. *) | InitialiseMem of { size: preg, addr: preg, init: preg } (* Signal that a tuple has been fully initialised. Really a check in the low-level code-generator. *) | InitialisationComplete (* Mark the beginning of a loop. This is really only to prevent the initialisation code being duplicated in ICodeOptimise. *) | BeginLoop (* Set up the registers for a jump back to the start of a loop. *) | JumpLoop of { regArgs: (argument * preg) list, stackArgs: (argument * int * stackLocn) list, checkInterrupt: preg list option, workReg: preg option } (* Raise an exception. The packet is always loaded into rax. *) | RaiseExceptionPacket of { packetReg: preg } (* Reserve a contiguous area on the stack to receive a result tuple. *) | ReserveContainer of { size: int, container: stackLocn } (* Indexed case. *) | IndexedCaseOperation of { testReg: preg, workReg: preg } (* Lock a mutable cell by turning off the mutable bit. *) | LockMutable of { addr: preg } (* Compare two word values. The first argument must be a register. 
*) | WordComparison of { arg1: preg, arg2: argument, ccRef: ccRef, opSize: opSize } (* Compare with a literal. This is generally used to compare a memory or stack location with a literal and overlaps to some extent with WordComparison. *) | CompareLiteral of { arg1: argument, arg2: LargeInt.int, opSize: opSize, ccRef: ccRef } (* Compare a byte location with a literal. This is the only operation that specifically deals with single bytes. Other cases will use word operations. *) | CompareByteMem of { arg1: { base: preg, offset: int, index: memoryIndex }, arg2: Word8.word, ccRef: ccRef } (* Exception handling. - Set up an exception handler. *) | PushExceptionHandler of { workReg: preg } (* End of a handled section. Restore the previous handler. *) | PopExceptionHandler of { workReg: preg } (* Marks the start of a handler. This sets the stack pointer and restores the old handler. Sets the exception packet register. *) | BeginHandler of { packetReg: preg, workReg: preg } (* Return from the function. *) | ReturnResultFromFunction of { resultReg: preg, realReg: reg, numStackArgs: int } (* Arithmetic or logical operation. These can set the condition codes. *) | ArithmeticFunction of { oper: arithOp, resultReg: preg, operand1: preg, operand2: argument, ccRef: ccRef, opSize: opSize } (* Test the tag bit of a word. Sets the Zero bit if the value is an address i.e. untagged. *) | TestTagBit of { arg: argument, ccRef: ccRef } (* Push a value to the stack. Added during translation phase. *) | PushValue of { arg: argument, container: stackLocn } (* Copy a value to a cache register. LoadArgument could be used for this but it may be better to keep it separate. *) | CopyToCache of { source: preg, dest: preg, kind: moveKind } (* Remove items from the stack. Added during translation phase. *) | ResetStackPtr of { numWords: int, preserveCC: bool } (* Store a value into the stack. *) | StoreToStack of { source: argument, container: stackLocn, field: int, stackOffset: int } (* Tag a value by shifting and setting the tag bit. *) | TagValue of { source: preg, dest: preg, isSigned: bool, opSize: opSize } (* Shift a value to remove the tag bit. The cache is used if this is untagging a value that has previously been tagged. *) | UntagValue of { source: preg, dest: preg, isSigned: bool, cache: preg option, opSize: opSize } (* This provides the LEA instruction which can be used for various sorts of arithmetic. The base register is optional in this case. *) | LoadEffectiveAddress of { base: preg option, offset: int, index: memoryIndex, dest: preg, opSize: opSize } (* Shift a word by an amount that can either be a constant or a register. *) | ShiftOperation of { shift: shiftType, resultReg: preg, operand: preg, shiftAmount: argument, ccRef: ccRef, opSize: opSize } (* Multiplication. We can use signed multiplication for both fixed precision and word (unsigned) multiplication. There are various forms of the instruction including a three-operand version. *) | Multiplication of { resultReg: preg, operand1: preg, operand2: argument, ccRef: ccRef, opSize: opSize } (* Division. This takes a register pair, always RDX:RAX, divides it by the operand register and puts the quotient in RAX and remainder in RDX. At the preg level we represent all of these by pRegs. The divisor can be either a register or a memory location. *) | Division of { isSigned: bool, dividend: preg, divisor: argument, quotient: preg, remainder: preg, opSize: opSize } (* Atomic exchange and addition. 
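(* Illustrative note: this corresponds to the x86 XADD instruction which, executed with a
   lock prefix, behaves as a single atomic step roughly as
       resultReg := mem[base];   mem[base] := mem[base] + source
   Returning the old value in resultReg is presumably what lets the mutex spin-lock code
   decide whether the lock was already held. *)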
This is executed with a lock prefix and is used for atomic increment and decrement for mutexes. Before the operation the source contains an increment. After the operation - the source contains the old value of the destination and the destination + the resultReg contains the old value of the destination and the destination has been updated with its old value added to the increment. The destination is actually the word pointed at by "base". *) - | AtomicExchangeAndAdd of { base: preg, source: preg } + | AtomicExchangeAndAdd of { base: preg, source: preg, resultReg: preg } (* Create a "box" of a single-word "byte" cell and store the source into it. This can be implemented using AllocateMemoryOperation but the idea is to allow the transform layer to recognise when a value is being boxed and then unboxed and remove unnecessary allocation. *) | BoxValue of { boxKind: boxKind, source: preg, dest: preg, saveRegs: preg list } (* Compare two vectors of bytes and set the condition code on the result. In general vec1Addr and vec2Addr will be pointers inside memory cells so have to be untagged registers. *) | CompareByteVectors of { vec1Addr: preg, vec2Addr: preg, length: preg, ccRef: ccRef } (* Move a block of bytes (isByteMove true) or words (isByteMove false). The length is the number of items (bytes or words) to move. *) | BlockMove of { srcAddr: preg, destAddr: preg, length: preg, isByteMove: bool } (* Floating point comparison. *) | X87Compare of { arg1: preg, arg2: argument, isDouble: bool, ccRef: ccRef } (* Floating point comparison. *) | SSE2Compare of { arg1: preg, arg2: argument, isDouble: bool, ccRef: ccRef } (* The X87 FP unit does not generate condition codes directly. We have to load the cc into RAX and test it there. *) | X87FPGetCondition of { ccRef: ccRef, dest: preg } (* Binary floating point operations on the X87. *) | X87FPArith of { opc: fpOps, resultReg: preg, arg1: preg, arg2: argument, isDouble: bool } (* Floating point operations: negate and set sign positive. *) | X87FPUnaryOps of { fpOp: fpUnaryOps, dest: preg, source: preg } (* Load a fixed point value as a floating point value. *) | X87Float of { dest: preg, source: argument } (* Load a fixed point value as a floating point value. *) | SSE2Float of { dest: preg, source: argument } (* Binary floating point operations using SSE2 instructions. *) | SSE2FPUnary of { opc: sse2UnaryOps, resultReg: preg, source: argument } (* Binary floating point operations using SSE2 instructions. *) | SSE2FPBinary of { opc: sse2BinaryOps, resultReg: preg, arg1: preg, arg2: argument } (* Tag a 32-bit floating point value. This is tagged by shifting left 32-bits and then setting the bottom bit. This allows memory operands to be untagged simply by loading the high-order word. *) | TagFloat of { source: preg, dest: preg } (* Untag a 32-bit floating point value into a XMM register. If the source is in memory we just need to load the high-order word. *) | UntagFloat of { source: argument, dest: preg, cache: preg option } (* Get and set the control registers. These all have to work through memory but it's simpler to assume they work through registers. *) | GetSSE2ControlReg of { dest: preg } | SetSSE2ControlReg of { source: preg } | GetX87ControlReg of { dest: preg } | SetX87ControlReg of { source: preg } (* Convert a floating point value to an integer. *) | X87RealToInt of { source: preg, dest: preg } (* Convert a floating point value to an integer. 
*) | SSE2RealToInt of { source: argument, dest: preg, isDouble: bool, isTruncate: bool } (* Sign extend a 32-bit value to 64 bits. Not included in LoadArgument because that assumes that if we have the result in a register we can simply reuse the register. *) | SignExtend32To64 of { source: argument, dest: preg } (* Touch an entry. Actually doesn't do anything except make sure it is referenced. *) | TouchArgument of { source: preg } + (* Pause instruction - used only in mutex spinlock *) + | PauseCPU + (* Destinations at the end of a basic block. *) and controlFlow = (* Unconditional branch to a label - should be a merge point. *) Unconditional of int (* Conditional branch. Jumps to trueJump if the condition is true, falseJump if false. *) | Conditional of { ccRef: ccRef, condition: branchOps, trueJump: int, falseJump: int } (* Exit - the last instruction of the block is a return, raise or tailcall. *) | ExitCode (* Indexed case - this branches to one of a number of labels *) | IndexedBr of int list (* Set up a handler. This doesn't cause an immediate branch but the state at the start of the handler is the state at this point. *) | SetHandler of { handler: int, continue: int } (* Unconditional branch to a handler. If an exception is raised explicitly within the scope of a handler. *) | UnconditionalHandle of int (* Conditional branch to a handler. Occurs if there is a call to a function within the scope of a handler. It may jump to the handler. *) | ConditionalHandle of { handler: int, continue: int } and basicBlock = BasicBlock of { block: x86ICode list, flow: controlFlow } (* Return the list of blocks that are the immediate successor of this. *) fun successorBlocks(Unconditional l) = [l] | successorBlocks(Conditional{trueJump, falseJump, ...}) = [trueJump, falseJump] | successorBlocks ExitCode = [] | successorBlocks(IndexedBr cases) = cases | successorBlocks(SetHandler{handler, continue, ...}) = [handler, continue] (* We only need "handler" in SetHandler because we may have a handler that is never actually jumped to.
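(* Illustrative note on PauseCPU above: PAUSE is the x86 spin-wait hint (encoded as
   REP NOP, so it is harmless on processors that do not recognise it).  Putting it in
   the body of a spin loop reduces power consumption and the memory-order mis-speculation
   penalty when the awaited location finally changes; it does not yield the thread. *)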
*) | successorBlocks(UnconditionalHandle handler) = [handler] | successorBlocks(ConditionalHandle{handler, continue, ...}) = [handler, continue] datatype destinations = RegDest of reg | StackDest of int local fun printReg(PReg i, stream) = stream("R" ^ Int.toString i) and printCC(CcRef ccRef, stream) = stream ("CC" ^ Int.toString ccRef) fun printIndex(NoMemIndex, _) = () | printIndex(MemIndex1 i, stream) = (stream "["; printReg(i, stream); stream "*1]") | printIndex(MemIndex2 i, stream) = (stream "["; printReg(i, stream); stream "*2]") | printIndex(MemIndex4 i, stream) = (stream "["; printReg(i, stream); stream "*4]") | printIndex(MemIndex8 i, stream) = (stream "["; printReg(i, stream); stream "*8]") | printIndex(ObjectIndex, stream) = stream "[objectindex]" fun printStackLoc(StackLoc{size, rno}, stream) = (stream "S"; stream(Int.toString rno); stream "("; stream(Int.toString size); stream ")") fun printOpsize(OpSize32, stream) = stream "32" | printOpsize(OpSize64, stream) = stream "64" fun printArg(RegisterArgument reg, stream) = printReg(reg, stream) | printArg(AddressConstant m, stream) = stream(stringOfWord m) | printArg(IntegerConstant i, stream) = stream(LargeInt.toString i) | printArg(MemoryLocation{base, offset, index, cache, ...}, stream) = ( stream(Int.toString offset ^ "("); printReg(base, stream); stream ")"; printIndex(index, stream); case cache of NONE => () | SOME r => (stream " cache "; printReg(r, stream)) ) | printArg(StackLocation{wordOffset, container, field, cache, ...}, stream) = ( printStackLoc(container, stream); stream " + "; stream(Int.toString field); stream " ("; stream(Int.toString wordOffset); stream ")"; case cache of NONE => () | SOME r => (stream " cache "; printReg(r, stream)) ) | printArg(ContainerAddr{stackOffset, container}, stream) = ( stream "@"; printStackLoc(container, stream); stream " ("; stream(Int.toString stackOffset); stream ")" ) fun printSaves([], _) = () | printSaves([areg], stream) = printReg(areg, stream) | printSaves(areg::more, stream) = (printReg(areg, stream); stream ","; printSaves(more, stream)) fun printKind(Move64Bit, stream) = stream "64Bit" | printKind(MoveByte, stream) = stream "Byte" | printKind(Move16Bit, stream) = stream "16Bit" | printKind(Move32Bit, stream) = stream "32Bit" | printKind(MoveFloat, stream) = stream "Float" | printKind(MoveDouble, stream) = stream "Double"; fun printICode(LoadArgument{source, dest, kind}, stream) = ( stream "\tLoad"; printKind(kind, stream); stream "\t"; printArg(source, stream); stream " => "; printReg(dest, stream) ) | printICode(StoreArgument{source, base, offset, index, kind, ...}, stream) = ( case kind of Move64Bit => stream "\tStore64Bit\t" | MoveByte => stream "\tStoreByte\t" | Move16Bit => stream "\tStore16Bit\t" | Move32Bit => stream "\tStore32Bit\t" | MoveFloat => stream "\tStoreFloat\t" | MoveDouble => stream "\tStoreDouble\t"; printArg(source, stream); stream " => "; stream(Int.toString offset ^ "("); printReg(base, stream); stream ")"; printIndex(index, stream) ) | printICode(LoadMemReg { offset, dest, kind}, stream) = ( stream "\tLoadMemReg"; printKind(kind, stream); stream "\t"; stream(Int.toString offset); stream " => "; printReg(dest, stream) ) | printICode(StoreMemReg { offset, source, kind}, stream) = ( stream "\tStoreMemReg"; printKind(kind, stream); stream "\t"; printReg(source, stream); stream " => "; stream(Int.toString offset) ) | printICode(BeginFunction {regArgs, stackArgs}, stream) = ( stream "\tBeginFunction\t"; List.app(fn (arg, r) => (stream(regRepr r); stream 
"="; printReg(arg, stream); stream " ")) regArgs; List.app(fn s => printStackLoc(s, stream)) stackArgs ) | printICode(FunctionCall{callKind, regArgs, stackArgs, dest, realDest, saveRegs}, stream) = ( stream "\tFunctionCall\t"; case callKind of Recursive => stream "recursive " | ConstantCode m => (stream(stringOfWord m); stream " ") | FullCall => (); List.app(fn (arg, r) => (stream(regRepr r); stream "="; printArg(arg, stream); stream " ")) regArgs; List.app(fn arg => (stream "p="; printArg(arg, stream); stream " ")) stackArgs; stream "=> "; printReg(dest, stream); stream "="; stream(regRepr realDest); stream " save="; printSaves(saveRegs, stream) ) | printICode(TailRecursiveCall{callKind, regArgs, stackArgs, stackAdjust, currStackSize, workReg}, stream) = ( stream "\tTailCall\t"; case callKind of Recursive => stream "recursive " | ConstantCode m => (stream(stringOfWord m); stream " ") | FullCall => (); List.app(fn (arg, r) => (stream(regRepr r); stream "="; printArg(arg, stream); stream " ")) regArgs; List.app(fn {src, stack} => (stream (Int.toString stack); stream "<="; printArg(src, stream); stream " ")) stackArgs; stream "adjust="; stream(Int.toString stackAdjust); stream "stackSize="; stream(Int.toString currStackSize); stream " work reg="; printReg(workReg, stream) ) | printICode(AllocateMemoryOperation{size, flags, dest, saveRegs}, stream) = ( stream "\tAllocateMemory\t"; stream(concat["s=", Int.toString size, ",f=", Word8.toString flags, " => "]); printReg(dest, stream); stream " save="; printSaves(saveRegs, stream) ) | printICode(AllocateMemoryVariable{size, dest, saveRegs}, stream) = ( stream "\tAllocateMemory\t"; stream "s="; printReg(size, stream); stream " => "; printReg(dest, stream); stream " save="; printSaves(saveRegs, stream) ) | printICode(InitialiseMem{size, addr, init}, stream) = ( stream "\tInitialiseMem\t"; stream "s="; printReg(size, stream); stream ",i="; printReg(init, stream); stream ",a="; printReg(addr, stream) ) | printICode(InitialisationComplete, stream) = stream "\tInitComplete" | printICode(BeginLoop, stream) = stream "\tBeginLoop" | printICode(JumpLoop{regArgs, stackArgs, checkInterrupt, workReg, ... 
}, stream) = ( stream "\tJumpLoop\t"; List.app( fn (source, loopReg) => (printReg(loopReg, stream); stream "="; printArg(source, stream); stream " ") ) regArgs; List.app( fn (source, stack, stackLocn) => (printStackLoc(stackLocn, stream); stream("(sp" ^ Int.toString stack); stream ")="; printArg(source, stream); stream " ") ) stackArgs; case checkInterrupt of NONE => () | SOME saveRegs => (stream " Check:save="; printSaves(saveRegs, stream)); case workReg of NONE => () | SOME r => (stream " work reg="; printReg(r, stream)) ) | printICode(RaiseExceptionPacket{packetReg}, stream) = (stream "\tRaise\t"; printReg(packetReg, stream)) | printICode(ReserveContainer{size, container}, stream) = (stream "\tReserveContainer\t"; stream(Int.toString size); stream " => "; printStackLoc(container, stream)) | printICode(IndexedCaseOperation{testReg, workReg}, stream) = ( stream "\tIndexedCase\t"; stream "test="; printReg(testReg, stream); stream "work="; printReg(workReg, stream) ) | printICode(LockMutable{addr}, stream) = (stream "\tLockMutable\t"; printReg(addr, stream)) | printICode(WordComparison{arg1, arg2, ccRef, opSize, ...}, stream) = ( stream "\tWordComparison"; printOpsize(opSize, stream); stream "\t"; printReg(arg1, stream); stream ","; printArg(arg2, stream); stream " => "; printCC(ccRef, stream) ) | printICode(CompareLiteral{arg1, arg2, opSize, ccRef, ...}, stream) = ( stream "\tCompareLiteral"; printOpsize(opSize, stream); stream "\t"; printArg(arg1, stream); stream ","; stream(LargeInt.toString arg2); stream " => "; printCC(ccRef, stream) ) | printICode(CompareByteMem{arg1={base, offset, index, ...}, arg2, ccRef,...}, stream) = ( stream "\tCompareByteMem\t"; stream(Int.toString offset ^ "("); printReg(base, stream); stream ")"; printIndex(index, stream); stream ","; stream(Word8.toString arg2); stream " => "; printCC(ccRef, stream) ) | printICode(PushExceptionHandler{workReg }, stream) = ( stream "\tPushExcHandler\twith "; printReg(workReg, stream) ) | printICode(PopExceptionHandler{workReg}, stream) = ( stream "\tPopExceptionHandler\t"; stream "with "; printReg(workReg, stream) ) | printICode(BeginHandler{packetReg, workReg}, stream) = ( stream "\tBeginHandler\t"; printReg(packetReg, stream); stream " with "; printReg(workReg, stream) ) | printICode(ReturnResultFromFunction{resultReg, realReg, numStackArgs}, stream) = ( stream "\tReturnFromFunction\t"; printReg(resultReg, stream); stream "="; stream(regRepr realReg); stream("," ^ Int.toString numStackArgs) ) | printICode(ArithmeticFunction{oper, resultReg, operand1, operand2, ccRef, opSize, ...}, stream) = ( case oper of ADD => stream "\tAdd" | OR => stream "\tOrBits" | AND => stream "\tAndBits" | SUB => stream "\tSubtract" | XOR => stream "\tExclusiveOrBits" | CMP => stream "\tCompare"; printOpsize(opSize, stream); stream "\t"; printReg(operand1, stream); stream ","; printArg(operand2, stream); stream " => "; printReg(resultReg, stream); stream " => "; printCC(ccRef, stream) ) | printICode(TestTagBit{arg, ccRef, ...}, stream) = (stream "\tTestTagBit\t"; printArg(arg, stream); stream " => "; printCC(ccRef, stream)) | printICode(PushValue{arg, container}, stream) = (stream "\tPushValue\t"; printArg(arg, stream); stream " => "; printStackLoc(container, stream)) | printICode(CopyToCache{source, dest, kind}, stream) = (stream "\tCopyToCache"; printKind(kind, stream); stream "\t"; printReg(source, stream); stream " => "; printReg(dest, stream)) | printICode(ResetStackPtr{numWords, preserveCC}, stream) = ( stream "\tResetStackPtr\t"; 
stream(Int.toString numWords); if preserveCC then stream " preserve CC" else () ) | printICode(StoreToStack{source, container, field, stackOffset}, stream) = ( stream "\tStoreToStack\t"; printArg(source, stream); stream " => "; printStackLoc(container, stream); stream "+"; stream (Int.toString field); stream "("; stream(Int.toString stackOffset); stream ")" ) | printICode(TagValue{source, dest, opSize, ...}, stream) = ( stream "\tTagValue"; printOpsize(opSize, stream); stream "\t"; printReg(source, stream); stream " => "; printReg(dest, stream) ) | printICode(UntagValue{source, dest, isSigned, cache, opSize}, stream) = ( stream "\tUntag"; stream(if isSigned then "Signed" else "Unsigned"); printOpsize(opSize, stream); stream "\t"; printReg(source, stream); stream " => "; printReg(dest, stream); case cache of NONE => () | SOME c => (stream " cache "; printReg(c, stream)) ) | printICode(LoadEffectiveAddress{base, offset, index, dest, opSize}, stream) = ( stream "\tLoadEffectiveAddr"; printOpsize(opSize, stream); stream "\t"; stream(Int.toString offset ^ "("); case base of NONE => stream "_" | SOME b => printReg(b, stream); stream ")"; printIndex(index, stream); stream " => "; printReg(dest, stream) ) | printICode(ShiftOperation{shift, resultReg, operand, shiftAmount, ccRef, opSize, ...}, stream) = ( case shift of SHL => stream "\tShiftLeft" | SHR => stream "\tShiftRLogical" | SAR => stream "\tShiftRArith"; printOpsize(opSize, stream); stream "\t"; printReg(operand, stream); stream ","; printArg(shiftAmount, stream); stream " => "; printReg(resultReg, stream); stream " => "; printCC(ccRef, stream) ) | printICode(Multiplication{resultReg, operand1, operand2, ccRef, opSize, ...}, stream) = ( stream "\tMultiplication"; printOpsize(opSize, stream); stream "\t"; printReg(operand1, stream); stream ","; printArg(operand2, stream); stream " => "; printReg(resultReg, stream); stream " => "; printCC(ccRef, stream) ) | printICode(Division{isSigned, dividend, divisor, quotient, remainder, opSize}, stream) = ( stream "\tDivision"; stream(if isSigned then "Signed\t" else "Unsigned\t"); printOpsize(opSize, stream); stream "\t"; printReg(dividend, stream); stream " by "; printArg(divisor, stream); stream " => "; printReg(quotient, stream); stream " rem "; printReg(remainder, stream) ) - | printICode(AtomicExchangeAndAdd{base, source}, stream) = + | printICode(AtomicExchangeAndAdd{base, source, resultReg}, stream) = ( stream "\tAtomicExchangeAdd\t"; stream "addr=0("; printReg(base, stream); - stream "),with="; printReg(source, stream) + stream "), + "; printReg(source, stream); + stream " => "; + printReg(resultReg, stream) ) | printICode(BoxValue{boxKind, source, dest, saveRegs}, stream) = ( stream( case boxKind of BoxLargeWord => "\tBoxLarge\t" | BoxX87Double => "\tBoxX87Double\t" | BoxX87Float => "\tBoxX87Float\t" | BoxSSE2Double => "\tBoxSSE2Double\t" | BoxSSE2Float => "\tBoxSSE2Float\t" ); printReg(source, stream); stream " => "; printReg(dest, stream); stream " save="; printSaves(saveRegs, stream) ) | printICode(CompareByteVectors{vec1Addr, vec2Addr, length, ccRef, ...}, stream) = ( stream "\tCompareByteVectors\t"; printReg(vec1Addr, stream); stream ","; printReg(vec2Addr, stream); stream ","; printReg(length, stream); stream " => "; printCC(ccRef, stream) ) | printICode(BlockMove{srcAddr, destAddr, length, isByteMove}, stream) = ( stream(if isByteMove then "\tBlockByteMove\t" else "\tBlockWordMove\t"); stream "src="; printReg(srcAddr, stream); stream ",dest="; printReg(destAddr, stream); stream ",len="; 
printReg(length, stream) ) | printICode(X87Compare{arg1, arg2, isDouble, ccRef, ...}, stream) = ( stream "\tX87Compare"; stream(if isDouble then "D\t" else "S\t"); printReg(arg1, stream); stream ","; printArg(arg2, stream); stream " => "; printCC(ccRef, stream) ) | printICode(SSE2Compare{arg1, arg2, isDouble, ccRef, ...}, stream) = ( stream "\tSSE2Compare"; stream(if isDouble then "D\t" else "S\t"); printReg(arg1, stream); stream ","; printArg(arg2, stream); stream " => "; printCC(ccRef, stream) ) | printICode(X87FPGetCondition{dest, ccRef, ...}, stream) = (stream "\tX87FPGetCondition\t=> "; printReg(dest, stream); stream " => "; printCC(ccRef, stream)) | printICode(X87FPArith{opc, resultReg, arg1, arg2, isDouble}, stream) = ( case opc of FADD => stream "\tX87FPAdd" | FMUL => stream "\tX87FPMul" | FCOM => stream "\tX87FPCompare" | FCOMP => stream "\tX87FPComparePop" | FSUB => stream "\tX87FPSub" | FSUBR => stream "\tX87FPRevSub" | FDIV => stream "\tX87FPDiv" | FDIVR => stream "\tX87FPRevDiv"; if isDouble then stream "D\t" else stream "S\t"; printReg(arg1, stream); stream ","; printArg(arg2, stream); stream " => "; printReg(resultReg, stream) ) | printICode(X87FPUnaryOps{fpOp, dest, source}, stream) = ( case fpOp of FABS => stream "\tX87FPAbs\t" | FCHS => stream "\tX87FPNegate\t" | FLD1 => stream "\tX87FPLoad1\t" | FLDZ => stream "\tX87FPLoad0\t"; printReg(source, stream); stream " => "; printReg(dest, stream) ) | printICode(X87Float{dest, source}, stream) = (stream "\tX87Float\t"; printArg(source, stream); stream " => "; printReg(dest, stream)) | printICode(SSE2Float{dest, source}, stream) = (stream "\tSSE2Float\t"; printArg(source, stream); stream " => "; printReg(dest, stream)) | printICode(SSE2FPUnary{opc, resultReg, source}, stream) = ( case opc of SSE2UDoubleToFloat => stream "\tSSE2UDoubleToFloat\t" | SSE2UFloatToDouble => stream "\tSSE2UFloatToDouble\t"; printArg(source, stream); stream " => "; printReg(resultReg, stream) ) | printICode(SSE2FPBinary{opc, resultReg, arg1, arg2}, stream) = ( case opc of SSE2BAddDouble => stream "\tSSE2BAddDouble\t" | SSE2BSubDouble => stream "\tSSE2BSubDouble\t" | SSE2BMulDouble => stream "\tSSE2BMulDouble\t" | SSE2BDivDouble => stream "\tSSE2BDivDouble\t" | SSE2BAddSingle => stream "\tSSE2BAddSingle\t" | SSE2BSubSingle => stream "\tSSE2BSubSingle\t" | SSE2BMulSingle => stream "\tSSE2BMulSingle\t" | SSE2BDivSingle => stream "\tSSE2BDivSingle\t" | SSE2BXor => stream "\tSSE2BXor\t" | SSE2BAnd => stream "\tSSE2BAnd\t"; printReg(arg1, stream); stream ","; printArg(arg2, stream); stream " => "; printReg(resultReg, stream) ) | printICode(TagFloat{source, dest, ...}, stream) = (stream "\tTagFloat\t"; printReg(source, stream); stream " => "; printReg(dest, stream)) | printICode(UntagFloat{source, dest, cache}, stream) = ( stream "\tUntagFloat\t"; printArg(source, stream); stream " => "; printReg(dest, stream); case cache of NONE => () | SOME c => (stream " cache "; printReg(c, stream)) ) | printICode(GetSSE2ControlReg{dest}, stream) = (stream "\tGetSSE2ControlReg\t"; printReg(dest, stream)) | printICode(SetSSE2ControlReg{source}, stream) = (stream "\tSetSSE2ControlReg\t"; printReg(source, stream)) | printICode(GetX87ControlReg{dest}, stream) = (stream "\tGetX87ControlReg\t"; printReg(dest, stream)) | printICode(SetX87ControlReg{source}, stream) = (stream "\tSetX87ControlReg\t"; printReg(source, stream)) | printICode(X87RealToInt{source, dest}, stream) = (stream "\tX87RealToInt\t"; printReg(source, stream); stream " => "; printReg(dest, stream)) | 
printICode(SSE2RealToInt{source, dest, isDouble, isTruncate}, stream) = ( stream "\tSSE2RealToInt"; if isTruncate then stream "Trunc" else (); if isDouble then stream "D\t" else stream "S\t"; printArg(source, stream); stream " => "; printReg(dest, stream) ) | printICode(SignExtend32To64{source, dest}, stream) = (stream "\tSignExtend32To64\t"; printArg(source, stream); stream " => "; printReg(dest, stream)) | printICode(TouchArgument{source}, stream) = (stream "\tTouchArgument\t"; printReg(source, stream)) + + | printICode(PauseCPU, stream) = stream "\tPauseCPU" and printCondition(JO, stream) = stream "Overflow" | printCondition(JNO, stream) = stream "NoOverflow" | printCondition(JE, stream) = stream "Equal" | printCondition(JNE, stream) = stream "NotEqual" | printCondition(JL, stream) = stream "LessSigned" | printCondition(JGE, stream) = stream "GeqSigned" | printCondition(JLE, stream) = stream "LeqSigned" | printCondition(JG, stream) = stream "GrtSigned" | printCondition(JB, stream) = stream "LessUnsigned" | printCondition(JNB, stream) = stream "GeqUnsigned" | printCondition(JNA, stream) = stream "LeqUnsigned" | printCondition(JA, stream) = stream "GrtUnsigned" | printCondition(JP, stream) = stream "ParitySet" | printCondition(JNP, stream) = stream "ParityClear" (* Print a basic block. *) fun printBlock stream (blockNo, BasicBlock{block, flow, ...}) = ( (* Put a label on all but the first. *) if blockNo <> 0 then stream("L" ^ Int.toString blockNo ^ ":") else (); List.app (fn icode => (printICode(icode, stream); stream "\n")) block; case flow of Unconditional l => stream("\tJump\tL" ^ Int.toString l ^ "\n") | Conditional {condition, trueJump, falseJump, ccRef, ...} => ( stream "\tJump"; printCondition(condition, stream); stream "\t"; printCC(ccRef, stream); stream " L"; stream (Int.toString trueJump); stream " else L"; stream (Int.toString falseJump); stream "\n" ) | ExitCode => () | IndexedBr _ => () | SetHandler{handler, continue} => stream(concat["\tSetHandler\tH", Int.toString handler, "\n", "\tJump\tL", Int.toString continue, "\n"]) | UnconditionalHandle handler => stream("\tJump\tH" ^ Int.toString handler ^ "\n") | ConditionalHandle{handler, continue} => stream(concat["\tJump\tL", Int.toString continue, " or H", Int.toString handler, "\n"]) ) in fun printICodeAbstract(blockVec, stream) = Vector.appi(printBlock stream) blockVec end (* We frequently just want to know the register. 
*) fun indexRegister NoMemIndex = NONE | indexRegister (MemIndex1 r) = SOME r | indexRegister (MemIndex2 r) = SOME r | indexRegister (MemIndex4 r) = SOME r | indexRegister (MemIndex8 r) = SOME r | indexRegister ObjectIndex = NONE structure Sharing = struct type genReg = genReg and argument = argument and memoryIndex = memoryIndex and x86ICode = x86ICode and branchOps = branchOps and reg = reg and preg = preg and destinations = destinations and controlFlow = controlFlow and basicBlock = basicBlock and stackLocn = stackLocn and regProperty = regProperty and callKinds = callKinds and arithOp = arithOp and shiftType = shiftType and repOps = repOps and fpOps = fpOps and fpUnaryOps = fpUnaryOps and sse2UnaryOps = sse2UnaryOps and sse2BinaryOps = sse2BinaryOps and ccRef = ccRef and opSize = opSize and closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeGetConflictSets.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeGetConflictSets.ML index aef2e7e3..8d05b706 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeGetConflictSets.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeGetConflictSets.ML @@ -1,284 +1,289 @@ (* Copyright (c) 2016-18 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86ICodeGetConflictSets( structure ICODE: ICodeSig structure INTSET: INTSETSIG structure IDENTIFY: X86IDENTIFYREFSSIG sharing ICODE.Sharing = IDENTIFY.Sharing = INTSET ): X86GETCONFLICTSETSIG = struct open ICODE open INTSET open IDENTIFY type conflictState = { conflicts: intSet, realConflicts: reg list } type triple = {instr: x86ICode, current: intSet, active: intSet} exception InternalError = Misc.InternalError (* Get the conflict sets. This code was originally part of identifyRegisterState and was split off. *) fun getConflictStates (blocks: extendedBasicBlock vector, maxPRegs) = let (* Other registers that conflict with this i.e. cannot share the same real register. *) val regConflicts = Array.array(maxPRegs, emptySet) (* Real registers that cannot be used for this because they are needed for an instruction e.g. shift or block move, that requires these. *) and regRealConflicts = Array.array(maxPRegs, []: reg list) fun addConflictsTo(addTo, conflicts) = List.app(fn aReg => Array.update(regConflicts, aReg, union(Array.sub(regConflicts, aReg), conflicts))) addTo (* To reserve a register we need to add the real register to the real conflict sets of all the abstract conflicts. 
*) local fun isInset reg set = List.exists (fn r => r = reg) set in fun reserveRegister(reserveFor, reg) = let fun reserveAReg r = let val absConflicts = Array.sub(regConflicts, r) fun addConflict i = if isInset i reserveFor then () else addRealConflict (i, reg) in List.app addConflict (setToList absConflicts) end in List.app reserveAReg reserveFor end and addRealConflict (i, reg) = let val currentConflicts = Array.sub(regRealConflicts, i) in if isInset reg currentConflicts then () else Array.update(regRealConflicts, i, reg :: currentConflicts) end end fun conflictsForInstr passThrough {instr, current, ...} = let val {sources, dests} = getInstructionRegisters instr fun regNo(PReg i) = i val destRegNos = map regNo dests and sourceRegNos = map regNo sources val destSet = listToSet destRegNos val afterRemoveDests = minus(current, destSet) local (* In almost all circumstances the destination and sources don't conflict and the same register can be used as a destination and a source. The exceptions are AllocateMemoryVariable and BoxValues which can only store the size or the contents after the memory has been allocated. We also have to make sure that the test and work registers are different in IndexedCase. *) val postInstruction = case instr of AllocateMemoryVariable _ => destRegNos @ sourceRegNos | BoxValue _ => destRegNos @ sourceRegNos | IndexedCaseOperation _ => destRegNos @ sourceRegNos | ArithmeticFunction{oper=SUB, operand2, ...} => (* Special case for subtraction - we can't use the same register for the result and the second operand. *) destRegNos @ map regNo (argRegs operand2) + | ArithmeticFunction{operand2 as MemoryLocation _, ...} => (* If operand1 is not in the destination register we will move it there before the instruction. That means that we must not have the destination register as either a base or index register. *) destRegNos @ map regNo (argRegs operand2) | Multiplication{operand2 as MemoryLocation _, ...} => (* Likewise for multiplication. *) destRegNos @ map regNo (argRegs operand2) + | AtomicExchangeAndAdd{base, ...} => + (* and for atomic add. The base is always a memory address. *) + regNo base :: destRegNos + (* TailRecursiveCall and JumpLoop may require a work register. This is the only destination but if present it must be distinct from the arguments. *) | TailRecursiveCall _ => destRegNos @ sourceRegNos | JumpLoop _ => destRegNos @ sourceRegNos | _ => destRegNos in (* If there is more than one destination they conflict with each other. *) val () = addConflictsTo(postInstruction, listToSet postInstruction); (* Mark conflicts for the destinations, i.e. after the instruction. The destinations conflict with the registers that are used subsequently. *) val () = addConflictsTo(postInstruction, current); val () = addConflictsTo(postInstruction, passThrough); (* Mark conflicts for the sources i.e. before the instruction. *) (* Sources must be set up as conflicts with each other i.e. when we come to allocate registers we must choose different real registers for different abstract registers. *) val () = addConflictsTo(sourceRegNos, listToSet sourceRegNos) val () = addConflictsTo(sourceRegNos, afterRemoveDests); val () = addConflictsTo(sourceRegNos, passThrough) end (* I'm not sure if this is needed. There was a check in the old code to ensure that different registers were used for loop variables even if they were actually unused. This may happen anyway. 
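(* Illustrative note on the AtomicExchangeAndAdd case added above: following the pattern
   of the ArithmeticFunction and Multiplication cases, the increment is presumably moved
   into the result register before the locked XADD, so the real register chosen for the
   result must not also serve as the base of the memory operand; putting the base register
   into the post-instruction set enforces that. *)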
*) val () = case instr of JumpLoop{regArgs, ...} => let val destRegs = List.foldl(fn ((_, PReg loopReg), dests) => loopReg :: dests) [] regArgs in addConflictsTo(destRegs, listToSet destRegs); addConflictsTo(destRegs, current); addConflictsTo(destRegs, passThrough) end | _ => () (* Certain instructions are specific as to the real registers. *) val () = case instr of (* Storing a byte value. This is messy on X86/32 because we can't use edi or esi as the register to store. To get round this we reserve ecx as a possible register as with shifts. We don't actually need to use this but it is available if necessary. *) StoreArgument { source=RegisterArgument sReg, kind=MoveByte, ...} => if targetArch <> Native32Bit then () else reserveRegister([regNo sReg], GenReg ecx) | InitialiseMem{size, addr, init} => ( (* We are going to use rep stosl/q to set the memory. That requires the length to be in ecx, the initialiser to be in eax and the address to be edi. *) reserveRegister([regNo addr], GenReg edi); reserveRegister([regNo init], GenReg eax); reserveRegister([regNo size], GenReg ecx) ) | ShiftOperation{shiftAmount=RegisterArgument shiftAmount, ...} => ( (* Shift with by amount specified in a register. This must be ecx. *) reserveRegister([regNo shiftAmount], GenReg ecx); (* reserveRegister only sets a conflict between the args. We need to include the result because that will be allocated first. *) List.app(fn r => addRealConflict (r, GenReg ecx)) destRegNos ) | Division{dividend, quotient, remainder, ...} => ( (* Division is specific as to the registers. The dividend must be eax, quotient is eax and the remainder is edx. The divisor must not be in either edx or eax because we need to sign extend the dividend before the division. *) reserveRegister([regNo quotient, regNo dividend], GenReg eax); (* In addition, we need to register conflicts with the divisor, at least for edx. The remainder is a result and may well not be in the conflict set with the divisor. *) List.app(fn r => addRealConflict (r, GenReg edx)) sourceRegNos; reserveRegister([regNo remainder], GenReg edx) ) | CompareByteVectors{vec1Addr, vec2Addr, length, ...} => ( (* We have to use specific registers. *) reserveRegister([regNo vec1Addr], GenReg esi); reserveRegister([regNo vec2Addr], GenReg edi); reserveRegister([regNo length], GenReg ecx) ) | BlockMove{srcAddr, destAddr, length, ...} => ( (* We have to use specific registers. *) reserveRegister([regNo srcAddr], GenReg esi); reserveRegister([regNo destAddr], GenReg edi); reserveRegister([regNo length], GenReg ecx) ) | X87FPGetCondition{dest, ...} => (* This can only put the result in rax. *) reserveRegister([regNo dest], GenReg eax) | RaiseExceptionPacket{ packetReg } => (* This wasn't needed previously because we always pushed the registers across an exception. *) reserveRegister([regNo packetReg], GenReg eax) | BeginHandler { packetReg, ...} => reserveRegister([regNo packetReg], GenReg eax) | FunctionCall { dest, realDest, regArgs, ...} => (* This is only needed if we are saving the registers rather than marking them as "must push". *) ( reserveRegister([regNo dest], realDest); (* The argument registers also conflict. In order to execute this call we need to load the arguments into specific registers so we can't use them for values that we want after the call. We use regNo dest here because that will conflict with everything immediately afterwards. 
*) List.app(fn (_, r) => reserveRegister([regNo dest], r)) regArgs ) | TailRecursiveCall {workReg=PReg wReg, regArgs, ...} => (* Prevent the work reg from using any of the real register args. *) List.app(fn (_, r) => addRealConflict (wReg, r)) regArgs | _ => () in () end (* Process the block. *) fun conflictsForBlock(ExtendedBasicBlock{block, passThrough, exports, ...}) = let (* We need to establish conflicts between all the registers active at the end of the block since they may not be established elsewhere. This isn't necessary for an unconditional branch since the same registers will be included in the block that is the target of the branch, possibly along with others. However if this is a conditional or indexed branch we may have different sets at each of the targets and we have to ensure that all the registers differ. *) val united = union(exports, passThrough) val () = addConflictsTo(setToList united, united) val () = List.app (conflictsForInstr passThrough) block in () end val () = Vector.app conflictsForBlock blocks val conflictState: conflictState vector = Vector.tabulate(maxPRegs, fn i => { conflicts = Array.sub(regConflicts, i), realConflicts = Array.sub(regRealConflicts, i) } ) in conflictState end structure Sharing = struct type x86ICode = x86ICode and reg = reg and preg = preg and intSet = intSet and extendedBasicBlock = extendedBasicBlock end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeIdentifyReferences.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeIdentifyReferences.ML index f79c2d60..3d018535 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeIdentifyReferences.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeIdentifyReferences.ML @@ -1,914 +1,917 @@ (* - Copyright (c) 2016-20 David C.J. Matthews + Copyright (c) 2016-21 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86ICodeIdentifyReferences( structure ICODE: ICodeSig structure DEBUG: DEBUG structure INTSET: INTSETSIG ): X86IDENTIFYREFSSIG = struct open ICODE open INTSET type regState = { active: int, refs: int, pushState: bool, prop: regProperty } (* CC states before and after. The instruction may use the CC or ignore it. The only instructions to use the CC is X87FPGetCondition. Conditional branches are handled at the block level. The result of executing the instruction may be to set the condition code to a defined state, an undefined state or leave it unchanged. N.B. Some "instructions" may involve a stack reset that could affect the CC. *) datatype outCCState = CCSet of ccRef | CCIndeterminate | CCUnchanged and inCCState = CCNeeded of ccRef | CCUnused datatype extendedBasicBlock = ExtendedBasicBlock of { block: {instr: x86ICode, current: intSet, active: intSet, kill: intSet } list, flow: controlFlow, locals: intSet, (* Defined and used entirely within the block. *) imports: intSet, (* Defined outside the block, used inside it, but not needed afterwards. 
*) exports: intSet, (* Defined within the block, possibly used inside, but used outside. *) passThrough: intSet, (* Active throughout the block. May be referred to by it but needed afterwards. *) loopRegs: intSet, (* Destination registers for a loop. They will be updated by this block. *) initialStacks: intSet, (* Stack items required at the start i.e. imports+passThrough for stack items. *) inCCState: ccRef option, (* The state this block assumes. If SOME _ all predecessors must set it. *) outCCState: ccRef option (* The condition code set by this block. SOME _ if at least one successor needs it. *) } exception InternalError = Misc.InternalError (* Return the list of blocks that are the immediate successor of this. *) fun blockSuccessors(BasicBlock{flow, ...}) = successorBlocks flow (* Find the registers from an argument. *) fun argRegs(RegisterArgument rarg) = [rarg] | argRegs(MemoryLocation { base, index, cache=SOME cr, ...}) = cr :: base :: argIndex index | argRegs(MemoryLocation { base, index, cache=NONE, ...}) = base :: argIndex index | argRegs(StackLocation { cache=SOME rarg, ...}) = [rarg] | argRegs _ = [] and argIndex NoMemIndex = [] | argIndex(MemIndex1 arg) = [arg] | argIndex(MemIndex2 arg) = [arg] | argIndex(MemIndex4 arg) = [arg] | argIndex(MemIndex8 arg) = [arg] | argIndex ObjectIndex = [] fun argStacks(StackLocation { container, ...}) = [container] | argStacks(ContainerAddr { container, ...}) = [container] | argStacks _ = [] (* Return the set of registers used by the instruction. sources are registers that must have values after the instruction. dests are registers that are given values or modified by the instruction. *) fun getInstructionState(LoadArgument { source, dest, ...}) = { sources=argRegs source, dests=[dest], sStacks=argStacks source, dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } | getInstructionState(StoreArgument{ source, base, index, ...}) = { sources=argRegs source @ [base] @ argIndex index, dests=[], sStacks=argStacks source, dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } | getInstructionState(LoadMemReg { dest, ...}) = { sources=[], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } | getInstructionState(StoreMemReg { source, ...}) = { sources=[source], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } | getInstructionState(BeginFunction {regArgs, stackArgs, ...}) = { sources=[], dests=map #1 regArgs, sStacks=[], dStacks=stackArgs, ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(FunctionCall{regArgs, stackArgs, dest, ...}) = let (* Non-tail-recursive. Behaves as a normal reference to sources. *) fun getSources argSource = let val stackSources = List.foldl(fn (arg, srcs) => argSource arg @ srcs) [] stackArgs fun regSource((arg, _), srcs) = argSource arg @ srcs in List.foldl regSource stackSources regArgs end in { sources=getSources argRegs, dests=[dest], sStacks=getSources argStacks, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } end | getInstructionState(TailRecursiveCall{regArgs, stackArgs, workReg, ...}) = let (* Tail recursive call. References the argument sources but exits. 
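      For instance, a made-up single-argument call
          TailRecursiveCall{regArgs=[(RegisterArgument(PReg 1), GenReg eax)],
                            stackArgs=[], workReg=PReg 9, ...}
      gives sources=[PReg 1] and dests=[PReg 9]: the arguments must still be
      live at the jump, but unlike FunctionCall there is no result register
      because control never returns to this point.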
*) fun getSources argSource = let val stackSources = List.foldl(fn ({src, ...}, srcs) => argSource src @ srcs) [] stackArgs fun regSource((arg, _), srcs) = argSource arg @ srcs in List.foldl regSource stackSources regArgs end in { sources=getSources argRegs, dests=[workReg], sStacks=getSources argStacks, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } end | getInstructionState(AllocateMemoryOperation{dest, ...}) = { sources=[], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(AllocateMemoryVariable{size, dest, ...}) = { sources=[size], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(InitialiseMem{size, addr, init}) = { sources=[size, addr, init], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } | getInstructionState(InitialisationComplete) = (* This is just a marker. It doesn't actually generate any code. *) { sources=[], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(BeginLoop) = (* This is just a marker. It doesn't actually generate any code. *) { sources=[], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(JumpLoop{regArgs, stackArgs, workReg, ...}) = let fun getSources argSource = let val regSourceAsRegs = List.foldl(fn ((source, _), srcs) => argSource source @ srcs) [] regArgs in List.foldl(fn ((source, _, _), srcs) => argSource source @ srcs) regSourceAsRegs stackArgs end val dests = case workReg of SOME r => [r] | NONE => [] in { sources=getSources argRegs, dests=dests, sStacks=getSources argStacks, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } end | getInstructionState(RaiseExceptionPacket{packetReg}) = { sources=[packetReg], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(ReserveContainer{container, ...}) = { sources=[], dests=[], sStacks=[], dStacks=[container], ccIn=CCUnused, ccOut=CCUnchanged } | getInstructionState(IndexedCaseOperation{testReg, workReg, ...}) = { sources=[testReg], dests=[workReg], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(LockMutable{addr}) = { sources=[addr], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(WordComparison{arg1, arg2, ccRef, ...}) = { sources=arg1 :: argRegs arg2, dests=[], sStacks=argStacks arg2, dStacks=[], ccIn=CCUnused, ccOut=CCSet ccRef } | getInstructionState(CompareLiteral{arg1, ccRef, ...}) = { sources=argRegs arg1, dests=[], sStacks=argStacks arg1, dStacks=[], ccIn=CCUnused, ccOut=CCSet ccRef } | getInstructionState(CompareByteMem{arg1={base, index, ...}, ccRef, ...}) = { sources=base :: argIndex index, dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCSet ccRef } | getInstructionState(PushExceptionHandler{workReg, ...}) = { sources=[], dests=[workReg], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(PopExceptionHandler{ workReg }) = { sources=[], dests=[workReg], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(BeginHandler{ workReg, packetReg, ...}) = { sources=[], dests=[packetReg, workReg], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(ReturnResultFromFunction{resultReg, ...}) = { sources=[resultReg], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(ArithmeticFunction{resultReg, operand1, operand2, ccRef, ...}) = { 
sources=operand1 :: argRegs operand2, dests=[resultReg], sStacks=argStacks operand2, dStacks=[], ccIn=CCUnused, ccOut=CCSet ccRef } | getInstructionState(TestTagBit{arg, ccRef, ...}) = { sources=argRegs arg, dests=[], sStacks=argStacks arg, dStacks=[], ccIn=CCUnused, ccOut=CCSet ccRef } | getInstructionState(PushValue {arg, container, ...}) = { sources=argRegs arg, dests=[], sStacks=argStacks arg, dStacks=[container], ccIn=CCUnused, ccOut=CCUnchanged } | getInstructionState(CopyToCache{source, dest, ...}) = { sources=[source], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged} | getInstructionState(ResetStackPtr{preserveCC, ...}) = { sources=[], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=if preserveCC then CCUnchanged else CCIndeterminate } | getInstructionState(StoreToStack {source, container, ...}) = (* Although this stores into the container it must already exist. *) { sources=argRegs source, dests=[], sStacks=container :: argStacks source, dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } | getInstructionState(TagValue{source, dest, ...}) = { sources=[source], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } | getInstructionState(UntagValue{source, dest, cache, ...}) = { sources=case cache of NONE => [source] | SOME cr => [cr, source], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(LoadEffectiveAddress{base, index, dest, ...}) = let val bRegs = case base of SOME bReg => [bReg] | _ => [] val iRegs = argIndex index in { sources=bRegs @ iRegs, dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } end | getInstructionState(ShiftOperation{resultReg, operand, shiftAmount, ccRef, ...}) = { sources=operand :: argRegs shiftAmount, dests=[resultReg], sStacks=argStacks shiftAmount, dStacks=[], ccIn=CCUnused, ccOut=CCSet ccRef } | getInstructionState(Multiplication{resultReg, operand1, operand2, ccRef, ...}) = { sources=operand1 :: argRegs operand2, dests=[resultReg], sStacks=argStacks operand2, dStacks=[], ccIn=CCUnused, ccOut=CCSet ccRef } | getInstructionState(Division{dividend, divisor, quotient, remainder, ...}) = { sources=dividend :: argRegs divisor, dests=[quotient, remainder], sStacks=argStacks divisor, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } - | getInstructionState(AtomicExchangeAndAdd{base, source}) = - { sources=[base, source], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } + | getInstructionState(AtomicExchangeAndAdd{base, source, resultReg}) = + { sources=[base, source], dests=[resultReg], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(BoxValue{source, dest, ...}) = { sources=[source], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(CompareByteVectors{vec1Addr, vec2Addr, length, ccRef, ...}) = { sources=[vec1Addr, vec2Addr, length], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCSet ccRef } | getInstructionState(BlockMove{srcAddr, destAddr, length, ...}) = { sources=[srcAddr, destAddr, length], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(X87Compare{arg1, arg2, ccRef, ...}) = { sources=arg1 :: argRegs arg2, dests=[], sStacks=argStacks arg2, dStacks=[], ccIn=CCUnused, ccOut=CCSet ccRef } | getInstructionState(SSE2Compare{arg1, arg2, ccRef, ...}) = { sources=arg1 :: argRegs arg2, dests=[], sStacks=argStacks arg2, dStacks=[], ccIn=CCUnused, ccOut=CCSet ccRef } | 
getInstructionState(X87FPGetCondition{dest, ccRef, ...}) = { sources=[], dests=[dest], sStacks=[], dStacks=[], ccIn=CCNeeded ccRef, ccOut=CCIndeterminate } | getInstructionState(X87FPArith{resultReg, arg1, arg2, ...}) = { sources=arg1 :: argRegs arg2, dests=[resultReg], sStacks=argStacks arg2, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(X87FPUnaryOps{dest, source, ...}) = { sources=[source], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(X87Float{dest, source}) = { sources=argRegs source, dests=[dest], sStacks=argStacks source, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(SSE2Float{dest, source}) = { sources=argRegs source, dests=[dest], sStacks=argStacks source, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(SSE2FPUnary{resultReg, source, ...}) = { sources=argRegs source, dests=[resultReg], sStacks=argStacks source, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(SSE2FPBinary{resultReg, arg1, arg2, ...}) = { sources=arg1 :: argRegs arg2, dests=[resultReg], sStacks=argStacks arg2, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(TagFloat{source, dest, ...}) = { sources=[source], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(UntagFloat{source, dest, cache, ...}) = { sources=case cache of NONE => argRegs source | SOME cr => cr :: argRegs source, dests=[dest], sStacks=argStacks source, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(GetSSE2ControlReg{dest}) = { sources=[], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(SetSSE2ControlReg{source}) = { sources=[source], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(GetX87ControlReg{dest}) = { sources=[], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(SetX87ControlReg{source}) = { sources=[source], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(X87RealToInt{ source, dest }) = { sources=[source], dests=[dest], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(SSE2RealToInt{ source, dest, ... }) = { sources=argRegs source, dests=[dest], sStacks=argStacks source, dStacks=[], ccIn=CCUnused, ccOut=CCIndeterminate } | getInstructionState(SignExtend32To64{ source, dest }) = { sources=argRegs source, dests=[dest], sStacks=argStacks source, dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } | getInstructionState(TouchArgument{source}) = { sources=[source], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } + | getInstructionState PauseCPU = + { sources=[], dests=[], sStacks=[], dStacks=[], ccIn=CCUnused, ccOut=CCUnchanged } + (* These instructions can be eliminated if their register sources are not used. There may be other cases. 
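   Concretely, such an instruction is dropped when none of its destination
   registers is ever referenced. For example, if a
   LoadArgument{..., dest=PReg 3, ...} is reached during the backward scan
   in the first pass while Array.sub(regRefs, 3) is still 0, the load is
   omitted and its own sources are never marked as referenced, which may in
   turn let the instruction that produced those sources be dropped as well.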
*) fun eliminateable(LoadArgument _) = true | eliminateable(TagValue _) = true | eliminateable(UntagValue _) = true | eliminateable(LoadEffectiveAddress _) = true | eliminateable(BoxValue _) = true | eliminateable(CopyToCache _) = true | eliminateable(LoadMemReg _) = true | eliminateable _ = false fun identifyRegs(blockVector, pregProps): extendedBasicBlock vector * regState vector = let val maxPRegs = Vector.length pregProps val vectorLength = Vector.length blockVector (* Initial arrays - declarationArray is the set of registers given values by the block, importArray is the set of registers referenced by the block and not declared locally. *) val declarationArray = Array.array(vectorLength, emptySet) and importArray = Array.array(vectorLength, emptySet) val stackDecArray = Array.array(vectorLength, emptySet) and stackImportArray = Array.array(vectorLength, emptySet) and localLoopRegArray = Array.array(vectorLength, emptySet) (* References - this is used locally to see if a register is ever actually used and also included in the result which uses it as part of the choice of which register to spill. *) val regRefs = Array.array(maxPRegs, 0) (* Registers that must be pushed because they are required after a function call. For cache registers this means "discard". *) and requirePushOrDiscard = Array.array(maxPRegs, false) fun incrRef r = Array.update(regRefs, r, Array.sub(regRefs, r)+1) (* Contains the, possibly filtered, code for each block. *) val resultCode = Array.array(vectorLength, NONE) val ccInStates = Array.array(vectorLength, CCUnused) and ccOutStates = Array.array(vectorLength, CCIndeterminate) (* First pass - for each block build up the sets of registers defined and used in the block. We do this depth-first so that we can use "refs" to see if a register is used. If this is an instruction that can be eliminated we don't need to generate it and can ignore any references it makes. *) local fun blockScan blockNo = if isSome(Array.sub(resultCode, blockNo)) then () else let val () = Array.update(resultCode, blockNo, SOME []) (* Prevent looping. *) val thisBlock as BasicBlock { block, flow, ...} = Vector.sub(blockVector, blockNo) val successors = blockSuccessors thisBlock (* Visit everything reachable first. *) val () = List.app blockScan successors fun scanCode(instr, original as { code, decs, refs, sDecs, sRefs, occIn, occOut, loopRegs, ... }) = let val { sources, dests, sStacks=stackSrcs, dStacks=stackDests, ccIn, ccOut, ... } = getInstructionState instr fun regNo(PReg i) = i and stackNo(StackLoc{rno, ...}) = rno val destRegNos = map regNo dests and sourceRegNos = map regNo sources val stackDestRegNos = map stackNo stackDests and stackSourceRegNos = map stackNo stackSrcs (* If this instruction requires a cc i.e. is SetToCondition or X87FPGetCondition we need to set this as a requirement earlier. If this sets the CC and it is the condition we've been expecting we've satisfied it and can set the previous condition to Unused. We could use this to decide if a comparison is no longer required. That can only happen in very specific circumstances e.g. some tests in Test176.ML so it's not worthwhile. *) val newInCC = case (ccIn, ccOut, occIn) of (cc as CCNeeded _, _, _) => cc (* This instr needs a particular cc. *) | (CCUnused, CCSet _, _) => CCUnused | (CCUnused, _, occIn) => occIn (* If this instruction modifies the CC check to see if it is setting an requirement. 
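   Scanning backwards, the required condition code therefore evolves as:
       this instruction needs a cc              => CCNeeded ccRef
       no requirement here and instr sets a cc  => CCUnused (requirement met)
       otherwise                                => keep the previous requirement
   and the check below verifies that whenever there is an outstanding
   requirement, an instruction that sets the cc sets the expected ccRef.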
*) val _ = case (occIn, ccOut) of (CCNeeded ccRIn, CCSet ccRout) => if ccRIn = ccRout then () else raise InternalError "CCCheck failed" | (CCNeeded _, CCIndeterminate) => raise InternalError "CCCheck failed" | _ => () (* The output CC is the last CC set. Tail instructions that don't change the CC state are ignored until we reach an instruction that sets it. *) val newOutCC = case occOut of CCUnchanged => ccOut | _ => occOut val instrLoopRegs = case instr of JumpLoop{regArgs, ...} => listToSet (map (regNo o #2) regArgs) | _ => emptySet in if eliminateable instr andalso List.all(fn dReg => Array.sub(regRefs, dReg) = 0) destRegNos then original (* Don't include this instruction. *) else let (* Only mark the sources as referred after we know we're going to need this. In that way we may eliminate the instruction that created this source. *) val () = List.app incrRef sourceRegNos in { code = instr :: code, decs = union(listToSet destRegNos, decs), refs = union(listToSet sourceRegNos, refs), sDecs = union(listToSet stackDestRegNos, sDecs), sRefs = union(listToSet stackSourceRegNos, sRefs), occIn = newInCC, occOut = newOutCC, loopRegs = union(loopRegs, instrLoopRegs)} end end (* If we have a conditional branch at the end we need the condition code. It should either be set here or in a preceding block. *) val inCC = case flow of Conditional { ccRef, ...} => CCNeeded ccRef | _ => CCUnused val { code, decs, refs, sDecs, sRefs, occIn, occOut, loopRegs, ... } = List.foldr scanCode {code=[], decs=emptySet, refs=emptySet, sDecs=emptySet, sRefs=emptySet, occIn=inCC, occOut=CCUnchanged, loopRegs=emptySet} block in Array.update(declarationArray, blockNo, decs); (* refs includes local declarations. Remove before adding to the result. *) Array.update(importArray, blockNo, minus(refs, decs)); Array.update(localLoopRegArray, blockNo, loopRegs); Array.update(stackDecArray, blockNo, sDecs); Array.update(stackImportArray, blockNo, minus(sRefs, sDecs)); Array.update(resultCode, blockNo, SOME code); Array.update(ccInStates, blockNo, occIn); Array.update(ccOutStates, blockNo, occOut) end in val () = blockScan 0 (* Start with the root block. *) end (* Second phase - Propagate reference information between the blocks. We need to consider loops here. Do a depth-first scan marking each block. If we find a loop we save the import information we've used. If when we come to process that block we find the import information is different we need to reprocess. *) (* Pass through array - values used in other blocks after this that are not declared in this block. *) val passThroughArray = Array.array(vectorLength, emptySet) val stackPassThroughArray = Array.array(vectorLength, emptySet) (* Exports - those of our declarations that are used in other blocks. *) val exportArray = Array.array(vectorLength, emptySet) val stackExportArray = Array.array(vectorLength, emptySet) (* Loop registers. This contains the registers that are not exported from or passed through this block but are used subsequently as loop registers. *) val loopRegArray = Array.array(vectorLength, emptySet) val () = Array.copy{src=localLoopRegArray, dst=loopRegArray, di=0} (* If any one of the successors requires the CC then this is set. Otherwise we leave it as Unused. 
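   For example, if one successor of a block needs ccRef c1 (a hypothetical
   reference) and another needs nothing, the merge in addSuccessor records
   CCNeeded c1 for this block; the block must then either set c1 itself
   (its out state is CCSet c1) or leave the cc unchanged, in which case
   CCNeeded c1 becomes the block's own input requirement. Any other
   combination raises the "Mismatched cc states" exception below.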
*) val ccRequiredOut = Array.array(vectorLength, CCUnused) local datatype loopData = Unprocessed | Processing | Processed | Looped of { regSet: intSet, loopSet: intSet, stackSet: intSet, ccState: inCCState } fun reprocessLoop () = let val reprocess = ref false val loopArray = Array.array(vectorLength, Unprocessed) fun processBlocks blockNo = case Array.sub(loopArray, blockNo) of Processed => (* Already seen this by a different route. *) { regSet = union(Array.sub(passThroughArray, blockNo), Array.sub(importArray, blockNo)), stackSet = union(Array.sub(stackPassThroughArray, blockNo), Array.sub(stackImportArray, blockNo)), ccState = Array.sub(ccInStates, blockNo), loopSet = Array.sub(loopRegArray, blockNo) } | Looped s => s (* We've already seen this in a loop. *) | Processing => (* We have a loop. *) let (* Use the existing input array. *) val inputs = { regSet = union(Array.sub(passThroughArray, blockNo), Array.sub(importArray, blockNo)), stackSet = union(Array.sub(stackPassThroughArray, blockNo), Array.sub(stackImportArray, blockNo)), ccState = Array.sub(ccInStates, blockNo), loopSet = Array.sub(loopRegArray, blockNo) } val () = Array.update(loopArray, blockNo, Looped inputs) in inputs end | Unprocessed => (* Normal case - not visited yet. *) let val () = Array.update(loopArray, blockNo, Processing) val thisBlock = Vector.sub(blockVector, blockNo) val ourDeclarations = Array.sub(declarationArray, blockNo) and ourStackDeclarations = Array.sub(stackDecArray, blockNo) and ourLocalLoopRegs = Array.sub(localLoopRegArray, blockNo) val successors = blockSuccessors thisBlock fun addSuccessor b = let val {regSet=theirImports, stackSet=theirStackImports, ccState=theirInState, loopSet=theirLoops} = processBlocks b (* Remove loop regs from the imports if they are actually given new values by this block. We don't want to pass the old loop regs through here. *) val theirImports = minus(theirImports, ourLocalLoopRegs) (* Split the imports. If a register is a local declaration then it becomes an export. If it is not it becomes part of our passThrough. *) val (addToExp, addToImp) = INTSET.partition (fn i => member(i, ourDeclarations)) theirImports val (addToStackExp, addToStackImp) = INTSET.partition (fn i => member(i, ourStackDeclarations)) theirStackImports (* Merge the input states from each of the successors. *) val () = case (theirInState, Array.sub(ccRequiredOut, blockNo)) of (CCNeeded ts, CCNeeded req) => if ts = req then () else raise InternalError "Mismatched states" | (ts as CCNeeded _, _) => Array.update(ccRequiredOut, blockNo, ts) | _ => () (* Add loop registers to the set if they are not declared here. The only place they are declared is at the entry to the loop so that stops them being propagated further. 
*) val addToLoops = minus(theirLoops, ourDeclarations) in Array.update(exportArray, blockNo, union(Array.sub(exportArray, blockNo), addToExp)); Array.update(passThroughArray, blockNo, union(Array.sub(passThroughArray, blockNo), addToImp)); Array.update(stackExportArray, blockNo, union(Array.sub(stackExportArray, blockNo), addToStackExp)); Array.update(stackPassThroughArray, blockNo, union(Array.sub(stackPassThroughArray, blockNo), addToStackImp)); Array.update(loopRegArray, blockNo, union(Array.sub(loopRegArray, blockNo), addToLoops)) end val () = List.app addSuccessor successors val ourInputs = union(Array.sub(passThroughArray, blockNo), Array.sub(importArray, blockNo)) val ourStackInputs = union(Array.sub(stackPassThroughArray, blockNo), Array.sub(stackImportArray, blockNo)) in (* Check that we supply the required state. *) case (Array.sub(ccRequiredOut, blockNo), Array.sub(ccOutStates, blockNo)) of (CCNeeded ccReq, CCSet ccSet) => if ccReq = ccSet then () else raise InternalError "Mismatched cc states" | (CCNeeded _, CCIndeterminate) => raise InternalError "Mismatched cc states" | (cc as CCNeeded needOut, CCUnchanged) => ( (* We pass through the state. If we don't use the state then we need to set this as the input. If we do use the state it must be the same. *) case Array.sub(ccInStates, blockNo) of CCUnused => Array.update(ccInStates, blockNo, cc) | CCNeeded needIn => if needOut = needIn then () else raise InternalError "Mismatched cc states" ) | _ => (); (* Was this block used in a loop? If so we should not be requiring a CC. *) case Array.sub(loopArray, blockNo) of Looped {regSet, stackSet, ...} => ( case Array.sub(ccInStates, blockNo) of CCNeeded _ => raise InternalError "Looped state needs cc" | _ => (); if setToList regSet = setToList ourInputs andalso setToList stackSet = setToList ourStackInputs then () else reprocess := true ) | _ => (); Array.update(loopArray, blockNo, Processed); { regSet = ourInputs, stackSet = ourStackInputs, ccState = Array.sub(ccInStates, blockNo), loopSet=Array.sub(loopRegArray, blockNo)} end in reprocess := false; processBlocks 0; if !reprocess then reprocessLoop () else () end in val () = reprocessLoop () end (* Third pass - Build the result list with the active registers for each instruction. We don't include registers in the passThrough set since they are active throughout the block. *) local (* Number of instrs for which this is active. We use this to try to select a register to push to the stack if we have too many. Registers that have only a short lifetime are less likely to be pushed than those that are active longer. *) val regActive = Array.array(maxPRegs, 0) fun addActivity n r = Array.update(regActive, r, Array.sub(regActive, r)+n) fun createResultInstrs (passThrough, stackPassThrough) (instr, (tail, activeAfterThis, stackActiveAfterThis)) = let val { sources, dests, sStacks=stackSrcs, dStacks=stackDests, ... } = getInstructionState instr in (* Eliminate instructions if their results are not required. The earlier check for this will remove most cases but if we have duplicated a block we may have a register that is required elsewhere but not in this particular branch. 
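   The active sets are computed backwards in the usual way: for each
   instruction, roughly,
       activeForPrevious = union(sourceSet, minus(activeAfterThis, destSet))
   where sources already in passThrough are omitted because they are live
   for the whole block anyway.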
*) if not(List.exists(fn PReg d => member(d, activeAfterThis)) dests) andalso eliminateable instr then (tail, activeAfterThis, stackActiveAfterThis) else let fun regNo(PReg i) = i fun stackNo(StackLoc{rno, ...}) = rno val destRegNos = map regNo dests and sourceRegNos = map regNo sources val destSet = listToSet destRegNos (* Remove any sources that are present in passThrough since they are going to be active throughout the block. *) and sourceSet = minus(listToSet sourceRegNos, passThrough) val stackDestRegNos = map stackNo stackDests and stackSourceRegNos = map stackNo stackSrcs val stackDestSet = listToSet stackDestRegNos and stackSourceSet = minus(listToSet stackSourceRegNos, stackPassThrough) (* To compute the active set for the PREVIOUS instruction (we're processing from the end back to the start) we remove any registers that have been given values in this instruction and add anything that we are using in this instruction since they will now need to have values. *) val afterRemoveDests = minus(activeAfterThis, destSet) val stackAfterRemoveDests = minus(stackActiveAfterThis, stackDestSet) val activeForPrevious = union(sourceSet, afterRemoveDests) val stackActiveForPrevious = union(stackSourceSet, stackAfterRemoveDests) (* The "active" set is the set of registers that need to be active DURING the instruction. It includes destinations, which will usually be in "activeAfterThis", because there may be destinations that are not actually used subsequently but still need a register. That will also include work registers. Usually sources aren't included if this is the last use but the AllocateMemoryVariable "instruction" can't set the size after the memory is allocated so the active set includes the source(s). *) val activeForInstr = case instr of FunctionCall _ => sourceSet (* Is this still needed? *) | TailRecursiveCall _ => (* Set the active set to the total set of registers we require including the work register. This ensures that we will spill as many registers as we require when we look at the size of the active set. *) union(sourceSet, destSet) | AllocateMemoryVariable _ => (* We can only set the size after the memory is allocated. *) union(activeAfterThis, union(sourceSet, destSet)) | BoxValue _ => (* We can only store the value in the box after the box is allocated. *) union(activeAfterThis, union(sourceSet, destSet)) | _ => union(activeAfterThis, destSet) val () = List.app(addActivity 1) (setToList activeForInstr) local (* If we are allocating memory we have to save the current registers if they could contain an address. We mustn't push untagged registers and we mustn't push the destination. *) fun getSaveSet dReg = let val activeAfter = union(activeAfterThis, passThrough) (* Remove any registers marked - must-not-push. These are registers holding non-address values. They will actually be saved by the RTS across any GC but not checked or modified by the GC. Exclude the result register. *) fun getSave i = if i = dReg then NONE else case Vector.sub(pregProps, i) of RegPropGeneral => SOME(PReg i) | RegPropCacheTagged => SOME(PReg i) | RegPropUntagged => NONE | RegPropStack _ => NONE | RegPropCacheUntagged => NONE | RegPropMultiple => raise InternalError "getSave: RegPropMultiple" in List.mapPartial getSave (setToList activeAfter) end in (* Sometimes we need to modify the instruction e.g. to include the set of registers to save. 
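   For example, an allocation written as
       AllocateMemoryOperation{size=2, flags=0w0, dest=PReg 4, ...}
   is rebuilt below with saveRegs=getSaveSet 4, i.e. every tagged register
   live across the allocation other than the destination itself, so that
   those values can be found and updated if the allocation causes a
   garbage collection.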
*) val convertedInstr = case instr of AllocateMemoryOperation{size, flags, dest, saveRegs=_} => AllocateMemoryOperation{size=size, flags=flags, dest=dest, saveRegs=getSaveSet(regNo dest)} | AllocateMemoryVariable{size, dest, saveRegs=_} => AllocateMemoryVariable{size=size, dest=dest, saveRegs=getSaveSet(regNo dest)} | BoxValue{source, dest, boxKind, saveRegs=_} => BoxValue{source=source, dest=dest, boxKind=boxKind, saveRegs=getSaveSet(regNo dest)} | JumpLoop{regArgs, stackArgs, checkInterrupt = SOME _, workReg, ...} => let (* If we have to check for interrupts we must preserve registers across the RTS call. *) fun getSave i = case Vector.sub(pregProps, i) of RegPropGeneral => SOME(PReg i) | RegPropCacheTagged => SOME(PReg i) | RegPropUntagged => NONE | RegPropStack _ => NONE | RegPropCacheUntagged => NONE | RegPropMultiple => raise InternalError "getSave: RegPropMultiple" val currentRegs = union(activeAfterThis, passThrough) (* Have to include the loop registers. These were previously included automatically because they were part of the import set. *) val check = List.mapPartial getSave (map (regNo o #2) regArgs @ setToList currentRegs) in JumpLoop{regArgs=regArgs, stackArgs=stackArgs, checkInterrupt=SOME check, workReg=workReg} end | FunctionCall{regArgs, stackArgs=[], dest, realDest, callKind as ConstantCode m, saveRegs=_} => (* If this is arbitrary precision push the registers rather than marking them as "save". stringOfWord returns 'CODE "PolyAddArbitrary"' etc. *) if (String.isSubstring "Arbitrary\"" (Address.stringOfWord m)) then FunctionCall{regArgs=regArgs, stackArgs=[], callKind=callKind, dest=dest, realDest=realDest, saveRegs=getSaveSet(regNo dest) } else instr | instr as LoadArgument{dest=PReg dreg, ...} => ( if member(dreg, activeAfterThis) then () else print("Register " ^ Int.toString dreg ^ " inactive-" ^ PolyML.makestring instr ^ "\n"); instr ) | _ => instr end (* FunctionCall must mark all registers as "push". *) local fun pushRegisters () = let val activeAfter = union(activeAfterThis, passThrough) fun pushAllButDests i = if List.exists(fn j => i=j) destRegNos then () else case Vector.sub(pregProps, i) of RegPropCacheTagged => raise InternalError "pushRegisters: cache reg" | RegPropCacheUntagged => raise InternalError "pushRegisters: cache reg" | _ => Array.update(requirePushOrDiscard, i, true) in (* We need to push everything active after this except the result register. *) List.app pushAllButDests (setToList activeAfter) end in val () = case instr of FunctionCall{ stackArgs=[], callKind=ConstantCode m, ...} => if (String.isSubstring "Arbitrary\"" (Address.stringOfWord m)) then () else pushRegisters () | FunctionCall _ => pushRegisters () (* It should no longer be necessary to push across a handler but there still seem to be cases that need it. *) | BeginHandler _ => pushRegisters () | CopyToCache {source=PReg srcReg, dest=PReg dstReg, ...} => (* If the source is a cache register marked as "must push" i.e. discard, the destination must also be discarded i.e. not available. Note: the source could be a non-cache register marked for pushing. *) ( case (Vector.sub(pregProps, srcReg), Array.sub(requirePushOrDiscard, srcReg)) of (RegPropCacheTagged, true) => Array.update(requirePushOrDiscard, dstReg, true) | (RegPropCacheUntagged, true) => Array.update(requirePushOrDiscard, dstReg, true) | _ => () ) | _ => () end (* Which entries are active in this instruction but not afterwards? 
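   For example, for an addition r3 := r1 + r2 (made-up registers) where
   neither r1 nor r2 is referred to again in the block, both r1 and r2 end
   up in the instruction's kill set, while r3, being a destination, does
   not: their last use is at this instruction.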
*) val kill = union(minus(stackSourceSet, stackActiveAfterThis), minus(sourceSet, activeAfterThis)) in ({instr=convertedInstr, active=activeForInstr, current=activeAfterThis, kill=kill} :: tail, activeForPrevious, stackActiveForPrevious) end end fun createResult blockNo = let val BasicBlock{ flow, ...} = Vector.sub(blockVector, blockNo) val declSet = Array.sub(declarationArray, blockNo) and importSet = Array.sub(importArray, blockNo) and passSet = Array.sub(passThroughArray, blockNo) and loopSet = Array.sub(loopRegArray, blockNo) and exportSet = Array.sub(exportArray, blockNo) and stackPassSet = Array.sub(stackPassThroughArray, blockNo) and stackImportSet = Array.sub(stackImportArray, blockNo) and stackExportSet = Array.sub(stackExportArray, blockNo) val filteredCode = getOpt(Array.sub(resultCode, blockNo), []) (* At the end of the block we should have the exports active. *) val (resultInstrs, _, _) = List.foldr (createResultInstrs (passSet, stackPassSet)) ([], exportSet, stackExportSet) filteredCode (* Set the active count for the pass through. *) val instrCount = List.length filteredCode val () = List.app(addActivity instrCount) (setToList passSet) val inCCState = case Array.sub(ccInStates, blockNo) of CCNeeded s => SOME s | CCUnused => NONE val outCCState = case Array.sub(ccRequiredOut, blockNo) of CCNeeded s => SOME s | CCUnused => NONE in ExtendedBasicBlock { block = resultInstrs, flow=flow, locals = minus(declSet, exportSet), imports = importSet, exports = exportSet, passThrough = passSet, loopRegs = loopSet, initialStacks = union(stackPassSet, stackImportSet), inCCState = inCCState, outCCState = outCCState } end in val resultBlocks = Vector.tabulate(vectorLength, createResult) val regActive = regActive end val registerState: regState vector = Vector.tabulate(maxPRegs, fn i => { active = Array.sub(regActive, i), refs = Array.sub(regRefs, i), pushState = Array.sub(requirePushOrDiscard, i), prop = Vector.sub(pregProps, i) } ) in (resultBlocks, registerState) end (* Exported function. First filter out unreferenced blocks then process the registers themselves. *) fun identifyRegisters(blockVector, pregProps) = let val vectorLength = Vector.length blockVector val mapArray = Array.array(vectorLength, NONE) and resArray = Array.array(vectorLength, NONE) val count = ref 0 fun setReferences label = case Array.sub(mapArray, label) of NONE => (* Not yet visited *) let val BasicBlock{flow, block} = Vector.sub(blockVector, label) (* Create a new entry for it. *) val newLabel = ! count before count := !count + 1 (* Add it to the map. Any other references will use this without reprocessing. 
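   For example, if the original vector contains blocks 0 to 3 but block 2 is
   not reachable from block 0, the depth-first renumbering visits only 0, 1
   and 3, so the result has three blocks and every label appearing in the
   rebuilt flow fields refers to the new, dense numbering.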
*) val () = Array.update(mapArray, label, SOME newLabel) val newFlow = case flow of Unconditional l => Unconditional(setReferences l) | Conditional{trueJump, falseJump, ccRef, condition} => Conditional{trueJump=setReferences trueJump, falseJump=setReferences falseJump, ccRef=ccRef, condition=condition} | ExitCode => ExitCode | IndexedBr list => IndexedBr(map setReferences list) | SetHandler{handler, continue} => SetHandler{handler=setReferences handler, continue=setReferences continue} | UnconditionalHandle l => UnconditionalHandle(setReferences l) | ConditionalHandle{handler, continue} => ConditionalHandle{handler=setReferences handler, continue=setReferences continue} val () = Array.update(resArray, newLabel, SOME(BasicBlock{flow=newFlow, block=block})) in newLabel end | SOME lab => lab val _ = setReferences 0 val newBlockVector = Vector.tabulate(!count, fn i => valOf(Array.sub(resArray, i))) in identifyRegs(newBlockVector, pregProps) end (* Exported for use in GetConflictSets *) fun getInstructionRegisters instr = let val {sources, dests, ...} = getInstructionState instr in {sources=sources, dests=dests} end (* Exported for use in ICodeOptimise *) val getInstructionCC = #ccOut o getInstructionState structure Sharing = struct type x86ICode = x86ICode and reg = reg and preg = preg and intSet = intSet and basicBlock = basicBlock and extendedBasicBlock = extendedBasicBlock and controlFlow = controlFlow and argument = argument and memoryIndex = memoryIndex and regProperty = regProperty and ccRef = ccRef and outCCState = outCCState end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeToX86Code.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeToX86Code.ML index 7c6fc143..d7ab73ba 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeToX86Code.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86ICodeToX86Code.ML @@ -1,2126 +1,2130 @@ (* - Copyright David C. J. Matthews 2016-20 + Copyright David C. J. Matthews 2016-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86ICodeToX86Code( structure X86CODE: X86CODESIG structure X86OPTIMISE: sig type operation type code type operations = operation list type closureRef val generateCode: {code: code, ops: operations, labelCount: int, resultClosure: closureRef } -> unit structure Sharing: sig type operation = operation type code = code type closureRef = closureRef end end structure DEBUG: DEBUG structure ICODE: ICodeSig structure IDENTIFY: X86IDENTIFYREFSSIG structure INTSET: INTSETSIG structure PRETTY: PRETTYSIG structure STRONGLY: sig val stronglyConnectedComponents: {nodeAddress: 'a -> int, arcs: 'a -> int list } -> 'a list -> 'a list list end sharing X86CODE.Sharing = ICODE.Sharing = X86OPTIMISE.Sharing = IDENTIFY.Sharing = INTSET ): X86ICODEGENERATESIG = struct open IDENTIFY open ICODE open X86CODE open Address exception InternalError = Misc.InternalError fun asGenReg(GenReg r) = r | asGenReg _ = raise InternalError "asGenReg" and asFPReg(FPReg r) = r | asFPReg _ = raise InternalError "asFPReg" and asXMMReg(XMMReg r) = r | asXMMReg _ = raise InternalError "asXMMReg" (* tag a short constant *) fun tag c = 2 * c + 1 local val regs = case targetArch of Native32Bit => [edi, esi, edx, ecx, ebx, eax] | Native64Bit => [r14, r13, r12, r11, r10, r9, r8, edi, esi, edx, ecx, ebx, eax] | ObjectId32Bit => [r14, r13, r12, r11, r10, r9, r8, edi, esi, edx, ecx, eax] in val generalRegisters = List.map GenReg regs end fun opSizeToMove OpSize32 = Move32 | opSizeToMove OpSize64 = Move64 fun icodeToX86Code{blocks, functionName, stackRequired, debugSwitches, allocatedRegisters, resultClosure, ...} = let fun argAsGenReg(RegisterArg(GenReg r)) = r | argAsGenReg _ = raise InternalError "argAsGenReg" fun sourceAsGenRegOrMem(RegisterArg(GenReg r)) = RegisterArg r | sourceAsGenRegOrMem(MemoryArg{offset, base=baseReg, index}) = MemoryArg{base=baseReg, offset=offset, index=index} | sourceAsGenRegOrMem(NonAddressConstArg v) = NonAddressConstArg v | sourceAsGenRegOrMem(AddressConstArg v) = AddressConstArg v | sourceAsGenRegOrMem _ = raise InternalError "sourceAsGenRegOrMem" and sourceAsXMMRegOrMem(RegisterArg(XMMReg r)) = RegisterArg r | sourceAsXMMRegOrMem(MemoryArg{offset, base=baseReg, index}) = MemoryArg{base=baseReg, offset=offset, index=index} | sourceAsXMMRegOrMem(NonAddressConstArg v) = NonAddressConstArg v | sourceAsXMMRegOrMem(AddressConstArg v) = AddressConstArg v | sourceAsXMMRegOrMem _ = raise InternalError "sourceAsGenRegOrMem" (* Moves and loads. *) fun llLoadArgument({ source, dest=GenReg destReg, kind=Move64Bit}, code) = Move { source=sourceAsGenRegOrMem source, destination=RegisterArg destReg, moveSize=Move64 } :: code | llLoadArgument({ source=MemoryArg mLoc, dest=GenReg destReg, kind=MoveByte}, code) = (* Load from memory. *) Move{moveSize=Move8, source=MemoryArg mLoc, destination=RegisterArg destReg} :: code | llLoadArgument({ source=MemoryArg mLoc, dest=GenReg destReg, kind=Move16Bit}, code) = (* Load from memory. *) Move{moveSize=Move16, source=MemoryArg mLoc, destination=RegisterArg destReg} :: code | llLoadArgument({ source, dest=GenReg destReg, kind=Move32Bit}, code) = (* Load from memory. *) Move { source=sourceAsGenRegOrMem source, destination=RegisterArg destReg, moveSize=Move32 } :: code (* Load a floating point value. 
*) | llLoadArgument({source=MemoryArg{offset, base=baseReg, index}, dest=FPReg fpReg, kind=MoveDouble}, code) = moveToOutputFP(fpReg, FPLoadFromMemory{ address={base=baseReg, offset=offset, index=index}, precision=DoublePrecision } :: code) | llLoadArgument({source=AddressConstArg addrConst, dest=FPReg fpReg, kind=MoveDouble}, code) = moveToOutputFP(fpReg, FPLoadFromConst{ constant= addrConst, precision=DoublePrecision } :: code) | llLoadArgument({source=RegisterArg(FPReg fpSrc), dest=FPReg fpDest, kind=MoveDouble}, code) = (* Moving from one FP reg to another. Even if we are moving from FP0 we still do a load because FPStoreToFPReg adds one to the register number to account for one value on the stack. *) moveToOutputFP(fpDest, FPLoadFromFPReg{source=fpSrc, lastRef=false} :: code) (* Load or move from an XMM reg. *) | llLoadArgument({source, dest=XMMReg xmmRegReg, kind=MoveDouble}, code) = XMMArith { opc= SSE2MoveDouble, source=sourceAsXMMRegOrMem source, output=xmmRegReg } :: code (* Load a floating point value. *) | llLoadArgument({source=MemoryArg{offset, base=baseReg, index}, dest=FPReg fpReg, kind=MoveFloat}, code) = moveToOutputFP(fpReg, FPLoadFromMemory{ address={ base=baseReg, offset=offset, index=index }, precision=SinglePrecision } :: code) | llLoadArgument({source=AddressConstArg addrConst, dest=FPReg fpReg, kind=MoveFloat}, code) = moveToOutputFP(fpReg, FPLoadFromConst{ constant= addrConst, precision=SinglePrecision } :: code) (* Load or move from an XMM reg. *) | llLoadArgument({source, dest=XMMReg xmmRegReg, kind=MoveFloat}, code) = XMMArith { opc= SSE2MoveFloat, source=sourceAsXMMRegOrMem source, output=xmmRegReg } :: code (* Any other combinations are not allowed. *) | llLoadArgument _ = raise InternalError "codeGenICode: LoadArgument" (* Unless the destination is FP0 we need to store and pop. 
*) and moveToOutputFP(fpDest, code) = if fpDest = fp0 then code else FPStoreToFPReg{output=fpDest, andPop=true} :: code (* Store to memory *) fun llStoreArgument{ source=RegisterArg(GenReg sourceReg), base, offset, index, kind=Move64Bit} = Move{source=RegisterArg sourceReg, destination=MemoryArg {base=base, offset=offset, index=index}, moveSize=opSizeToMove OpSize64} | llStoreArgument{ source=RegisterArg(GenReg sourceReg), base, offset, index, kind=MoveByte} = Move{moveSize=Move8, source=RegisterArg sourceReg, destination=MemoryArg {base=base, offset=offset, index=index}} | llStoreArgument{ source=RegisterArg(GenReg sourceReg), base, offset, index, kind=Move16Bit} = Move{moveSize=Move16, source=RegisterArg sourceReg, destination=MemoryArg {base=base, offset=offset, index=index}} | llStoreArgument{ source=RegisterArg(GenReg sourceReg), base, offset, index, kind=Move32Bit} = Move{source=RegisterArg sourceReg, destination=MemoryArg {base=base, offset=offset, index=index}, moveSize=opSizeToMove OpSize32} (* Store a short constant to memory *) | llStoreArgument{ source=NonAddressConstArg srcValue, base, offset, index, kind=Move64Bit} = Move{source=NonAddressConstArg srcValue, destination=MemoryArg {base=base, offset=offset, index=index}, moveSize=Move64} | llStoreArgument{ source=NonAddressConstArg srcValue, base, offset, index, kind=Move32Bit} = Move{source=NonAddressConstArg srcValue, destination=MemoryArg {base=base, offset=offset, index=index}, moveSize=Move32} | llStoreArgument{ source=NonAddressConstArg srcValue, base, offset, index, kind=MoveByte} = Move{moveSize=Move8, source=NonAddressConstArg srcValue, destination=MemoryArg{base=base, offset=offset, index=index}} (* Store a long constant to memory *) | llStoreArgument{ source=AddressConstArg srcValue, base, offset, index, kind} = ( (* This Move must be of a polyWord size. *) case (kind, polyWordOpSize) of (Move64Bit, OpSize64) => () | (Move32Bit, OpSize32) => () | _ => raise InternalError "Move of AddressConstArg"; Move{moveSize=opSizeToMove polyWordOpSize, source=AddressConstArg srcValue, destination=MemoryArg {base=base, offset=offset, index=index}} ) (* Store a floating point value. *) | llStoreArgument{source=RegisterArg(FPReg fpReg), offset, base=baseReg, index, kind=MoveDouble} = let val _ = fpReg = fp0 orelse raise InternalError "llStoreArgument: Store FPReg <> fp0" in FPStoreToMemory{ address={ base=baseReg, offset=offset, index=index}, precision=DoublePrecision, andPop=true } end | llStoreArgument{source=RegisterArg(XMMReg xmmRegReg), offset, base=baseReg, index, kind=MoveDouble} = XMMStoreToMemory { toStore=xmmRegReg, address={base=baseReg, offset=offset, index=index}, precision=DoublePrecision } (* Store a floating point value. 
*) | llStoreArgument{source=RegisterArg(FPReg fpReg), offset, base=baseReg, index, kind=MoveFloat} = let val _ = fpReg = fp0 orelse raise InternalError "llStoreArgument: Store FPReg <> fp0" in FPStoreToMemory{address={ base=baseReg, offset=offset, index=index}, precision=SinglePrecision, andPop=true } end | llStoreArgument{source=RegisterArg(XMMReg xmmRegReg), offset, base=baseReg, index, kind=MoveFloat} = XMMStoreToMemory { toStore=xmmRegReg, address={base=baseReg, offset=offset, index=index}, precision=SinglePrecision } | llStoreArgument _ = raise InternalError "llStoreArgument: StoreArgument" val numBlocks = Vector.length blocks fun getAllocatedReg r = Vector.sub(allocatedRegisters, r) val getAllocatedGenReg = asGenReg o getAllocatedReg and getAllocatedFPReg = asFPReg o getAllocatedReg and getAllocatedXMMReg = asXMMReg o getAllocatedReg fun codeExtIndex NoMemIndex = NoIndex | codeExtIndex(MemIndex1(PReg r)) = Index1(getAllocatedGenReg r) | codeExtIndex(MemIndex2(PReg r)) = Index2(getAllocatedGenReg r) | codeExtIndex(MemIndex4(PReg r)) = Index4(getAllocatedGenReg r) | codeExtIndex(MemIndex8(PReg r)) = Index8(getAllocatedGenReg r) | codeExtIndex ObjectIndex = raise InternalError "codeExtIndex: ObjectIndex" local fun codeExtArgument getReg (RegisterArgument(PReg r)) = RegisterArg(getReg r) | codeExtArgument _ (AddressConstant m) = AddressConstArg m | codeExtArgument _ (IntegerConstant i) = NonAddressConstArg i | codeExtArgument _ (MemoryLocation{base=PReg bReg, offset, index=ObjectIndex, cache=NONE}) = MemoryArg{base=ebx, index=Index4(getAllocatedGenReg bReg), offset=offset} | codeExtArgument _ (MemoryLocation{base=PReg bReg, offset, index, cache=NONE}) = MemoryArg{base=getAllocatedGenReg bReg, offset=offset, index=codeExtIndex index} | codeExtArgument getReg (MemoryLocation{cache=SOME(PReg r), ...}) = RegisterArg(getReg r) | codeExtArgument _ (StackLocation{wordOffset, cache=NONE, ...}) = MemoryArg{base=esp, offset=wordOffset*Word.toInt nativeWordSize, index=NoIndex} | codeExtArgument getReg (StackLocation{cache=SOME(PReg r), ...}) = RegisterArg(getReg r) | codeExtArgument _ (ContainerAddr _) = raise InternalError "codeExtArgument - ContainerAddr" in val codeExtArgument = codeExtArgument getAllocatedReg and codeExtArgumentAsGenReg = codeExtArgument getAllocatedGenReg and codeExtArgumentAsFPReg = codeExtArgument getAllocatedFPReg and codeExtArgumentAsXMMReg = codeExtArgument getAllocatedXMMReg end fun codeCallKind Recursive = NonAddressConstArg 0 (* Jump to the start *) | codeCallKind (ConstantCode v) = AddressConstArg v | codeCallKind FullCall = ( case targetArch of ObjectId32Bit => MemoryArg{base=ebx, index=Index4 edx, offset=0} | _ => MemoryArg{base=edx, index=NoIndex, offset=0} ) (* Move unless the registers are the same. *) fun moveIfNecessary({src, dst, kind}, code) = if src = dst then code else llLoadArgument({source=RegisterArg src, dest=dst, kind=kind}, code) fun opSizeToIMove OpSize64 = Move64Bit | opSizeToIMove OpSize32 = Move32Bit datatype llsource = StackSource of int | OtherSource of reg regOrMemoryArg fun sourceToX86Code(OtherSource r) = r | sourceToX86Code(StackSource wordOffset) = MemoryArg{base=esp, offset=wordOffset*Word.toInt nativeWordSize, index=NoIndex} local fun indexRegister NoIndex = NONE | indexRegister (Index1 r) = SOME r | indexRegister (Index2 r) = SOME r | indexRegister (Index4 r) = SOME r | indexRegister (Index8 r) = SOME r (* The registers are numbered from 0. Choose values that don't conflict with the stack addresses. 
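   Stack destinations are identified by their non-negative word offsets, so
   the registers are mapped onto the negative integers with
       regNo r = ~1 - nReg r
   for instance the register numbered 0 becomes node ~1 and the register
   numbered 5 becomes node ~6, so a register node can never clash with a
   stack node.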
*) fun regNo r = ~1 - nReg r type node = {src: llsource, dst: destinations } fun nodeAddress({dst=RegDest r, ...}: node) = regNo r | nodeAddress({dst=StackDest a, ...}) = a fun arcs({src=StackSource wordOffset, ...}: node) = [wordOffset] | arcs{src=OtherSource(RegisterArg r), ...} = [regNo r] | arcs{src=OtherSource(MemoryArg{base, index, ...}), ...} = (case indexRegister index of NONE => [regNo(GenReg base)] | SOME r => [regNo(GenReg base), regNo(GenReg r)]) | arcs _ = [] in val stronglyConnected = STRONGLY.stronglyConnectedComponents { nodeAddress=nodeAddress, arcs=arcs } end (* This is a general function for moving values into registers or to the stack where it is possible that the source values might also be in use as destinations. The stack is used for destinations only for tail recursive calls. *) fun moveMultipleValues(moves, workReg: reg option, code) = let val _ = if List.exists(fn {dst=StackDest _, ...} => true | _ => false) moves andalso not(isSome workReg) then raise InternalError "no work reg" else () fun moveValues ([], code) = code (* We're done. *) | moveValues (arguments, code) = let (* stronglyConnectedComponents does two things. It detects loops where it's not possible to move items without breaking the loop but more importantly it orders the dependencies so that if there are no loops we can load the source and store it in the destination knowing that we won't overwrite anything we might later need. *) val ordered = stronglyConnected arguments fun isFPReg(GenReg _) = false | isFPReg(XMMReg _) = true | isFPReg(FPReg _) = true fun moveEachValue ([], code) = code | moveEachValue ([{dst=RegDest reg, src as OtherSource(RegisterArg r)}] :: rest, code) = (* Source and dest are both regs - only move if they're different. *) if r = reg then moveEachValue(rest, code) else moveEachValue(rest, llLoadArgument({source=sourceToX86Code src, dest=reg, kind=if isFPReg reg then MoveDouble else moveNativeWord}, code)) | moveEachValue ([{dst=RegDest reg, src as StackSource _}] :: rest, code) = (* If loading from the stack always use native word. The value could be a stack address. *) moveEachValue(rest, llLoadArgument({source=sourceToX86Code src, dest=reg, kind=moveNativeWord}, code)) | moveEachValue ([{dst=RegDest reg, src}] :: rest, code) = (* Load from store or a constant. Have to use movePolyWord if it's an address constant. *) moveEachValue(rest, llLoadArgument({source=sourceToX86Code src, dest=reg, kind=movePolyWord}, code)) | moveEachValue ([{dst=StackDest _, src=OtherSource(MemoryArg _ )}] :: _, _) = raise InternalError "moveEachValue - MemoryArgument" | moveEachValue ([{dst=StackDest addr, src as StackSource wordOffset}] :: rest, code) = (* Copy a stack location - needs a load and store unless the address is the same. *) if addr = wordOffset then moveEachValue(rest, code) else let val workReg = valOf workReg in moveEachValue(rest, llStoreArgument{source=RegisterArg workReg, base=esp, index=NoIndex, offset = addr*Word.toInt nativeWordSize, kind=moveNativeWord} :: llLoadArgument({source=sourceToX86Code src, dest=workReg, kind=moveNativeWord}, code)) end | moveEachValue ([{dst=StackDest addr, src}] :: rest, code) = (* Store from a register or a constant. *) moveEachValue(rest, llStoreArgument{ source=sourceToX86Code src, base=esp, index=NoIndex, offset = addr*Word.toInt nativeWordSize, kind=moveNativeWord} :: code) | moveEachValue((cycle as first :: _ :: _) :: rest, code) = (* We have a cycle. *) let (* We need to exchange some of the arguments. 
Doing an exchange here will set the destination with the correct source. However we have to process every subsequent entry with the swapped registers. That may well mean that one of those entries becomes trivial. Using XCHG means that we can move N registers in N-1 exchanges. We also need to rerun stronglyConnectedComponents on at least the rest of this cycle. It's easiest to flatten the rest and do everything. *) (* Try to find either a register-register move or a register-stack move. If not use the first. If there's a stack-register move there will also be a register-stack so we don't need to look for both. *) val {dst=selectDst, src=selectSrc} = case List.find(fn {src=OtherSource(RegisterArg _), dst=RegDest _} => true | _ => false) cycle of SOME found => found | _ => ( case List.find(fn {dst=RegDest _, ...} => true | _ => false) cycle of SOME found => found | NONE => first ) (* This includes this entry but after the swap we'll eliminate it. *) val flattened = List.foldl(fn (a, b) => a @ b) [] (cycle :: rest) val destAsSource = case selectDst of RegDest reg => OtherSource(RegisterArg reg) | StackDest s => StackSource s (* Source is not an equality type. We can't currently handle the situation where the source is a memory location. *) fun match(OtherSource(RegisterArg r1), OtherSource(RegisterArg r2)) = r1 = r2 | match(StackSource s1, StackSource s2) = s1 = s2 | match(OtherSource(MemoryArg _), _) = raise InternalError "moveEachValue: cycle" | match _ = false fun swapSources{src, dst} = if match(src, selectSrc) then {src=destAsSource, dst=dst} else if match(src, destAsSource) then {src=selectSrc, dst=dst} else {src=src, dst=dst} (* Try to use register to register exchange if we can. A register-to-memory exchange involves a bus lock and we'd like to avoid that. *) val exchangeCode = case (selectDst, selectSrc) of (RegDest(GenReg regA), OtherSource(RegisterArg(GenReg regB))) => XChng { reg=regA, arg=RegisterArg regB, opSize=nativeWordOpSize } :: code | (RegDest(XMMReg regA), OtherSource(RegisterArg(XMMReg regB))) => (* This is the only case where we can have a cycle with SSE2 regs. There are various ways of doing it but XORs are probably the easiest. *) XMMArith{opc=SSE2Xor, source=RegisterArg regA, output=regB} :: XMMArith{opc=SSE2Xor, source=RegisterArg regB, output=regA} :: XMMArith{opc=SSE2Xor, source=RegisterArg regA, output=regB} :: code | (RegDest _, OtherSource(RegisterArg _)) => raise InternalError "moveEachValue: invalid register combination" | (RegDest regA, src as StackSource addr) => let val workReg = valOf workReg in llStoreArgument{source=RegisterArg workReg, base=esp, index=NoIndex, offset = addr*Word.toInt nativeWordSize, kind=moveNativeWord} :: XChng { reg=asGenReg regA, arg=RegisterArg(asGenReg workReg), opSize=nativeWordOpSize } :: llLoadArgument({source=sourceToX86Code src, dest=workReg, kind=moveNativeWord}, code) end | (StackDest addr, OtherSource(RegisterArg regA)) => let (* This doesn't actually occur because we always find the case above. 
*) val workReg = valOf workReg in llStoreArgument{source=RegisterArg workReg, base=esp, index=NoIndex, offset = addr*Word.toInt nativeWordSize, kind=moveNativeWord} :: XChng { reg=asGenReg regA, arg=RegisterArg (asGenReg workReg), opSize=nativeWordOpSize } :: llLoadArgument({ source=MemoryArg{base=esp, offset=addr*Word.toInt nativeWordSize, index=NoIndex}, dest=workReg, kind=moveNativeWord}, code) end | (StackDest addr1, StackSource addr2) => let val workReg = valOf workReg (* This can still happen if we have argument registers that need to be loaded from stack locations and those argument registers happen to contain the values to be stored into those stack locations. e.g. ebx => S8; eax => S7; S8 => eax; S7 => eax. Eliminating the registers results in a cycle. It may be possible to avoid this by excluding the argument registers (eax; ebx; r8; r9; r10) from holding values in the area to be overwritten. *) in llStoreArgument{source=RegisterArg workReg, base=esp, index=NoIndex, offset = addr1*Word.toInt nativeWordSize, kind=moveNativeWord} :: XChng { reg=asGenReg workReg, arg=MemoryArg{base=esp, offset=addr2*Word.toInt nativeWordSize, index=NoIndex}, opSize=nativeWordOpSize } :: llLoadArgument({ source=MemoryArg{base=esp, offset=addr1*Word.toInt nativeWordSize, index=NoIndex}, dest=workReg, kind=moveNativeWord}, code) end | _ => raise InternalError "moveEachValue: cycle" in moveValues(List.map swapSources flattened, exchangeCode) end | moveEachValue(([]) :: _, _) = (* This should not happen - avoid warning. *) raise InternalError "moveEachValue - empty set" in moveEachValue(ordered, code) end in moveValues(moves, code) end (* Where we have multiple specific registers as either source or destination there is the potential that a destination register is currently in use as a source. *) fun moveMultipleRegisters(regPairList, code) = let val regPairsAsDests = List.map(fn {src, dst} => {src=OtherSource(RegisterArg src), dst=RegDest dst}) regPairList in moveMultipleValues(regPairsAsDests, NONE, code) end val outputLabelCount = ref 0 val blockToLabelMap = Array.array(numBlocks, ~1) fun makeLabel() = Label{labelNo = ! outputLabelCount} before outputLabelCount := !outputLabelCount + 1 fun getBlockLabel blockNo = case Array.sub(blockToLabelMap, blockNo) of ~1 => let val label as Label{labelNo} = makeLabel() val () = Array.update(blockToLabelMap, blockNo, labelNo) in label end | n => Label{labelNo=n} (* The profile object is a single mutable with the F_bytes bit set. *) local val v = RunCall.allocateByteMemory(0w1, Word.fromLargeWord(Word8.toLargeWord(Word8.orb(F_mutable, F_bytes)))) fun clear 0w0 = () | clear i = (assignByte(v, i-0w1, 0w0); clear (i-0w1)) val () = clear wordSize in val profileObject = toMachineWord v end (* Switch to indicate if we want to trace where live data has been allocated. *) val addAllocatingFunction = DEBUG.getParameter DEBUG.profileAllocationTag debugSwitches = 1 fun llAllocateMemoryOperation ({ size, flags, dest, saveRegs}, code) = let val toReg = asGenReg dest val preserve = saveRegs (* Allocate memory. N.B. Instructions are in reverse order.
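As a rough illustration of the header word built by allocStore below: with a 32-bit poly word the length occupies the low 24 bits and the flags byte the top 8, so a two-word cell with flags 0wx01 (an arbitrary illustrative value) would get the header 0wx01000002, stored at offset ~wordSize from the start of the object. On Native64Bit with a non-zero flags byte the flags are instead written separately with an 8-bit move. When allocation profiling is enabled one extra word is allocated and filled with profileObject.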
*) fun allocStore{size, flags, output, preserve} = if targetArch = Native64Bit andalso flags <> 0w0 then [Move{moveSize=Move8, source=NonAddressConstArg(Word8.toLargeInt flags), destination=MemoryArg {offset= ~1, base=output, index=NoIndex}}, Move{source=NonAddressConstArg(LargeInt.fromInt size), destination=MemoryArg {offset= ~ (Word.toInt wordSize), base=output, index=NoIndex}, moveSize=opSizeToMove polyWordOpSize}, AllocStore{size=size, output=output, saveRegs=preserve}] else let val lengthWord = IntInf.orb(IntInf.fromInt size, IntInf.<<(Word8.toLargeInt flags, 0w24)) in [Move{source=NonAddressConstArg lengthWord, destination=MemoryArg {offset= ~ (Word.toInt wordSize), base=output, index=NoIndex}, moveSize=opSizeToMove polyWordOpSize}, AllocStore{size=size, output=output, saveRegs=preserve}] end val allocCode = (* If we need to add the profile object *) if addAllocatingFunction then allocStore {size=size+1, flags=Word8.orb(flags, Address.F_profile), output=toReg, preserve=preserve} @ [Move{moveSize=opSizeToMove polyWordOpSize, source=AddressConstArg profileObject, destination=MemoryArg {base=toReg, offset=size*Word.toInt wordSize, index=NoIndex}}] else allocStore {size=size, flags=flags, output=toReg, preserve=preserve} (* Convert to an object index if necessary. *) val convertToObjId = if targetArch = ObjectId32Bit then [ ShiftConstant{ shiftType=SHR, output=toReg, shift=0w2, opSize=OpSize64 }, ArithToGenReg{ opc=SUB, output=toReg, source=RegisterArg ebx, opSize=nativeWordOpSize } ] else [] in convertToObjId @ allocCode @ code end (* Check the stack limit "register". This is used both at the start of a function for genuine stack checking but also in a loop to check for an interrupt. We need to save the registers even across an interrupt because it can be used if another thread wants a GC. *) fun testRegAndTrap(reg, entryPt, saveRegs) = let (* Normally we won't have a stack overflow so we will skip the check. *) val skipCheckLab = makeLabel() in (* Need it in reverse order. *) [ JumpLabel skipCheckLab, CallRTS{rtsEntry=entryPt, saveRegs=saveRegs}, ConditionalBranch{test=JNB, label=skipCheckLab}, ArithToGenReg{ opc=CMP, output=reg, source=MemoryArg{offset=memRegStackLimit, base=ebp, index=NoIndex}, opSize=nativeWordOpSize } ] end local val numRegisters = Vector.length allocatedRegisters val uses = Array.array(numRegisters, false) fun used(PReg r) = Array.update(uses, r, true) fun isUsed(PReg r) = Array.sub(uses, r) (* Set the registers used by the sources. This differs from getInstructionState in that we don't set the base register of a memory location to "used" if we can use the cache. *) fun argUses(RegisterArgument rarg) = used rarg | argUses(MemoryLocation { cache=SOME cr, ...}) = used cr | argUses(MemoryLocation { base, index, cache=NONE, ...}) = (used base; indexUses index) | argUses(StackLocation { cache=SOME rarg, ...}) = used rarg | argUses _ = () and indexUses NoMemIndex = () | indexUses(MemIndex1 arg) = used arg | indexUses(MemIndex2 arg) = used arg | indexUses(MemIndex4 arg) = used arg | indexUses(MemIndex8 arg) = used arg | indexUses ObjectIndex = () (* LoadArgument, TagValue, CopyToCache, UntagValue and BoxValue are eliminated if their destination is not used. In that case their source are not used either. 
*) fun instructionUses(LoadArgument { source, dest, ...}) = if isUsed dest then argUses source else () | instructionUses(StoreArgument{ source, base, index, ...}) = (argUses source; used base; indexUses index) | instructionUses(LoadMemReg _) = () | instructionUses(StoreMemReg {source, ...}) = used source | instructionUses(BeginFunction _) = () | instructionUses(FunctionCall{regArgs, stackArgs, ...}) = (List.app(argUses o #1) regArgs; List.app argUses stackArgs) | instructionUses(TailRecursiveCall{regArgs, stackArgs, ...}) = (List.app(argUses o #1) regArgs; List.app(argUses o #src) stackArgs) | instructionUses(AllocateMemoryOperation _) = () | instructionUses(AllocateMemoryVariable{size, ...}) = used size | instructionUses(InitialiseMem{size, addr, init}) = (used size; used addr; used init) | instructionUses(InitialisationComplete) = () | instructionUses(BeginLoop) = () | instructionUses(JumpLoop{regArgs, stackArgs, ...}) = (List.app(argUses o #1) regArgs; List.app(argUses o #1) stackArgs) | instructionUses(RaiseExceptionPacket{packetReg}) = used packetReg | instructionUses(ReserveContainer _) = () | instructionUses(IndexedCaseOperation{testReg, ...}) = used testReg | instructionUses(LockMutable{addr}) = used addr | instructionUses(WordComparison{arg1, arg2, ...}) = (used arg1; argUses arg2) | instructionUses(CompareLiteral{arg1, ...}) = argUses arg1 | instructionUses(CompareByteMem{arg1={base, index, ...}, ...}) = (used base; indexUses index) | instructionUses(PushExceptionHandler _) = () | instructionUses(PopExceptionHandler _) = () | instructionUses(BeginHandler _) = () | instructionUses(ReturnResultFromFunction{resultReg, ...}) = used resultReg | instructionUses(ArithmeticFunction{operand1, operand2, ...}) = (used operand1; argUses operand2) | instructionUses(TestTagBit{arg, ...}) = argUses arg | instructionUses(PushValue {arg, ...}) = argUses arg | instructionUses(CopyToCache{source, dest, ...}) = if isUsed dest then used source else () | instructionUses(ResetStackPtr _) = () | instructionUses(StoreToStack {source, ...}) = argUses source | instructionUses(TagValue{source, dest, ...}) = if isUsed dest then used source else () | instructionUses(UntagValue{dest, cache=SOME cacheR, ...}) = if isUsed dest then used cacheR else () | instructionUses(UntagValue{source, dest, cache=NONE, ...}) = if isUsed dest then used source else () | instructionUses(LoadEffectiveAddress{base, index, ...}) = (case base of SOME bReg => used bReg | NONE => (); indexUses index) | instructionUses(ShiftOperation{operand, shiftAmount, ...}) = (used operand; argUses shiftAmount) | instructionUses(Multiplication{operand1, operand2, ...}) = (used operand1; argUses operand2) | instructionUses(Division{dividend, divisor, ...}) = (used dividend; argUses divisor) - | instructionUses(AtomicExchangeAndAdd{base, source}) = (used base; used source) + | instructionUses(AtomicExchangeAndAdd{base, source, ...}) = (used base; used source) | instructionUses(BoxValue{source, dest, ...}) = if isUsed dest then used source else () | instructionUses(CompareByteVectors{vec1Addr, vec2Addr, length, ...}) = (used vec1Addr; used vec2Addr; used length) | instructionUses(BlockMove{srcAddr, destAddr, length, ...}) = (used srcAddr; used destAddr; used length) | instructionUses(X87Compare{arg1, arg2, ...}) = (used arg1; argUses arg2) | instructionUses(SSE2Compare{arg1, arg2, ...}) = (used arg1; argUses arg2) | instructionUses(X87FPGetCondition _) = () | instructionUses(X87FPArith{arg1, arg2, ...}) = (used arg1; argUses arg2) | 
instructionUses(X87FPUnaryOps{source, ...}) = used source | instructionUses(X87Float{source, ...}) = argUses source | instructionUses(SSE2Float{source, ...}) = argUses source | instructionUses(SSE2FPUnary{source, ...}) = argUses source | instructionUses(SSE2FPBinary{arg1, arg2, ...}) = (used arg1; argUses arg2) | instructionUses(TagFloat{source, dest, ...}) = if isUsed dest then used source else () | instructionUses(UntagFloat{dest, cache=SOME cacheR, ...}) = if isUsed dest then used cacheR else () | instructionUses(UntagFloat{source, dest, cache=NONE, ...}) = if isUsed dest then argUses source else () | instructionUses(GetSSE2ControlReg _) = () | instructionUses(SetSSE2ControlReg{source}) = used source | instructionUses(GetX87ControlReg _) = () | instructionUses(SetX87ControlReg{source}) = used source | instructionUses(X87RealToInt{source, ...}) = used source | instructionUses(SSE2RealToInt{source, ...}) = argUses source | instructionUses(SignExtend32To64{source, dest}) = if isUsed dest then argUses source else () | instructionUses(TouchArgument{source}) = used source + | instructionUses PauseCPU = () (* Depth-first scan. *) val visited = Array.array(numBlocks, false) fun processBlocks blockNo = if Array.sub(visited, blockNo) then () (* Done or currently being done. *) else let val () = Array.update(visited, blockNo, true) val ExtendedBasicBlock { flow, block,...} = Vector.sub(blocks, blockNo) val () = (* Process the dependencies first. *) case flow of ExitCode => () | Unconditional m => processBlocks m | Conditional {trueJump, falseJump, ...} => (processBlocks trueJump; processBlocks falseJump) | IndexedBr cases => List.app processBlocks cases | SetHandler{ handler, continue } => (processBlocks handler; processBlocks continue) | UnconditionalHandle _ => () | ConditionalHandle { continue, ...} => processBlocks continue (* Now this block. *) in List.foldr(fn ({instr, ...}, ()) => instructionUses instr) () block end in val () = processBlocks 0 val isUsed = isUsed end (* Return the register part of a cached item. *) fun decache(StackLocation{cache=SOME r, ...}) = RegisterArgument r | decache(MemoryLocation{cache=SOME r, ...}) = RegisterArgument r | decache arg = arg (* Only get the registers that are actually used. *) val getSaveRegs = List.mapPartial(fn (reg as PReg r) => if isUsed reg then SOME(getAllocatedGenReg r) else NONE) fun codeExtended _ ({instr=LoadArgument{source, dest as PReg dreg, kind}, ...}, code) = if not (isUsed dest) then code else let val realDestReg = getAllocatedReg dreg in case source of RegisterArgument(PReg sreg) => (* Register to register move. Try to use the same register for the source as the destination to eliminate the instruction. *) (* If the source is the same as the destination we don't need to do anything. *) moveIfNecessary({src=getAllocatedReg sreg, dst=realDestReg, kind=kind}, code) | MemoryLocation{cache=SOME(PReg sreg), ...} => (* This is also a register to register move but because the original load is from memory it could be a byte or short precision value. *) let val moveKind = case kind of Move64Bit => Move64Bit | MoveByte => Move32Bit | Move16Bit => Move32Bit | Move32Bit => Move32Bit | MoveFloat => MoveFloat | MoveDouble => MoveDouble in moveIfNecessary({src=getAllocatedReg sreg, dst=realDestReg, kind=moveKind}, code) end (* TODO: Isn't this covered by codeExtArgument? It looks like it was added in the 32-in-64 changes. 
*) | StackLocation{cache=SOME(PReg sreg), ...} => moveIfNecessary({src=getAllocatedReg sreg, dst=realDestReg, kind=kind}, code) | source as StackLocation _ => (* Always use native loads from the stack. *) llLoadArgument({source=codeExtArgument source, dest=realDestReg, kind=moveNativeWord}, code) | source => (* Loads of constants or from an address. *) llLoadArgument({source=codeExtArgument source, dest=realDestReg, kind=kind}, code) end | codeExtended _ ({instr=StoreArgument{ source, base=PReg bReg, offset, index, kind, ... }, ...}, code) = let val (baseReg, indexVal) = case index of ObjectIndex => (ebx, Index4(getAllocatedGenReg bReg)) | _ => (getAllocatedGenReg bReg, codeExtIndex index) in case (decache source, kind) of (RegisterArgument(PReg sReg), MoveByte) => if targetArch <> Native32Bit then llStoreArgument{ source=codeExtArgument source, base=baseReg, offset=offset, index=indexVal, kind=MoveByte} :: code else (* This is complicated on X86/32. We can't use edi or esi for the store registers. Instead we reserve ecx (see special case in "identify") and use that if we have to. *) let val realStoreReg = getAllocatedReg sReg val (moveCode, storeReg) = if realStoreReg = GenReg edi orelse realStoreReg = GenReg esi then (moveIfNecessary({src=realStoreReg, dst=GenReg ecx, kind=moveNativeWord}, code), GenReg ecx) else (code, realStoreReg) in llStoreArgument{ source=RegisterArg storeReg, base=baseReg, offset=offset, index=indexVal, kind=MoveByte} :: moveCode end | _ => llStoreArgument{ source=codeExtArgument source, base=baseReg, offset=offset, index=indexVal, kind=kind} :: code end | codeExtended _ ({instr=LoadMemReg { offset, dest=PReg pr, kind}, ...}, code) = (* Load from the "memory registers" pointed at by rbp. *) llLoadArgument({source=MemoryArg{base=rbp, offset=offset, index=NoIndex}, dest=getAllocatedReg pr, kind=kind}, code) | codeExtended _ ({instr=StoreMemReg { offset, source=PReg pr, kind}, ...}, code) = (* Store into the "memory register". *) llStoreArgument{ source=RegisterArg(getAllocatedReg pr), base=rbp, offset=offset, index=NoIndex, kind=kind} :: code | codeExtended _ ({instr=BeginFunction{regArgs, ...}, ...}, code) = let val minStackCheck = 20 val saveRegs = List.mapPartial(fn (_, GenReg r) => SOME r | _ => NONE) regArgs val preludeCode = if stackRequired >= minStackCheck then let (* Compute the necessary amount in edi and compare that. *) val stackByteAdjust = ~ (Word.toInt nativeWordSize) * stackRequired val testEdiCode = testRegAndTrap (edi, StackOverflowCallEx, saveRegs) in (* N.B. In reverse order. *) testEdiCode @ [LoadAddress{output=edi, base=SOME esp, index=NoIndex, offset=stackByteAdjust, opSize=nativeWordOpSize}] end else testRegAndTrap (esp, StackOverflowCall, saveRegs) val usedRegs = List.filter (isUsed o #1) regArgs fun mkPair(PReg pr, rr) = {src=rr,dst=getAllocatedReg pr} val regPairs = List.map mkPair usedRegs in moveMultipleRegisters(regPairs, preludeCode @ code) end | codeExtended _ ({instr=TailRecursiveCall{callKind, regArgs=oRegArgs, stackArgs=oStackArgs, stackAdjust, currStackSize, workReg=PReg wReg}, ...}, code) = let val regArgs = List.map (fn (arg, reg) => (decache arg, reg)) oRegArgs and stackArgs = List.map(fn {src, stack } => {src=decache src, stack=stack}) oStackArgs val workReg = getAllocatedReg wReg (* We must leave stack entries as stack entries for the moment. 
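Keeping them symbolic lets moveMultipleValues see how the stack locations being read overlap the locations about to be overwritten. A rough example: if the callee's first stack argument is to come from S1 while S1 itself must receive the old value of S0, storing in the wrong order would destroy a value that is still needed; the dependency ordering, or the work register when there is a genuine cycle, exists to prevent that.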
*) fun codeArg(StackLocation{wordOffset, cache=NONE, ...}) = StackSource wordOffset | codeArg arg = OtherSource(codeExtArgument arg) val extStackArgs = map (fn {stack, src} => {dst=StackDest(stack+currStackSize), src=codeArg src}) stackArgs val extRegArgs = map (fn (a, r) => {src=codeArg a, dst=RegDest r}) regArgs (* Tail recursive calls are complicated because we generally have to overwrite the existing stack. That means storing the arguments in the right order to avoid overwriting a value that we are using for a different argument. *) fun codeTailCall(arguments: {dst: destinations, src: llsource} list, stackAdjust, code) = if stackAdjust < 0 then let (* If the function we're calling takes more arguments on the stack than the current function we will have to extend the stack. Do that by pushing the argument whose offset is at -1. Then adjust all the offsets and repeat. *) val {src=argM1, ...} = valOf(List.find(fn {dst=StackDest ~1, ...} => true | _ => false) arguments) fun renumberArgs [] = [] | renumberArgs ({dst=StackDest ~1, ...} :: args) = renumberArgs args (* Remove the one we've done. *) | renumberArgs ({dst, src} :: args) = let val newDest = case dst of StackDest d => StackDest(d+1) | regDest => regDest val newSrc = case src of StackSource wordOffset => StackSource(wordOffset+1) | other => other in {dst=newDest, src=newSrc} :: renumberArgs args end in codeTailCall(renumberArgs arguments, stackAdjust+1, PushToStack(sourceAsGenRegOrMem(sourceToX86Code argM1)) :: code) end else let val loadArgs = moveMultipleValues(arguments, SOME workReg, code) in if stackAdjust = 0 then loadArgs else ResetStack{numWords=stackAdjust, preserveCC=false} :: loadArgs end in JumpAddress(codeCallKind callKind) :: codeTailCall(extStackArgs @ extRegArgs, stackAdjust+currStackSize, code) end | codeExtended _ ({instr=FunctionCall{callKind, regArgs=oRegArgs, stackArgs=oStackArgs, dest=PReg dReg, realDest, saveRegs}, ...}, code) = let val regArgs = List.map (fn (arg, reg) => (decache arg, reg)) oRegArgs and stackArgs = List.map decache oStackArgs val destReg = getAllocatedReg dReg fun pushStackArgs ([], _, code) = code | pushStackArgs (ContainerAddr {stackOffset, ...} ::args, argNum, code) = let val adjustedAddr = stackOffset+argNum (* If there is an offset relative to rsp we need to add this in. *) val addOffset = if adjustedAddr = 0 then [] else [ArithMemConst{opc=ADD, address={offset=0, base=esp, index=NoIndex}, source=LargeInt.fromInt(adjustedAddr*Word.toInt nativeWordSize), opSize=nativeWordOpSize}] in pushStackArgs(args, argNum+1, addOffset @ PushToStack(RegisterArg esp) :: code) end | pushStackArgs (StackLocation {wordOffset, container, field, ...} ::args, argNum, code) = let (* Have to adjust the offsets of stack arguments. *) val adjusted = StackLocation{wordOffset=wordOffset+argNum, container=container, field=field+argNum, cache=NONE} in pushStackArgs(args, argNum+1, PushToStack(codeExtArgumentAsGenReg adjusted) :: code) end | pushStackArgs (arg::args, argNum, code) = pushStackArgs(args, argNum+1, PushToStack(codeExtArgumentAsGenReg arg) :: code) val pushedArgs = pushStackArgs(stackArgs, 0, code (* Initial code *)) (* We have to adjust any stack offset to account for the arguments we've pushed. *) val numStackArgs = List.length stackArgs (* We don't currently allow the arguments to be memory locations and instead force them into registers. That may be simpler especially if we can get the values directly into the required register. 
*) fun getRegArgs(RegisterArgument(PReg pr), reg) = SOME{dst=reg, src=getAllocatedReg pr} | getRegArgs(StackLocation {cache=SOME(PReg pr), ...}, reg) = SOME{dst=reg, src=getAllocatedReg pr} | getRegArgs(MemoryLocation _, _) = raise InternalError "FunctionCall - MemoryLocation" | getRegArgs _ = NONE val loadRegArgs = moveMultipleRegisters(List.mapPartial getRegArgs regArgs, pushedArgs) (* These are all items we can load without requiring a source register. That includes loading from the stack. *) fun getConstArgs((AddressConstant m, reg), code) = llLoadArgument({source=AddressConstArg m, dest=reg, kind=movePolyWord}, code) | getConstArgs((IntegerConstant i, reg), code) = llLoadArgument({source=NonAddressConstArg i, dest=reg, kind=movePolyWord}, code) | getConstArgs((StackLocation { cache=SOME _, ...}, _), code) = code | getConstArgs((StackLocation { wordOffset, ...}, reg), code) = llLoadArgument({source=MemoryArg{offset=(wordOffset+numStackArgs)*Word.toInt nativeWordSize, base=esp, index=NoIndex}, dest=reg, kind=moveNativeWord}, code) | getConstArgs((ContainerAddr {stackOffset, ...}, reg), code) = if stackOffset+numStackArgs = 0 then llLoadArgument({source=RegisterArg(GenReg esp), dest=reg, kind=moveNativeWord}, code) else LoadAddress{ output=asGenReg reg, offset=(stackOffset+numStackArgs)*Word.toInt nativeWordSize, base=SOME esp, index=NoIndex, opSize=nativeWordOpSize } :: code | getConstArgs((RegisterArgument _, _), code) = code | getConstArgs((MemoryLocation _, _), code) = code val loadConstArgs = List.foldl getConstArgs loadRegArgs regArgs (* Push the registers before the call and pop them afterwards. *) fun makeSaves([], code) = CallAddress(codeCallKind callKind) :: code | makeSaves(PReg reg::regs, code) = let val areg = getAllocatedGenReg reg val _ = areg = eax andalso raise InternalError "codeExtended: eax in save regs" val _ = if List.exists(fn (_, r) => r = GenReg areg) regArgs then raise InternalError "codeExtended: arg reg in save regs" else () in PopR areg :: makeSaves(regs, PushToStack(RegisterArg areg) :: code) end in moveIfNecessary({dst=destReg, src=realDest, kind=case realDest of GenReg _ => moveNativeWord | _ => MoveDouble}, makeSaves(saveRegs, loadConstArgs)) end | codeExtended _ ({instr=AllocateMemoryOperation{ size, flags, dest=PReg dReg, saveRegs}, ...}, code) = let val preserve = getSaveRegs saveRegs in llAllocateMemoryOperation({ size=size, flags=flags, dest=getAllocatedReg dReg, saveRegs=preserve}, code) end | codeExtended _ ({instr=AllocateMemoryVariable{size=PReg size, dest=PReg dest, saveRegs}, ...}, code) = let (* Simple case - no initialiser. *) val saveRegs = getSaveRegs saveRegs val sReg = getAllocatedGenReg size and dReg = getAllocatedGenReg dest val _ = sReg <> dReg orelse raise InternalError "codeGenICode-AllocateMemoryVariable" val allocCode = [ (* Store it as the length field. *) Move{source=RegisterArg sReg, moveSize=opSizeToMove polyWordOpSize, destination=MemoryArg {base=dReg, offset= ~ (Word.toInt wordSize), index=NoIndex}}, (* Untag the length *) ShiftConstant{ shiftType=SHR, output=sReg, shift=0w1, opSize=polyWordOpSize}, (* Allocate the memory *) AllocStoreVariable{ size=sReg, output=dReg, saveRegs=saveRegs} ] (* Convert to an object index if necessary. 
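Under ObjectId32Bit the absolute address produced by the allocation is converted back to an object index by, in effect, objId = (address - heapBase) div 4, with ebx holding the heap base; that is why object-indexed operands elsewhere use ebx as the base register with an Index4 multiplier. This is only an informal description of the two instructions below, which appear in reverse order.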
*) val convertToObjId = if targetArch = ObjectId32Bit then [ ShiftConstant{ shiftType=SHR, output=dReg, shift=0w2, opSize=OpSize64 }, ArithToGenReg{ opc=SUB, output=dReg, source=RegisterArg ebx, opSize=nativeWordOpSize } ] else [] in convertToObjId @ allocCode @ code end | codeExtended _ ({instr=InitialiseMem{size=PReg sReg, addr=PReg aReg, init=PReg iReg}, ...}, code) = (* We are going to use rep stosl/q to set the memory. That requires the length to be in ecx, the initialiser to be in eax and the destination to be edi. *) RepeatOperation (if polyWordOpSize = OpSize64 then STOS64 else STOS32):: moveIfNecessary({src=getAllocatedReg iReg, dst=GenReg eax, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg aReg, dst=GenReg edi, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg sReg, dst=GenReg ecx, kind=moveNativeWord}, code))) | codeExtended _ ({instr=InitialisationComplete, ...}, code) = StoreInitialised :: code | codeExtended _ ({instr=BeginLoop, ...}, code) = code | codeExtended _ ({instr=JumpLoop{regArgs, stackArgs, checkInterrupt, workReg}, ...}, code) = let val workReg = Option.map (fn PReg r => getAllocatedReg r) workReg (* TODO: Make the sources and destinations "friends". *) (* We must leave stack entries as stack entries for the moment as with TailCall. *) fun codeArg(StackLocation{wordOffset, ...}) = StackSource wordOffset | codeArg arg = OtherSource(codeExtArgument arg) val extStackArgs = map (fn (src, stack, _) => {dst=StackDest stack, src=codeArg src}) stackArgs val extRegArgs = map (fn (a, PReg r) => {src=codeArg a, dst=RegDest(getAllocatedReg r)}) regArgs val checkCode = case checkInterrupt of NONE => [] | SOME saveRegs => testRegAndTrap (esp, StackOverflowCall, getSaveRegs saveRegs) in checkCode @ moveMultipleValues(extStackArgs @ extRegArgs, workReg, code) end | codeExtended _ ({instr=RaiseExceptionPacket{ packetReg=PReg preg }, ...}, code) = (* We need a work register here. It can be any register other than eax since we don't preserve registers across calls. *) RaiseException { workReg=ecx } :: moveIfNecessary({src=getAllocatedReg preg, dst=GenReg eax, kind=moveNativeWord}, code) | codeExtended _ ({instr=ReserveContainer{size, ...}, ...}, code) = (* The memory must be cleared in case we have a GC. *) List.tabulate(size, fn _ => PushToStack(NonAddressConstArg(tag 0))) @ code | codeExtended {flow} ({instr=IndexedCaseOperation{testReg=PReg tReg, workReg=PReg wReg}, ...}, code) = let val testReg = getAllocatedReg tReg val workReg = getAllocatedReg wReg val _ = testReg <> workReg orelse raise InternalError "IndexedCaseOperation - same registers" val rReg = asGenReg testReg and wReg = asGenReg workReg val _ = rReg <> wReg orelse raise InternalError "IndexedCaseOperation - same registers" (* This should only be within a block with an IndexedBr flow type. *) val cases = case flow of IndexedBr cases => cases | _ => raise InternalError "codeGenICode: IndexedCaseOperation" val caseLabels = map getBlockLabel cases val startJumpTable = makeLabel() (* Compute the jump address. The index is a tagged integer so it is already multiplied by 2. We need to multiply by four to get the correct size. Subtract off the shifted tag. 
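Informally: for a case value i the register holds the tagged form 2*i+1, and with the initial 8-byte table entries the byte offset wanted is 8*i; since (2*i+1)*4 - 4 = 8*i, scaling the tagged index by four and subtracting the shifted tag lands on the right entry. The shared jumpSize ref presumably lets a later pass settle the final entry size consistently between the address calculation and the table itself.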
*) val jumpSize = ref JumpSize8 in JumpTable{cases=caseLabels, jumpSize=jumpSize} :: JumpLabel startJumpTable :: JumpAddress(RegisterArg wReg) :: IndexedJumpCalc{ addrReg=wReg, indexReg=rReg, jumpSize=jumpSize } :: LoadLabelAddress{label=startJumpTable, output=wReg} :: code end | codeExtended _ ({instr=LockMutable{addr=PReg pr}, ...}, code) = let val (bReg, index) = if targetArch = ObjectId32Bit then (ebx, Index4(asGenReg(getAllocatedReg pr))) else (asGenReg(getAllocatedReg pr), NoIndex) in (* Mask off the mutable bit. *) ArithByteMemConst{opc=AND, address={base=bReg, offset= ~1, index=index}, source=0wxff - F_mutable} :: code end | codeExtended _ ({instr=WordComparison{ arg1=PReg pr, arg2, opSize, ... }, ...}, code) = ArithToGenReg {opc=CMP, output=asGenReg(getAllocatedReg pr), source=codeExtArgumentAsGenReg arg2, opSize=opSize} :: code | codeExtended _ ({instr=CompareLiteral{ arg1, arg2, opSize, ... }, ...}, code) = ( case decache arg1 of (* N.B. We MUST decache since we're assuming that the base reg is not used. *) RegisterArgument(PReg pr) => ArithToGenReg {opc=CMP, output=asGenReg(getAllocatedReg pr), source=NonAddressConstArg arg2, opSize=opSize} :: code | MemoryLocation{base=PReg br, offset, index=ObjectIndex, ...} => ArithMemConst{ opc=CMP, address={offset=offset, base=ebx, index=Index4(asGenReg(getAllocatedReg br))}, source=arg2, opSize=opSize } :: code | MemoryLocation{base=PReg br, index, offset, ...} => ArithMemConst{ opc=CMP, address={offset=offset, base=asGenReg(getAllocatedReg br), index=codeExtIndex index}, source=arg2, opSize=opSize } :: code | StackLocation{wordOffset, ...} => ArithMemConst{ opc=CMP, address={offset=wordOffset*Word.toInt nativeWordSize, base=esp, index=NoIndex}, source=arg2, opSize=opSize } :: code | _ => raise InternalError "CompareLiteral" ) | codeExtended _ ({instr=CompareByteMem{ arg1={base=PReg br, offset, index}, arg2, ... }, ...}, code) = let val (bReg, index) = case index of ObjectIndex => (ebx, Index4(asGenReg(getAllocatedReg br))) | _ => (asGenReg(getAllocatedReg br), codeExtIndex index) in ArithByteMemConst{ opc=CMP, address={offset=offset, base=bReg, index=index}, source=arg2 } :: code end (* Set up an exception handler. *) | codeExtended {flow} ({instr=PushExceptionHandler{workReg=PReg hReg}, ...}, code) = let (* Set up an exception handler. *) val workReg=getAllocatedReg hReg (* Although we're pushing this to the stack we need to use LEA on the X86/64 and some arithmetic on the X86/32. We need a work reg for that. *) val handleReg = asGenReg workReg (* This should only be within a block with a SetHandler flow type. *) val handleLabel = case flow of SetHandler{ handler, ...} => handler | _ => raise InternalError "codeGenICode: PushExceptionHandler" val labelRef = getBlockLabel handleLabel (* Set up the handler by pushing the old handler to the stack, pushing the entry point and setting the handler address to the current stack pointer. *) in ( Move{source=RegisterArg esp, destination=MemoryArg {offset=memRegHandlerRegister, base=ebp, index=NoIndex}, moveSize=opSizeToMove nativeWordOpSize} :: PushToStack(RegisterArg handleReg) :: LoadLabelAddress{ label=labelRef, output=handleReg} :: PushToStack(MemoryArg{base=ebp, offset=memRegHandlerRegister, index=NoIndex}) :: code) end (* Pop an exception handler at the end of a handled section. Executed if no exception has been raised. This removes items from the stack. 
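The record set up by PushExceptionHandler is, in effect, two stack words: the handler entry address on top of the saved previous handler, with the handler register in the memory-register area left pointing at them. Popping therefore skips the entry address, pops the saved handler and writes it back to memRegHandlerRegister, which is what the ResetStack, PopR and Move below do.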
*) | codeExtended _ ({instr=PopExceptionHandler{workReg=PReg wReg, ...}, ...}, code) = let val workReg = getAllocatedReg wReg val wReg = asGenReg workReg in (* The stack pointer has been adjusted to just above the two words that were stored in PushExceptionHandler. *) ( Move{source=RegisterArg wReg, destination=MemoryArg {offset=memRegHandlerRegister, base=ebp, index=NoIndex}, moveSize=opSizeToMove nativeWordOpSize} :: PopR wReg :: ResetStack{numWords=1, preserveCC=false} :: code) end (* Start of a handler. Sets the address associated with PushExceptionHandler and provides a register for the packet.*) | codeExtended _ ({instr=BeginHandler{packetReg=PReg pReg, workReg=PReg wReg}, ...}, code) = let (* The exception packet is in rax. *) val realPktReg = getAllocatedReg pReg val realWorkreg = getAllocatedGenReg wReg (* The code here is almost the same as PopExceptionHandler. The only real difference is that PopExceptionHandler needs to pass the result of executing the handled code which could be in any register. This code needs to transmit the exception packet and that is always in rax. *) val beginHandleCode = Move{source=RegisterArg realWorkreg, destination=MemoryArg {offset=memRegHandlerRegister, base=ebp, index=NoIndex}, moveSize=opSizeToMove nativeWordOpSize} :: PopR realWorkreg :: ResetStack{numWords=1, preserveCC=false} :: Move{ source=MemoryArg{base=ebp, offset=memRegHandlerRegister, index=NoIndex}, destination=RegisterArg esp, moveSize=opSizeToMove nativeWordOpSize } :: code in moveIfNecessary({src=GenReg eax, dst=realPktReg, kind=moveNativeWord }, beginHandleCode) end | codeExtended _ ({instr=ReturnResultFromFunction { resultReg=PReg resReg, realReg, numStackArgs }, ...}, code) = let val resultReg = getAllocatedReg resReg (* If for some reason it's not in the right register we have to move it there. *) in ReturnFromFunction numStackArgs :: moveIfNecessary({src=resultReg, dst=realReg, kind=moveNativeWord}, code) end | codeExtended _ ({instr=ArithmeticFunction{oper=SUB, resultReg=PReg resReg, operand1=PReg op1Reg, operand2, opSize, ...}, ...}, code) = (* Subtraction - this is special because it can only be done one way round. The first argument must be in a register. *) let val realDestReg = getAllocatedReg resReg val realOp1Reg = getAllocatedReg op1Reg in ArithToGenReg { opc=SUB, output=asGenReg realDestReg, source=codeExtArgumentAsGenReg operand2, opSize=opSize } :: moveIfNecessary({src=realOp1Reg, dst=realDestReg, kind=opSizeToIMove opSize}, code) end | codeExtended _ ({instr=ArithmeticFunction{oper, resultReg=PReg resReg, operand1=PReg op1Reg, operand2, opSize, ...}, ...}, code) = ( case decache operand2 of RegisterArgument(PReg op2Reg) => (* Arithmetic operation with both arguments as registers. These operations are all symmetric so we can try to put either argument into the result reg and then do the operation on the other arg. *) let val realDestReg = getAllocatedGenReg resReg val realOp1Reg = getAllocatedGenReg op1Reg and realOp2Reg = getAllocatedGenReg op2Reg val (operandReg, moveInstr) = if realOp1Reg = realDestReg then (realOp2Reg, code) else if realOp2Reg = realDestReg then (realOp1Reg, code) else (realOp2Reg, Move{source=RegisterArg realOp1Reg, destination=RegisterArg realDestReg, moveSize=opSizeToMove opSize} :: code) in ArithToGenReg { opc=oper, output=realDestReg, source=RegisterArg operandReg, opSize=opSize } :: moveInstr end | operand2 => (* Arithmetic operation with the first argument in a register and the second a constant or memory location. 
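For the register-register case above the operation is assumed commutative, so whichever operand already occupies the result register stays put and the other becomes the source, saving a move; SUB cannot be reordered like that, which is why it has its own clause earlier. The case below simply moves operand1 into the result register, if necessary, before applying the operator to the constant or memory operand.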
*) let val realDestReg = getAllocatedReg resReg val realOp1Reg = getAllocatedReg op1Reg val op2Arg = codeExtArgumentAsGenReg operand2 (* If we couldn't put it in the result register we have to copy it there. *) in ArithToGenReg { opc=oper, output=asGenReg realDestReg, source=op2Arg, opSize=opSize } :: moveIfNecessary({src=realOp1Reg, dst=realDestReg, kind=opSizeToIMove opSize}, code) end ) | codeExtended _ ({instr=TestTagBit{arg, ...}, ...}, code) = TestByteBits{arg=codeExtArgumentAsGenReg arg, bits=0w1} :: code | codeExtended _ ({instr=PushValue {arg, ...}, ...}, code) = PushToStack(codeExtArgumentAsGenReg arg) :: code | codeExtended _ ({instr=CopyToCache{source=PReg sreg, dest as PReg dreg, kind}, ...}, code) = if not (isUsed dest) then code else let val realDestReg = getAllocatedReg dreg (* Get the source register using the current destination as a preference. *) val realSrcReg = getAllocatedReg sreg in (* If the source is the same as the destination we don't need to do anything. *) moveIfNecessary({src=realSrcReg, dst=realDestReg, kind=kind}, code) end | codeExtended _ ({instr=ResetStackPtr {numWords, preserveCC}, ...}, code) = ( numWords >= 0 orelse raise InternalError "codeGenICode: ResetStackPtr - negative offset"; ResetStack{numWords=numWords, preserveCC=preserveCC} :: code ) | codeExtended _ ({instr=StoreToStack{ source, stackOffset, ... }, ...}, code) = llStoreArgument{ source=codeExtArgument source, base=esp, offset=stackOffset*Word.toInt nativeWordSize, index=NoIndex, kind=moveNativeWord} :: code | codeExtended _ ({instr=TagValue{source=PReg srcReg, dest as PReg dReg, opSize, ...}, ...}, code) = if not (isUsed dest) then code else let val regResult = asGenReg(getAllocatedReg dReg) val realSReg = asGenReg(getAllocatedReg srcReg) in (* N.B. Using LEA with a base register and an index multiplier of 1 is shorter than using no base register and a multiplier of two. *) (* TODO: If the value we're tagging is a byte or a 16-bit value we can use OpSize32 and possibly save the Rex byte. *) LoadAddress{ output=regResult, offset=1, base=SOME realSReg, index=Index1 realSReg, opSize=opSize } :: code end | codeExtended _ ({instr=UntagValue{dest as PReg dReg, cache=SOME(PReg cacheReg), opSize, ...}, ...}, code) = if not (isUsed dest) then code else moveIfNecessary({src=getAllocatedReg cacheReg, dst=getAllocatedReg dReg, kind=opSizeToIMove opSize}, code) | codeExtended _ ({instr=UntagValue{source=PReg sReg, dest as PReg dReg, isSigned, opSize, ...}, ...}, code) = if not (isUsed dest) then code else let val regResult = getAllocatedReg dReg val realSReg = getAllocatedReg sReg in (* For most cases we're going to be using a 32-bit word if this is 32-in-64. The exception is when converting a word to a signed large-word. 
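As a reminder of the representation: a tagged fixed-point value t stands for n with t = 2*n + 1, so TagValue above can be a single LEA of the form lea dst,[src+src*1+1], and untagging is just the one-bit shift below, arithmetic (SAR) when the sign matters and logical (SHR) otherwise.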
*) ShiftConstant{ shiftType=if isSigned then SAR else SHR, output=asGenReg regResult, shift=0w1, opSize=opSize } :: moveIfNecessary({src=realSReg, dst=regResult, kind=opSizeToIMove opSize}, code) end | codeExtended _ ({instr=LoadEffectiveAddress{base, offset, index=ObjectIndex, dest=PReg dReg, opSize}, ...}, code) = let val destReg = asGenReg(getAllocatedReg dReg) val bReg = case base of SOME(PReg br) => asGenReg(getAllocatedReg br) | NONE => raise InternalError "LoadEffectiveAddress - ObjectIndex but no base" in LoadAddress{ output=destReg, offset=offset, base=SOME ebx, index=Index4 bReg, opSize=opSize } :: code end | codeExtended _ ({instr=LoadEffectiveAddress{base, offset, index, dest=PReg dReg, opSize}, ...}, code) = let val destReg = asGenReg(getAllocatedReg dReg) val bReg = case base of SOME(PReg br) => SOME(asGenReg(getAllocatedReg br)) | NONE => NONE val indexR = codeExtIndex index in LoadAddress{ output=destReg, offset=offset, base=bReg, index=indexR, opSize=opSize } :: code end | codeExtended _ ({instr=ShiftOperation{shift, resultReg=PReg resReg, operand=PReg operReg, shiftAmount=IntegerConstant i, opSize, ...}, ...}, code) = let val realDestReg = getAllocatedReg resReg val realOpReg = getAllocatedReg operReg in ShiftConstant{ shiftType=shift, output=asGenReg realDestReg, shift=Word8.fromLargeInt i, opSize=opSize } :: moveIfNecessary({src=realOpReg, dst=realDestReg, kind=opSizeToIMove opSize}, code) end | codeExtended _ ({instr=ShiftOperation{shift, resultReg=PReg resReg, operand=PReg operReg, shiftAmount=RegisterArgument(PReg shiftReg), opSize, ...}, ...}, code) = let val realDestReg = getAllocatedReg resReg val realShiftReg = getAllocatedReg shiftReg val realOpReg = getAllocatedReg operReg (* We want the shift in ecx. We may not have got it there but the register should be free. The shift is masked to 5 or 6 bits so we have to check for larger shift values at a higher level.*) in ShiftVariable{ shiftType=shift, output=asGenReg realDestReg, opSize=opSize } :: moveIfNecessary({src=realOpReg, dst=realDestReg, kind=opSizeToIMove opSize}, moveIfNecessary({src=realShiftReg, dst=GenReg ecx, kind=Move32Bit (* < 64*)}, code)) end | codeExtended _ ({instr=ShiftOperation _, ...}, _) = raise InternalError "codeExtended - ShiftOperation" | codeExtended _ ({instr= Multiplication{resultReg=PReg resReg, operand1=PReg op1Reg, operand2, opSize, ...}, ...}, code) = ( case decache operand2 of RegisterArgument(PReg op2Reg) => let (* Treat exactly the same as ArithmeticFunction. *) val realDestReg = getAllocatedGenReg resReg val realOp1Reg = getAllocatedGenReg op1Reg and realOp2Reg = getAllocatedGenReg op2Reg val (operandReg, moveInstr) = if realOp1Reg = realDestReg then (realOp2Reg, code) else if realOp2Reg = realDestReg then (realOp1Reg, code) else (realOp2Reg, Move{source=RegisterArg realOp1Reg, destination=RegisterArg realDestReg, moveSize=opSizeToMove opSize} :: code) in MultiplyR { source=RegisterArg operandReg, output=realDestReg, opSize=opSize } :: moveInstr end | operand2 => (* Multiply operation with the first argument in a register and the second a constant or memory location. 
*) let val realDestReg = getAllocatedReg resReg val realOp1Reg = getAllocatedReg op1Reg val op2Arg = codeExtArgumentAsGenReg operand2 in MultiplyR { output=asGenReg realDestReg, source=op2Arg, opSize=opSize } :: moveIfNecessary({src=realOp1Reg, dst=realDestReg, kind=opSizeToIMove opSize}, code) end ) | codeExtended _ ({instr=Division{isSigned, dividend=PReg regDivid, divisor, quotient=PReg regQuot, remainder=PReg regRem, opSize}, ...}, code) = let (* TODO: This currently only supports the dividend in a register. LargeWord division will generally load the argument from a box so we could support a memory argument for that case. Word and integer values will always have to be detagged. *) (* Division is specific as to the registers. The dividend must be eax, quotient is eax and the remainder is edx. *) val realDiviReg = getAllocatedReg regDivid val realQuotReg = getAllocatedReg regQuot val realRemReg = getAllocatedReg regRem val divisorArg = codeExtArgument divisor val divisorReg = argAsGenReg divisorArg val _ = divisorReg <> eax andalso divisorReg <> edx orelse raise InternalError "codeGenICode: Division" (* rdx needs to be set to the high order part of the dividend. For signed division that means sign-extending rdx, for unsigned division we clear it. We only need a 32-bit clear since the top 32-bits are cleared anyway. *) val setRDX = if isSigned then SignExtendForDivide opSize else ArithToGenReg{ opc=XOR, output=edx, source=RegisterArg edx, opSize=OpSize32 } in (* We may need to move one or more of the registers although normally that won't be necessary. Almost certainly only either the remainder or the quotient will actually be used. *) moveMultipleRegisters([{src=GenReg eax, dst=realQuotReg}, {src=GenReg edx, dst=realRemReg}], DivideAccR {arg=divisorReg, isSigned=isSigned, opSize=opSize} :: setRDX :: moveIfNecessary({src=realDiviReg, dst=GenReg eax, kind=opSizeToIMove opSize}, code)) end - | codeExtended _ ({instr=AtomicExchangeAndAdd{base=PReg bReg, source=PReg sReg}, ...}, code) = + | codeExtended _ ({instr=AtomicExchangeAndAdd{base=PReg bReg, source=PReg sReg, resultReg = PReg rReg}, ...}, code) = let - val baseReg = asGenReg (getAllocatedReg bReg) and outReg = asGenReg (getAllocatedReg sReg) + val baseReg = asGenReg (getAllocatedReg bReg) and outReg = asGenReg (getAllocatedReg rReg) val address = if targetArch = ObjectId32Bit then {base=ebx, index=Index4 baseReg, offset=0} else {base=baseReg, index=NoIndex, offset=0} in - AtomicXAdd{address=address, output=outReg, opSize=polyWordOpSize} :: code + AtomicXAdd{address=address, output=outReg, opSize=polyWordOpSize} :: + moveIfNecessary({src=getAllocatedReg sReg, dst=GenReg outReg, kind=movePolyWord}, code) end | codeExtended _ ({instr=BoxValue{boxKind, source=PReg sReg, dest as PReg dReg, saveRegs}, ...}, code) = if not (isUsed dest) then code else let val preserve = getSaveRegs saveRegs val (srcReg, boxSize, moveKind) = case boxKind of BoxLargeWord => (getAllocatedReg sReg, Word.toInt(nativeWordSize div wordSize), moveNativeWord) | BoxX87Double => (getAllocatedReg sReg, Word.toInt(0w8 div wordSize), MoveDouble) | BoxX87Float => (getAllocatedReg sReg, Word.toInt(0w4 div wordSize), MoveFloat) | BoxSSE2Double => (getAllocatedReg sReg, Word.toInt(0w8 div wordSize), MoveDouble) | BoxSSE2Float => (getAllocatedReg sReg, Word.toInt(0w4 div wordSize), MoveFloat) val dstReg = getAllocatedReg dReg val (bReg, index) = if targetArch = ObjectId32Bit then (ebx, Index4(asGenReg dstReg)) else (asGenReg dstReg, NoIndex) in StoreInitialised :: 
llStoreArgument{ source=RegisterArg srcReg, offset=0, base=bReg, index=index, kind=moveKind} :: llAllocateMemoryOperation({ size=boxSize, flags=0wx1, dest=dstReg, saveRegs=preserve}, code) end | codeExtended _ ({instr=CompareByteVectors{vec1Addr=PReg v1Reg, vec2Addr=PReg v2Reg, length=PReg lReg, ...}, ...}, code) = (* There's a complication here. CompareByteVectors generates REPE CMPSB to compare the vectors but the condition code is only set if CMPSB is executed at least once. If the value in RCX/ECX is zero it will never be executed and the condition code will be unchanged. We want the result to be "equal" in that case so we need to ensure that is the case. It's quite possible that the condition code has just been set by shifting RCX/ECX to remove the tag in which case it will have set "equal" if the value was zero. We use CMP R/ECX,R/ECX which is two bytes in 32-bit. If we knew the length was non-zero (e.g. a constant) we could avoid this. *) RepeatOperation CMPS8 :: ArithToGenReg {opc=CMP, output=ecx, source=RegisterArg ecx, opSize=OpSize32} :: moveIfNecessary({src=getAllocatedReg v1Reg, dst=GenReg esi, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg v2Reg, dst=GenReg edi, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg lReg, dst=GenReg ecx, kind=moveNativeWord}, code))) | codeExtended _ ({instr=BlockMove{srcAddr=PReg sReg, destAddr=PReg dReg, length=PReg lReg, isByteMove}, ...}, code) = (* We may need to move these into the appropriate registers. They have been reserved but it's still possible the values could be in something else. *) RepeatOperation(if isByteMove then MOVS8 else if polyWordOpSize = OpSize64 then MOVS64 else MOVS32) :: moveIfNecessary({src=getAllocatedReg sReg, dst=GenReg esi, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg dReg, dst=GenReg edi, kind=moveNativeWord}, moveIfNecessary({src=getAllocatedReg lReg, dst=GenReg ecx, kind=moveNativeWord}, code))) | codeExtended _ ({instr=X87Compare{arg1=PReg argReg, arg2, isDouble, ...}, ...}, code) = let val fpReg = getAllocatedFPReg argReg val _ = fpReg = fp0 orelse raise InternalError "codeGenICode: CompareFloatingPt not fp0" (* This currently pops the value. 
*) val precision = if isDouble then DoublePrecision else SinglePrecision in case codeExtArgumentAsFPReg arg2 of RegisterArg fpReg2 => FPArithR{opc=FCOMP, source=fpReg2} :: code | MemoryArg{offset, base=baseReg, index=NoIndex} => FPArithMemory{opc=FCOMP, base=baseReg, offset=offset, precision=precision} :: code | AddressConstArg const => FPArithConst{opc=FCOMP, source = const, precision=precision} :: code | _ => raise InternalError "codeGenICode: CompareFloatingPt: TODO" end | codeExtended _ ({instr=SSE2Compare{arg1=PReg argReg, arg2, isDouble, ...}, ...}, code) = let val xmmReg = getAllocatedXMMReg argReg val arg2Code = codeExtArgumentAsXMMReg arg2 in XMMArith { opc= if isDouble then SSE2CompDouble else SSE2CompSingle, output=xmmReg, source=arg2Code} :: code end | codeExtended _ ({instr=X87FPGetCondition{dest=PReg dReg, ...}, ...}, code) = moveIfNecessary({src=GenReg eax, dst=getAllocatedReg dReg, kind=Move32Bit}, FPStatusToEAX :: code) | codeExtended _ ({instr=X87FPArith{opc, resultReg=PReg resReg, arg1=PReg op1Reg, arg2, isDouble}, ...}, code) = let val realDestReg = getAllocatedFPReg resReg val realOp1Reg = getAllocatedFPReg op1Reg val _ = realDestReg = fp0 orelse raise InternalError "codeGenICode: FloatingPointArith not fp0" val _ = realOp1Reg = fp0 orelse raise InternalError "codeGenICode: FloatingPointArith not fp0" val op2Arg = codeExtArgumentAsFPReg arg2 val precision = if isDouble then DoublePrecision else SinglePrecision in case op2Arg of MemoryArg{offset, base=baseReg, index=NoIndex} => FPArithMemory{opc=opc, base=baseReg, offset=offset, precision=precision} :: code | AddressConstArg const => FPArithConst{opc=opc, source = const, precision=precision} :: code | _ => raise InternalError "codeGenICode: X87FPArith: TODO" end | codeExtended _ ({instr=X87FPUnaryOps{fpOp, dest=PReg resReg, source=PReg op1Reg}, ...}, code) = let val realDestReg = getAllocatedFPReg resReg val realOp1Reg = getAllocatedFPReg op1Reg val _ = realDestReg = fp0 orelse raise InternalError "codeGenICode: X87FPUnaryOps not fp0" val _ = realOp1Reg = fp0 orelse raise InternalError "codeGenICode: X87FPUnaryOps not fp0" in FPUnary fpOp :: code end | codeExtended _ ({instr=X87Float{dest=PReg resReg, source}, ...}, code) = let val intSource = codeExtArgumentAsGenReg source val fpReg = getAllocatedFPReg resReg val _ = fpReg = fp0 orelse raise InternalError "codeGenICode: FloatFixedInt not fp0" in (* This is complicated. The integer value has to be in memory not in a register so we have to push it to the stack and then make sure it is popped afterwards. Because it is untagged it is unsafe to leave it. 
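So the effective sequence is roughly: push the integer, load it into the FP stack from memory (FPLoadInt), then immediately drop the word again with ResetStack, keeping the window in which an untagged value sits on the ML stack as small as possible.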
*) ResetStack{numWords=1, preserveCC=false} :: FPLoadInt{ base=esp, offset=0, opSize=polyWordOpSize } :: PushToStack intSource :: code end | codeExtended _ ({instr=SSE2Float{dest=PReg resReg, source}, ...}, code) = let val xmmResReg = getAllocatedXMMReg resReg val srcReg = case codeExtArgumentAsGenReg source of RegisterArg srcReg => srcReg | _ => raise InternalError "FloatFixedInt: not reg" in XMMConvertFromInt{ output=xmmResReg, source=srcReg, opSize=polyWordOpSize} :: code end | codeExtended _ ({instr=SSE2FPUnary{opc, resultReg=PReg resReg, source}, ...}, code) = let val realDestReg = getAllocatedXMMReg resReg val opArg = codeExtArgumentAsXMMReg source val sse2Op = case opc of SSE2UDoubleToFloat => SSE2DoubleToFloat | SSE2UFloatToDouble => SSE2FloatToDouble in XMMArith{ opc=sse2Op, output=realDestReg, source=opArg} :: code end | codeExtended _ ({instr=SSE2FPBinary{opc, resultReg=PReg resReg, arg1=PReg op1Reg, arg2}, ...}, code) = let val realDestReg = getAllocatedXMMReg resReg val realOp1Reg = getAllocatedXMMReg op1Reg val op2Arg = codeExtArgumentAsXMMReg arg2 (* xorpd and andpd require 128-bit arguments with 128-bit alignment. *) val _ = case (opc, op2Arg) of (SSE2BXor, RegisterArg _) => () | (SSE2BXor, _) => raise InternalError "codeGenICode - SSE2Xor not in register" | (SSE2BAnd, RegisterArg _) => () | (SSE2BAnd, _) => raise InternalError "codeGenICode - SSE2And not in register" | _ => () val doMove = if realDestReg = realOp1Reg then code else XMMArith { opc=SSE2MoveDouble, source=RegisterArg realOp1Reg, output=realDestReg } :: code val sse2Op = case opc of SSE2BAddDouble => SSE2AddDouble | SSE2BSubDouble => SSE2SubDouble | SSE2BMulDouble => SSE2MulDouble | SSE2BDivDouble => SSE2DivDouble | SSE2BAddSingle => SSE2AddSingle | SSE2BSubSingle => SSE2SubSingle | SSE2BMulSingle => SSE2MulSingle | SSE2BDivSingle => SSE2DivSingle | SSE2BXor => SSE2Xor | SSE2BAnd => SSE2And in XMMArith{ opc=sse2Op, output=realDestReg, source=op2Arg} :: doMove end | codeExtended _ ({instr=TagFloat{source=PReg srcReg, dest as PReg dReg, ...}, ...}, code) = if not (isUsed dest) then code else let val _ = targetArch = Native64Bit orelse raise InternalError "TagFloat: not 64-bit" (* Copy the value from an XMM reg into a general reg and tag it. *) val regResult = asGenReg(getAllocatedReg dReg) val realSReg = getAllocatedXMMReg srcReg in ArithToGenReg { opc=ADD, output=regResult, source=NonAddressConstArg 1, opSize=polyWordOpSize } :: ShiftConstant{ shiftType=SHL, output=regResult, shift=0w32, opSize=OpSize64} :: MoveXMMRegToGenReg { source = realSReg, output = regResult } :: code end | codeExtended _ ({instr=UntagFloat{dest as PReg dReg, cache=SOME(PReg cacheReg), ...}, ...}, code) = if not (isUsed dest) then code else moveIfNecessary({src=getAllocatedReg cacheReg, dst=getAllocatedReg dReg, kind=MoveFloat}, code) | codeExtended _ ({instr=UntagFloat{source, dest as PReg dReg, ...}, ...}, code) = if not (isUsed dest) then code else let val regResult = getAllocatedXMMReg dReg in case codeExtArgumentAsGenReg source of RegisterArg realSReg => XMMShiftRight{ output=regResult, shift=0w4 (* Bytes - not bits *) } :: MoveGenRegToXMMReg {source=realSReg, output=regResult} :: code | MemoryArg{base, offset, index} => (* If the value is in memory we can just load the high order word. *) XMMArith { opc=SSE2MoveFloat, source=MemoryArg{base=base, offset=offset+4, index=index}, output=regResult } :: code | NonAddressConstArg ic => (* Shift down and then load from the non-constant area. 
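On the 64-bit target a tagged single-precision float keeps its IEEE bits in the upper half of the word, roughly (bits << 32) + 1 as TagFloat constructs above. The register case therefore shifts the value down by four bytes inside the XMM register, the memory case just loads the high word, and for a constant the bits are recovered here by an arithmetic right shift of 32 before loading.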
*) XMMArith { opc=SSE2MoveFloat, source=NonAddressConstArg(IntInf.~>>(ic, 0w32)), output=regResult } :: code | _ => raise InternalError "UntagFloat - not register or memory" end | codeExtended _ ({instr=GetSSE2ControlReg{dest=PReg dReg}, ...}, code) = let (* This has to work through memory. Reserve one word on the stack, get the MXCSR register into it and pop it to the register. *) val regResult = getAllocatedGenReg dReg in PopR regResult :: XMMStoreCSR{base=esp, offset=0, index=NoIndex } :: PushToStack(NonAddressConstArg 0) :: code end | codeExtended _ ({instr=SetSSE2ControlReg{source=PReg sReg}, ...}, code) = let (* This has to work through memory. Push the register to the stack, store the value into the control register and remove it from the stack. *) val sourceReg = getAllocatedGenReg sReg in ResetStack{ numWords=1, preserveCC=false } :: XMMLoadCSR{base=esp, offset=0, index=NoIndex } :: PushToStack(RegisterArg sourceReg) :: code end | codeExtended _ ({instr=GetX87ControlReg{dest=PReg dReg}, ...}, code) = let (* This has to work through memory. Reserve one word on the stack, get the X87 control register into it and pop it to the register. *) val regResult = getAllocatedGenReg dReg in PopR regResult :: FPStoreCtrlWord{base=esp, offset=0, index=NoIndex } :: PushToStack(NonAddressConstArg 0) :: code end | codeExtended _ ({instr=SetX87ControlReg{source=PReg sReg}, ...}, code) = let (* This has to work through memory. Push the register to the stack, store the value into the control register and remove it from the stack. *) val sourceReg = getAllocatedGenReg sReg in ResetStack{ numWords=1, preserveCC=false } :: FPLoadCtrlWord{base=esp, offset=0, index=NoIndex } :: PushToStack(RegisterArg sourceReg) :: code end | codeExtended _ ({instr=X87RealToInt{source=PReg sReg, dest=PReg dReg}, ...}, code) = let (* This has to work through memory. Reserve one word on the stack, convert the value into it and pop it to the register. *) val regResult = getAllocatedGenReg dReg val fpReg = getAllocatedFPReg sReg val _ = fpReg = fp0 orelse raise InternalError "codeGenICode: CompareFloatingPt not fp0" (* This currently pops the value. *) in PopR regResult :: FPStoreInt{base=esp, offset=0, index=NoIndex } :: PushToStack(NonAddressConstArg 0) :: code end | codeExtended _ ({instr=SSE2RealToInt{source, dest=PReg dReg, isDouble, isTruncate}, ...}, code) = let (* The source is either an XMM register or memory. *) val regResult = getAllocatedGenReg dReg val opArg = codeExtArgumentAsXMMReg source in XMMStoreInt { source=opArg, precision=if isDouble then DoublePrecision else SinglePrecision, output = regResult, isTruncate=isTruncate } :: code end | codeExtended _ ({instr=SignExtend32To64{source, dest=PReg dReg}, ...}, code) = let val regResult = getAllocatedGenReg dReg val opArg = codeExtArgumentAsGenReg source in Move{moveSize=Move32X64, source=opArg, destination=RegisterArg regResult } :: code end | codeExtended _ ({instr=TouchArgument _, ...}, code) = code (* Don't need to do anything. *) + + | codeExtended _ ({instr=PauseCPU, ...}, code) = PauseForSpinLock :: code val newCode = codeCreate (functionName, profileObject, debugSwitches) local (* processed - set to true when a block has been processed. *) val processed = Array.array(numBlocks, false) fun haveProcessed n = Array.sub(processed, n) (* Find the blocks that reference this one. This isn't essential but allows us to try to generate blocks in the order of the control flow. This in turn may allow us to use short branches rather than long ones. 
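A short branch here means the jump form with a one-byte displacement rather than a four-byte one; emitting a block close to the blocks that jump to it makes it more likely that the displacement fits. The reference information gathered below is only a heuristic used to pick a good emission order.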
*) val labelRefs = Array.array(numBlocks, []) datatype flowCode = FlowCodeSimple of int | FlowCodeCMove of {code: operation list, trueJump: int, falseJump: int} (* Process this recursively to set the references. If we have unreachable blocks, perhaps because they've been merged, we don't want to include them in the reference counting. This shouldn't happen now that IdentifyReferences removes unreferenced blocks. *) fun setReferences fromLabel toLabel = case Array.sub(labelRefs, toLabel) of [] => (* Not yet visited at all. *) let val ExtendedBasicBlock{ flow, ...} = Vector.sub(blocks, toLabel) val refs = case flow of ExitCode => [] | Unconditional lab => [lab] | Conditional{trueJump, falseJump, ... } => [trueJump, falseJump] | IndexedBr labs => labs | SetHandler { handler, continue } => [handler, continue] | UnconditionalHandle _ => [] | ConditionalHandle { continue, ...} => [continue] val () = if fromLabel >= 0 then Array.update(labelRefs, toLabel, [fromLabel]) else () in List.app (setReferences toLabel) refs end | refs => (* We've visiting this at least once. Just add us to the list. *) Array.update(labelRefs, toLabel, fromLabel :: refs) val _ = setReferences 0 0 (* Process the blocks. We keep the "stack" explicit rather than using recursion because this allows us to select both arms of a conditional branch sooner. *) fun genCode(toDo, lastFlow, code) = case List.filter (not o haveProcessed) toDo of [] => let (* There's nothing left to do. We may need to add a final branch to the end. *) val finalBranch = case lastFlow of ExitCode => [] | IndexedBr _ => [] | Unconditional dest => [UncondBranch(getBlockLabel dest)] | Conditional { condition, trueJump, falseJump, ...} => [ UncondBranch(getBlockLabel falseJump), ConditionalBranch{test=condition, label=getBlockLabel trueJump} ] | SetHandler { continue, ...} => [UncondBranch(getBlockLabel continue)] | UnconditionalHandle _ => [] | ConditionalHandle { continue, ...} => [UncondBranch(getBlockLabel continue)] in finalBranch @ code (* Done. *) end | stillToDo as head :: _ => let local (* Check the references. If all the sources that lead up to this have already been we won't have any backward jumps. *) fun available dest = List.all haveProcessed (Array.sub(labelRefs, dest)) val continuation = case lastFlow of ExitCode => NONE | IndexedBr _ => NONE (* We could put the last branch in here. *) | Unconditional dest => if not (haveProcessed dest) andalso available dest then SOME(FlowCodeSimple dest) else NONE | Conditional {trueJump, falseJump, condition, ...} => let (* Can we replace this with a SETCC or CMOV? If both arms simply set a register to a value and either return or jump to the same location we can use a SETCC or a CMOV. *) val ExtendedBasicBlock { flow=tFlow, block=tBlock, ...} = Vector.sub(blocks, trueJump) and ExtendedBasicBlock { flow=fFlow, block=fBlock, ...} = Vector.sub(blocks, falseJump) fun cmoveOrSetcc{condition, output, tSource=IntegerConstant trueValue, fSource=IntegerConstant falseValue, kind, code} = let (* Could use SETCC. Only if we can use LEA for multiplication. The result must be tagged so we will always have a multiplier. *) val (multiplier, fValue, testCondition) = if trueValue >= falseValue then (trueValue-falseValue, falseValue, condition) else (falseValue-trueValue, trueValue, invertTest condition) val destReg = asGenReg output in if not (targetArch = Native32Bit andalso (destReg=esi orelse destReg=edi)) (* We can't use Setcc with esi or edi on native 32-bit. 
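Setcc writes a byte register, and esi and edi have no 8-bit form in 32-bit mode. A rough worked example of the transformation: with a false value of 1 and a true value of 3 (the tagged forms of 0 and 1) the multiplier is 2, so the generated pattern is approximately xor dst,dst; setCC dst; lea dst,[dst+dst*1+1], yielding 1 when the condition fails and 3 when it holds; the XOR is hoisted above the comparison when addXOR can show that is safe.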
*) andalso (multiplier = 2 orelse multiplier = 4 orelse multiplier = 8) (* We're using LEA so can only be multiplying by 2, 4 or 8. *) andalso is32bit fValue (* and we're going to put this in the offset *) then let val effectiveOpSize = (* We can generally use 32-bit LEA except if the result is negative. *) if kind = Move32Bit orelse fValue >= 0 andalso fValue+multiplier <= 0x7fffffff then OpSize32 else OpSize64 val (index, base) = case multiplier of 2 => (Index1 destReg, SOME destReg) | 4 => (Index4 destReg, NONE) | 8 => (Index8 destReg, NONE) | _ => (NoIndex, NONE) (* Try to put the instruction to zero the register before any compare. We can do it provided the register we're going to zero isn't used in the comparison. *) fun checkArg(RegisterArg r) = r <> destReg | checkArg(MemoryArg mem) = checkMem mem | checkArg _ = true and checkMem{base, index=NoIndex, ...} = base <> destReg | checkMem{base, index=Index1 index, ...} = base <> destReg andalso index <> destReg | checkMem{base, index=Index2 index, ...} = base <> destReg andalso index <> destReg | checkMem{base, index=Index4 index, ...} = base <> destReg andalso index <> destReg | checkMem{base, index=Index8 index, ...} = base <> destReg andalso index <> destReg val zeroReg = ArithToGenReg { opc=XOR, output=destReg, source=RegisterArg destReg, opSize=OpSize32 } fun addXOR [] = NONE | addXOR ((instr as ResetStack _) :: tl) = (* If we can add the XOR before the ResetStack do so. *) Option.map(fn code => instr :: code) (addXOR tl) | addXOR ((instr as ArithToGenReg{output, source, ...}) :: tl) = if output <> destReg andalso checkArg source then SOME(instr :: zeroReg :: tl) else NONE | addXOR ((instr as ArithMemConst{address, ...}) :: tl) = if checkMem address then SOME(instr :: zeroReg :: tl) else NONE | addXOR ((instr as ArithByteMemConst{address, ...}) :: tl) = if checkMem address then SOME(instr :: zeroReg :: tl) else NONE | addXOR ((instr as XMMArith{source=MemoryArg mem, ...}) :: tl) = if checkMem mem then SOME(instr :: zeroReg :: tl) else NONE | addXOR ((instr as XMMArith _) :: tl) = SOME(instr :: zeroReg :: tl) | addXOR ((instr as TestByteBits{arg, ...}) :: tl) = if checkArg arg then SOME(instr :: zeroReg :: tl) else NONE | addXOR ((instr as RepeatOperation CMPS8) :: tl) = (* This uses edi, esi and ecx implicitly *) if destReg <> esi andalso destReg <> edi andalso destReg <> ecx then SOME(instr :: zeroReg :: tl) else NONE (* This seems to be just a conditional jump as a result of testing the condition code twice in Real.== *) | addXOR _ = NONE (* If we can't put the XOR before the instruction we need to either zero it using a move which won't affect the CC or we use MOVZB to extend the byte value to 32/64 bits. *) val loadAddr = LoadAddress{output=destReg, offset=Int.fromLarge fValue, base=base, index=index, opSize=effectiveOpSize} and setCond = SetCondition{output=destReg, test=testCondition} val code = case addXOR code of SOME withXOR => loadAddr :: setCond :: withXOR | NONE => loadAddr :: (* We've already check that we're not using esi/edi on native 32-bits. *) Move{destination=RegisterArg destReg, source=RegisterArg destReg, moveSize=Move8} :: setCond :: code in SOME code end else NONE end (* If either value is a memory location it isn't safe to load it. The base address may not be valid if the condition does not hold. 
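As a concrete illustration of the SETCC/LEA transformation above (illustrative only, not part of the patch): producing the tagged ML booleans, 3 for true and 1 for false, from a comparison gives trueValue = 3 and falseValue = 1, so multiplier = 2, fValue = 1 and the test keeps its original sense. The destination is zeroed with XOR, which is exactly why addXOR must place it before the comparison: XOR updates the condition codes and would otherwise destroy the result of the compare, forcing the MOVZB fallback instead.

    (* Sketch of the sequence emitted for "tagged boolean := condition".
       A multiplier of 2 is encoded as base = destReg with Index1 destReg. *)
    (*    xor   eax, eax            placed before the compare by addXOR      *)
    (*    cmp   ...                 the original comparison                  *)
    (*    sete  al                  SetCondition for a JE test               *)
    (*    lea   eax, [eax+eax+1]    LoadAddress: 1 + eax*2, giving 1 or 3    *)
    val (multiplier, fValue) = (3 - 1, 1)   (* hypothetical bindings mirroring the selection above *)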
*) | cmoveOrSetcc{tSource=MemoryLocation _, ...} = NONE | cmoveOrSetcc{fSource=MemoryLocation _, ...} = NONE | cmoveOrSetcc{condition, output, tSource, fSource, kind, code} = if targetArch = Native32Bit then NONE (* CMov doesn't work for constants. *) else let val output = asGenReg output val codeTrue = codeExtArgumentAsGenReg tSource and codeFalse = codeExtArgumentAsGenReg fSource val opSize = case kind of Move32Bit => OpSize32 | Move64Bit => OpSize64 | _ => raise InternalError "move size" (* One argument has to be loaded into a register first and the other is conditionally moved. *) val loadFalseCmoveTrue = if (case codeFalse of RegisterArg regFalse => regFalse = output | _ => false) then true (* The false value is already in the right register. *) else if (case codeTrue of RegisterArg regTrue => regTrue = output | _ => false) then false (* The true value is in the right register - have to reverse. *) else if (case codeTrue of NonAddressConstArg _ => true | _ => false) then false (* The true value is a short constant. If we use a CMOV we will have to put that in the non-constant area and use a PC-relative reference. Try to avoid it. *) else true fun cmov{codeLoad, codeMove, condition} = let val load = case codeLoad of RegisterArg regLoad => moveIfNecessary({src=GenReg regLoad, dst=GenReg output, kind=opSizeToIMove opSize}, code) | codeLoad => Move{source=codeLoad, destination=RegisterArg output, moveSize=opSizeToMove opSize} :: code in CondMove{test=condition, output=output, source=codeMove, opSize=opSize} :: load end in if loadFalseCmoveTrue then SOME(cmov{codeLoad=codeFalse, codeMove=codeTrue, condition=condition}) else SOME(cmov{codeLoad=codeTrue, codeMove=codeFalse, condition=invertTest condition}) end val isPossSetCCOrCmov = if not (haveProcessed trueJump) andalso available trueJump andalso not (haveProcessed falseJump) andalso available falseJump then case (tFlow, fFlow, tBlock, fBlock) of (ExitCode, ExitCode, [{instr=LoadArgument{dest=PReg tReg, source=tSource, kind=kindT}, ...}, {instr=ReturnResultFromFunction{resultReg=PReg resReg, realReg, numStackArgs, ...}, ...}], [{instr=LoadArgument{dest=PReg fReg, source=fSource, kind=kindF}, ...}, {instr=ReturnResultFromFunction _, ...}]) => (* The real register for the two sides should both be rax. 
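Illustrative sketch (not part of the patch) of the branch-free form chosen here for an if-then-else in which both arms simply load a value and continue to the same place: one arm's value is loaded unconditionally into the output register and the other is conditionally moved over it, so no conditional jump is emitted at all. With the false value already in rax and the true value in rbx, loadFalseCmoveTrue is true and a single CondMove suffices:

    (* cmove rax, rbx  --  rax keeps the false value unless the test holds *)
    val example = CondMove { test = JE, output = rax, source = RegisterArg rbx, opSize = OpSize64 }
    (* "example" is a hypothetical binding; the real code conses this onto
       the operation list after loading the false value. *)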
*) let val realTReg = getAllocatedReg tReg and realFReg = getAllocatedReg fReg in if realTReg = realFReg andalso kindT = kindF andalso (kindT = Move32Bit orelse kindT = Move64Bit) then ( case cmoveOrSetcc{condition=condition, output=realTReg, tSource=tSource, fSource=fSource, kind=kindT, code=code} of SOME code => let val resultReg = getAllocatedReg resReg val code = ReturnFromFunction numStackArgs :: moveIfNecessary({src=resultReg, dst=realReg, kind=moveNativeWord}, code) in SOME{code=code, trueJump=trueJump, falseJump=falseJump} end | NONE => NONE ) else NONE end | (Unconditional tDest, Unconditional fDest, [{instr=LoadArgument{dest=PReg tReg, source=tSource, kind=kindT}, ...}], [{instr=LoadArgument{dest=PReg fReg, source=fSource, kind=kindF}, ...}]) => let val realTReg = getAllocatedReg tReg and realFReg = getAllocatedReg fReg in if tDest = fDest andalso realTReg = realFReg andalso kindT = kindF andalso (kindT = Move32Bit orelse kindT = Move64Bit) then ( case cmoveOrSetcc{condition=condition, output=realTReg, tSource=tSource, fSource=fSource, kind=kindT, code=code} of SOME code => SOME{code=code, trueJump=trueJump, falseJump=falseJump} | NONE => NONE ) else NONE end | _ => NONE else NONE in case isPossSetCCOrCmov of NONE => (* We can usually choose either destination and in nearly all cases it won't matter. The default branch is not to take forward jumps so if there is reason to believe that one branch is more likely we should follow that branch now and leave the other. If we have JO/JNO we assume that overflow is unusual. If one branch raises an exception we assume that that is unusual. *) let val (first, second) = case (condition, Vector.sub(blocks, falseJump)) of (JNO, _) => (trueJump, falseJump) | (_, ExtendedBasicBlock{ flow=ExitCode, block, ...}) => if List.exists(fn{instr=RaiseExceptionPacket _, ...} => true | _ => false) block then (trueJump, falseJump) else (falseJump, trueJump) | _ => (falseJump, trueJump) in if not (haveProcessed first) andalso available first then SOME(FlowCodeSimple first) else if not (haveProcessed second) andalso available second then SOME(FlowCodeSimple second) else NONE end | SOME args => SOME(FlowCodeCMove args) end | SetHandler { continue, ... } => (* We want the continuation if possible. We'll need a branch round the handler so that won't help. *) if not (haveProcessed continue) andalso available continue then SOME(FlowCodeSimple continue) else NONE | UnconditionalHandle _ => NONE | ConditionalHandle _ => NONE in (* First choice - continue the existing block. Second choice - the first item whose sources have all been processed. Third choice - something from the list. *) val picked = case continuation of SOME c => c | NONE => case List.find available stillToDo of SOME c => FlowCodeSimple c | NONE => FlowCodeSimple head end in case picked of FlowCodeSimple picked => let val () = Array.update(processed, picked, true) (* Code to terminate the previous block. *) val startCode = case lastFlow of ExitCode => [] | IndexedBr _ => [] | UnconditionalHandle _ => [] | Unconditional dest => if dest = picked then [] else [UncondBranch(getBlockLabel dest)] | ConditionalHandle { continue, ...} => if continue = picked then [] else [UncondBranch(getBlockLabel continue)] | SetHandler { continue, ... } => if continue = picked then [] else [UncondBranch(getBlockLabel continue)] | Conditional { condition, trueJump, falseJump, ...} => if picked = falseJump (* Usual case. 
*) then [ConditionalBranch{test=condition, label=getBlockLabel trueJump}] else if picked = trueJump then (* We have a jump to the true condition. Invert the jump. This is more than an optimisation. Because this immediately precedes the true block we're not going to generate a label. *) [ConditionalBranch{test=invertTest condition, label=getBlockLabel falseJump}] else [ UncondBranch(getBlockLabel falseJump), ConditionalBranch{test=condition, label=getBlockLabel trueJump} ] (* Code-generate the body with the code we've done so far at the end. Add a label at the start if necessary. *) local (* If the previous block dropped through to this and this was the only reference then we don't need a label. *) fun onlyJumpingHere (lab: int) = if lab <> picked then false else case Array.sub(labelRefs, picked) of [singleton] => singleton = lab | _ => false val noLabel = case lastFlow of ExitCode => picked = 0 (* Unless this was the first block. *) | Unconditional dest => onlyJumpingHere dest | Conditional { trueJump, falseJump, ...} => onlyJumpingHere trueJump orelse onlyJumpingHere falseJump | IndexedBr _ => false | SetHandler _ => false | UnconditionalHandle _ => false | ConditionalHandle { continue, ...} => onlyJumpingHere continue in val startLabel = if noLabel then [] else [JumpLabel(getBlockLabel picked)] end val ExtendedBasicBlock { flow, block, ...} = Vector.sub(blocks, picked) local fun genCodeBlock(instr, code) = codeExtended {flow=flow} (instr, code) in val bodyCode = List.foldl genCodeBlock (startLabel @ startCode @ code) block end val addSet = case flow of ExitCode => [] | IndexedBr cases => cases | Unconditional dest => [dest] | Conditional {trueJump, falseJump, ...} => [falseJump, trueJump] | SetHandler { handler, continue } => [handler, continue] | UnconditionalHandle _ => [] | ConditionalHandle { continue, ...} => [continue] in genCode(addSet @ stillToDo, flow, bodyCode) end | FlowCodeCMove{code, trueJump, falseJump} => let (* We've generated a conditional move and possibly a return. If the trueJump and falseJump are only ever referenced from this block they're done, otherwise we still need to do them. *) val _ = case Array.sub(labelRefs, trueJump) of [_] => Array.update(processed, trueJump, true) | _ => () val _ = case Array.sub(labelRefs, falseJump) of [_] => Array.update(processed, falseJump, true) | _ => () val ExtendedBasicBlock { flow, ...} = Vector.sub(blocks, trueJump) val addSet = case flow of ExitCode => [] | Unconditional dest => [dest] | _ => raise InternalError "FlowCodeCMove" in genCode(addSet @ stillToDo, flow, code) end end in val ops = genCode([0], ExitCode, []) end in X86OPTIMISE.generateCode{code=newCode, ops=List.rev ops, labelCount= !outputLabelCount, resultClosure=resultClosure} end val nGenRegs = List.length generalRegisters structure Sharing = struct type intSet = intSet and extendedBasicBlock = extendedBasicBlock and regProperty = regProperty and reg = reg and closureRef = closureRef end end; diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86OUTPUTCODE.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86OUTPUTCODE.ML index c60440fb..05234797 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86OUTPUTCODE.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86OUTPUTCODE.ML @@ -1,4015 +1,4023 @@ (* - Copyright David C. J. Matthews 1989, 2000, 2009-10, 2012-13, 2015-20 + Copyright David C. J. 
Matthews 1989, 2000, 2009-10, 2012-13, 2015-21 Based on original code: Copyright (c) 2000 Cambridge University Technical Services Limited This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Title: Code Generator Routines. Author: Dave Matthews, Cambridge University Computer Laboratory Copyright Cambridge University 1989 *) (* This module contains the code vector and operations to insert code into it. Each procedure is compiled into a separate segment. Initially it is compiled into a fixed size segment, and then copied into a segment of the correct size at the end. This module contains all the definitions of the X86 opCodes and registers. It uses "codeseg" to create and operate on the segment itself. *) functor X86OUTPUTCODE ( structure DEBUG: DEBUG structure PRETTY: PRETTYSIG (* for compilerOutTag *) structure CODE_ARRAY: CODEARRAYSIG ) : X86CODESIG = struct open CODE_ARRAY open DEBUG open Address open Misc (* May be targeted at native 32-bit, native 64-bit or X86/64 with 32-bit words and addresses as object Ids. *) datatype targetArch = Native32Bit | Native64Bit | ObjectId32Bit val targetArch = case PolyML.architecture() of "I386" => Native32Bit | "X86_64" => Native64Bit | "X86_64_32" => ObjectId32Bit | _ => raise InternalError "Unknown target architecture" (* Some checks - *) val () = case (targetArch, wordSize, nativeWordSize) of (Native32Bit, 0w4, 0w4) => () | (Native64Bit, 0w8, 0w8) => () | (ObjectId32Bit, 0w4, 0w8) => () | _ => raise InternalError "Mismatch of architecture and word-length" val hostIsX64 = targetArch <> Native32Bit infix 5 << <<+ <<- >> >>+ >>- ~>> ~>>+ ~>>- (* Shift operators *) infix 3 andb orb xorb andbL orbL xorbL andb8 orb8 xorb8 val op << = Word.<< and op >> = Word.>> val (*op <<+ = LargeWord.<< and *) op >>+ = LargeWord.>> val op <<- = Word8.<< and op >>- = Word8.>> val op orb8 = Word8.orb val op andb8 = Word8.andb val op andb = Word.andb (* and op andbL = LargeWord.andb *) and op orb = Word.orb val wordToWord8 = Word8.fromLargeWord o Word.toLargeWord (*and word8ToWord = Word.fromLargeWord o Word8.toLargeWord*) val exp2_16 = 0x10000 val exp2_31 = 0x80000000: LargeInt.int (* Returns true if this a 32-bit machine or if the constant is within 32-bits. This is exported to the higher levels. N.B. The test for not isX64 avoids a significant overhead with arbitrary precision arithmetic on X86/32. *) fun is32bit v = not hostIsX64 orelse ~exp2_31 <= v andalso v < exp2_31 (* tag a short constant *) fun tag c = 2 * c + 1; fun is8BitL (n: LargeInt.int) = ~ 0x80 <= n andalso n < 0x80 local val shift = if wordSize = 0w4 then 0w2 else if wordSize = 0w8 then 0w3 else raise InternalError "Invalid word size for x86_32 or x86+64" in fun wordsToBytes n = n << shift and bytesToWords n = n >> shift end infix 6 addrPlus addrMinus; (* All indexes into the code vector have type "addrs". This is really a legacy. 
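A few worked values for the helpers just defined (illustrative only, not part of the patch): tagging doubles a short constant and sets the low bit, and the is8BitL/is32bit tests decide how wide an immediate field an operand needs.

    val _ = tag 0 = 1 andalso tag 1 = 3 andalso tag 5 = 11          (* 2*c + 1 *)
    val _ = is8BitL ~128 andalso is8BitL 127 andalso not (is8BitL 128)
    val _ = is32bit 0x7fffffff        (* fits a signed 32-bit immediate on any target *)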
*) type addrs = Word.word val addrZero = 0w0 (* This is the external label type used when constructing operations. *) datatype label = Label of { labelNo: int } (* Constants which are too large to go inline in the code are put in a list and put at the end of the code. They are arranged so that the garbage collector can find them and change them as necessary. A reference to a constant is treated like a forward reference to a label. *) datatype code = Code of { procName: string, (* Name of the procedure. *) printAssemblyCode:bool, (* Whether to print the code when we finish. *) printStream: string->unit, (* The stream to use *) lowLevelOptimise: bool, (* Whether to do the low-level optimisation pass *) profileObject : machineWord (* The profile object for this code. *) } (* Exported functions *) fun lowLevelOptimise(Code{lowLevelOptimise, ...}) = lowLevelOptimise (* EBP/RBP points to a structure that interfaces to the RTS. These are offsets into that structure. *) val memRegLocalMPointer = 0 (* Not used in 64-bit *) and memRegHandlerRegister = Word.toInt nativeWordSize and memRegLocalMbottom = 2 * Word.toInt nativeWordSize and memRegStackLimit = 3 * Word.toInt nativeWordSize and memRegExceptionPacket = 4 * Word.toInt nativeWordSize and memRegCStackPtr = 6 * Word.toInt nativeWordSize and memRegThreadSelf = 7 * Word.toInt nativeWordSize and memRegStackPtr = 8 * Word.toInt nativeWordSize and memRegHeapOverflowCall = 10 * Word.toInt nativeWordSize and memRegStackOverflowCall = 11 * Word.toInt nativeWordSize and memRegStackOverflowCallEx = 12 * Word.toInt nativeWordSize and memRegSavedRbx = 14 * Word.toInt nativeWordSize (* Heap base in 32-in-64. *) (* create and initialise a code segment *) fun codeCreate (name : string, profObj, parameters) : code = let val printStream = PRETTY.getSimplePrinter(parameters, []) in Code { procName = name, printAssemblyCode = DEBUG.getParameter DEBUG.assemblyCodeTag parameters, printStream = printStream, lowLevelOptimise = DEBUG.getParameter DEBUG.lowlevelOptimiseTag parameters, profileObject = profObj } end (* Put 1 unsigned byte at a given offset in the segment. *) fun set8u (b, addr, seg) = byteVecSet (seg, addr, b) (* Put 4 bytes at a given offset in the segment. *) (* b0 is the least significant byte. *) fun set4Bytes (b3, b2, b1, b0, addr, seg) = let val a = addr; in (* Little-endian *) byteVecSet (seg, a, b0); byteVecSet (seg, a + 0w1, b1); byteVecSet (seg, a + 0w2, b2); byteVecSet (seg, a + 0w3, b3) end; (* Put 1 unsigned word at a given offset in the segment. *) fun set32u (ival: LargeWord.word, addr, seg) : unit = let val b3 = Word8.fromLargeWord (ival >>+ 0w24) val b2 = Word8.fromLargeWord (ival >>+ 0w16) val b1 = Word8.fromLargeWord (ival >>+ 0w8) val b0 = Word8.fromLargeWord ival in set4Bytes (b3, b2, b1, b0, addr, seg) end (* Put 1 signed word at a given offset in the segment. *) fun set32s (ival: LargeInt.int, addr, seg) = set32u(LargeWord.fromLargeInt ival, addr, seg) fun byteSigned ival = if ~0x80 <= ival andalso ival < 0x80 then Word8.fromInt ival else raise InternalError "byteSigned: invalid byte" (* Convert a large-word value to a little-endian byte sequence. *) fun largeWordToBytes(_, 0) = [] | largeWordToBytes(ival: LargeWord.word, n) = Word8.fromLargeWord ival :: largeWordToBytes(ival >>+ 0w8, n-1) fun word32Unsigned(ival: LargeWord.word) = largeWordToBytes(ival, 4) fun int32Signed(ival: LargeInt.int) = if is32bit ival then word32Unsigned(LargeWord.fromLargeInt ival) else raise InternalError "int32Signed: invalid word" (* Registers. 
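Before the register definitions, one worked example of the byte-serialisation helpers above (illustrative only, not part of the patch): constants are stored little-endian, least-significant byte first.

    val _ = largeWordToBytes (0wx12345678, 4) = [0wx78, 0wx56, 0wx34, 0wx12]
    (* set32u writes the same four bytes at increasing addresses, so the
       least-significant byte lands at the lowest address. *)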
*) datatype genReg = GeneralReg of Word8.word * bool and fpReg = FloatingPtReg of Word8.word and xmmReg = SSE2Reg of Word8.word datatype reg = GenReg of genReg | FPReg of fpReg | XMMReg of xmmReg (* These are the real registers we have. The AMD extension encodes the additional registers through the REX prefix. *) val rax = GeneralReg (0w0, false) val rcx = GeneralReg (0w1, false) val rdx = GeneralReg (0w2, false) val rbx = GeneralReg (0w3, false) val rsp = GeneralReg (0w4, false) val rbp = GeneralReg (0w5, false) val rsi = GeneralReg (0w6, false) val rdi = GeneralReg (0w7, false) val eax = rax and ecx = rcx and edx = rdx and ebx = rbx and esp = rsp and ebp = rbp and esi = rsi and edi = rdi val r8 = GeneralReg (0w0, true) val r9 = GeneralReg (0w1, true) val r10 = GeneralReg (0w2, true) val r11 = GeneralReg (0w3, true) val r12 = GeneralReg (0w4, true) val r13 = GeneralReg (0w5, true) val r14 = GeneralReg (0w6, true) val r15 = GeneralReg (0w7, true) (* Floating point "registers". Actually entries on the floating point stack. The X86 has a floating point stack with eight entries. *) val fp0 = FloatingPtReg 0w0 and fp1 = FloatingPtReg 0w1 and fp2 = FloatingPtReg 0w2 and fp3 = FloatingPtReg 0w3 and fp4 = FloatingPtReg 0w4 and fp5 = FloatingPtReg 0w5 and fp6 = FloatingPtReg 0w6 and fp7 = FloatingPtReg 0w7 (* SSE2 Registers. These are used for floating point in 64-bity mode. We only use XMM0-6 because the others are callee save and we don't currently save them. *) val xmm0 = SSE2Reg 0w0 and xmm1 = SSE2Reg 0w1 and xmm2 = SSE2Reg 0w2 and xmm3 = SSE2Reg 0w3 and xmm4 = SSE2Reg 0w4 and xmm5 = SSE2Reg 0w5 and xmm6 = SSE2Reg 0w6 and xmm7 = SSE2Reg 0w7 fun getReg (GeneralReg r) = r fun mkReg n = GeneralReg n (* reg.up *) (* The maximum size of the register vectors and masks. Although the X86/32 has a floating point stack with eight entries it's much simpler to treat it as having seven "real" registers. Items are pushed to the stack and then stored and popped into the current location. It may be possible to improve the code by some peephole optimisation. *) val regs = 30 (* Include the X86/64 registers even if this is 32-bit. *) (* The nth register (counting from 0). *) (* Profiling shows that applying the constructors here creates a lot of garbage. Create the entries once and then use vector indexing instead. *) local fun regN i = if i < 8 then GenReg(GeneralReg(Word8.fromInt i, false)) else if i < 16 then GenReg(GeneralReg(Word8.fromInt(i-8), true)) else if i < 23 then FPReg(FloatingPtReg(Word8.fromInt(i-16))) else XMMReg(SSE2Reg(Word8.fromInt(i-23))) val regVec = Vector.tabulate(regs, regN) in fun regN i = Vector.sub(regVec, i) handle Subscript => raise InternalError "Bad register number" end (* The number of the register. *) fun nReg(GenReg(GeneralReg(r, false))) = Word8.toInt r | nReg(GenReg(GeneralReg(r, true))) = Word8.toInt r + 8 | nReg(FPReg(FloatingPtReg r)) = Word8.toInt r + 16 | nReg(XMMReg(SSE2Reg r)) = Word8.toInt r + 23 datatype opsize = SZByte | SZWord | SZDWord | SZQWord (* Default size when printing regs. 
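To make the register numbering used by regN/nReg above concrete (illustrative only, not part of the patch): indices 0-7 are the classic general registers, 8-15 the REX-extended ones, 16-22 the floating-point stack entries and 23 upwards the SSE2 registers.

    val _ = nReg (GenReg rax)  = 0
    val _ = nReg (GenReg r8)   = 8        (* r8 is GeneralReg (0w0, true) *)
    val _ = nReg (FPReg fp0)   = 16
    val _ = nReg (XMMReg xmm0) = 23
    val _ = regN 23 = XMMReg xmm0         (* regN is the inverse, via the cached vector *)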
*) val sz32_64 = if hostIsX64 then SZQWord else SZDWord fun genRegRepr(GeneralReg (0w0, false), SZByte) = "al" | genRegRepr(GeneralReg (0w1, false), SZByte) = "cl" | genRegRepr(GeneralReg (0w2, false), SZByte) = "dl" | genRegRepr(GeneralReg (0w3, false), SZByte) = "bl" | genRegRepr(GeneralReg (0w4, false), SZByte) = "ah" | genRegRepr(GeneralReg (0w5, false), SZByte) = "ch" | genRegRepr(GeneralReg (0w6, false), SZByte) = "sil" (* Assume there's a Rex code that forces low-order reg *) | genRegRepr(GeneralReg (0w7, false), SZByte) = "dil" | genRegRepr(GeneralReg (reg, true), SZByte) = "r" ^ Int.toString(Word8.toInt reg +8) ^ "b" | genRegRepr(GeneralReg (0w0, false), SZDWord) = "eax" | genRegRepr(GeneralReg (0w1, false), SZDWord) = "ecx" | genRegRepr(GeneralReg (0w2, false), SZDWord) = "edx" | genRegRepr(GeneralReg (0w3, false), SZDWord) = "ebx" | genRegRepr(GeneralReg (0w4, false), SZDWord) = "esp" | genRegRepr(GeneralReg (0w5, false), SZDWord) = "ebp" | genRegRepr(GeneralReg (0w6, false), SZDWord) = "esi" | genRegRepr(GeneralReg (0w7, false), SZDWord) = "edi" | genRegRepr(GeneralReg (reg, true), SZDWord) = "r" ^ Int.toString(Word8.toInt reg +8) ^ "d" | genRegRepr(GeneralReg (0w0, false), SZQWord) = "rax" | genRegRepr(GeneralReg (0w1, false), SZQWord) = "rcx" | genRegRepr(GeneralReg (0w2, false), SZQWord) = "rdx" | genRegRepr(GeneralReg (0w3, false), SZQWord) = "rbx" | genRegRepr(GeneralReg (0w4, false), SZQWord) = "rsp" | genRegRepr(GeneralReg (0w5, false), SZQWord) = "rbp" | genRegRepr(GeneralReg (0w6, false), SZQWord) = "rsi" | genRegRepr(GeneralReg (0w7, false), SZQWord) = "rdi" | genRegRepr(GeneralReg (reg, true), SZQWord) = "r" ^ Int.toString(Word8.toInt reg +8) | genRegRepr(GeneralReg (0w0, false), SZWord) = "ax" | genRegRepr(GeneralReg (0w1, false), SZWord) = "cx" | genRegRepr(GeneralReg (0w2, false), SZWord) = "dx" | genRegRepr(GeneralReg (0w3, false), SZWord) = "bx" | genRegRepr(GeneralReg (0w4, false), SZWord) = "sp" | genRegRepr(GeneralReg (0w5, false), SZWord) = "bp" | genRegRepr(GeneralReg (0w6, false), SZWord) = "si" | genRegRepr(GeneralReg (0w7, false), SZWord) = "di" | genRegRepr(GeneralReg (reg, true), SZWord) = "r" ^ Int.toString(Word8.toInt reg +8) ^ "w" | genRegRepr _ = "unknown" (* Suppress warning because word values are not exhaustive. *) and fpRegRepr(FloatingPtReg n) = "fp" ^ Word8.toString n and xmmRegRepr(SSE2Reg n) = "xmm" ^ Word8.toString n fun regRepr(GenReg r) = genRegRepr (r, sz32_64) | regRepr(FPReg r) = fpRegRepr r | regRepr(XMMReg r) = xmmRegRepr r (* Install a pretty printer. This is simply for when this code is being run under the debugger. N.B. We need PolyML.PrettyString here. *) val () = PolyML.addPrettyPrinter(fn _ => fn _ => fn r => PolyML.PrettyString(regRepr r)) datatype argType = ArgGeneral | ArgFP (* Size of operand. OpSize64 is only valid in 64-bit mode. *) datatype opSize = OpSize32 | OpSize64 structure RegSet = struct (* Implement a register set as a bit mask. 
*) datatype regSet = RegSet of word fun singleton r = RegSet(0w1 << Word.fromInt(nReg r)) fun regSetUnion(RegSet r1, RegSet r2) = RegSet(Word.orb(r1, r2)) fun regSetIntersect(RegSet r1, RegSet r2) = RegSet(Word.andb(r1, r2)) local fun addReg(acc, n) = if n = regs then acc else addReg(regSetUnion(acc, singleton(regN n)), n+1) in val allRegisters = addReg(RegSet 0w0, 0) end val noRegisters = RegSet 0w0 fun inSet(r, rs) = regSetIntersect(singleton r, rs) <> noRegisters fun regSetMinus(RegSet s1, RegSet s2) = RegSet(Word.andb(s1, Word.notb s2)) val listToSet = List.foldl (fn(r, rs) => regSetUnion(singleton r, rs)) noRegisters local val regs = case targetArch of Native32Bit => [eax, ecx, edx, ebx, esi, edi] | Native64Bit => [eax, ecx, edx, ebx, esi, edi, r8, r9, r10, r11, r12, r13, r14] | ObjectId32Bit => [eax, ecx, edx, esi, edi, r8, r9, r10, r11, r12, r13, r14] in val generalRegisters = listToSet(map GenReg regs) end (* The floating point stack. Note that this excludes one item so it is always possible to load a value onto the top of the FP stack. *) val floatingPtRegisters = listToSet(map FPReg [fp0, fp1, fp2, fp3, fp4, fp5, fp6(*, fp7*)]) val sse2Registers = listToSet(map XMMReg [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6]) fun isAllRegs rs = rs = allRegisters fun setToList (RegSet regSet)= let fun testBit (n, bit, res) = if n = regs then res else testBit(n+1, bit << 0w1, if (regSet andb bit) <> 0w0 then regN n :: res else res) in testBit(0, 0w1, []) end val cardinality = List.length o setToList (* Choose one of the set. This chooses the least value which means that the ordering of the registers is significant. This is a hot-spot so is coded directly with the word operations. *) fun oneOf(RegSet regSet) = let fun find(n, bit) = if n = Word.fromInt regs then raise InternalError "oneOf: empty" else if Word.andb(bit, regSet) <> 0w0 then n else find(n+0w1, Word.<<(bit, 0w1)) in regN(Word.toInt(find(0w0, 0w1))) end fun regSetRepr regSet = let val regs = setToList regSet in "[" ^ String.concatWith "," (List.map regRepr regs) ^ "]" end (* Install a pretty printer for when this code is being debugged. 
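A small illustration of the bit-mask representation (not part of the patch): each register occupies the bit given by its number, so the set operations are single word operations.

    val eaxEcx = listToSet [GenReg eax, GenReg ecx]           (* bits 0 and 1, i.e. RegSet 0wx3; hypothetical binding *)
    val _ = inSet (GenReg eax, eaxEcx)                        (* true *)
    val _ = not (inSet (GenReg edx, eaxEcx))                  (* true: bit 2 is clear *)
    val _ = regSetMinus (eaxEcx, singleton (GenReg ecx)) = singleton (GenReg eax)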
*) val () = PolyML.addPrettyPrinter(fn _ => fn _ => fn r => PolyML.PrettyString(regSetRepr r)) end open RegSet datatype arithOp = ADD | OR (*|ADC | SBB*) | AND | SUB | XOR | CMP fun arithOpToWord ADD = 0w0: Word8.word | arithOpToWord OR = 0w1 | arithOpToWord AND = 0w4 | arithOpToWord SUB = 0w5 | arithOpToWord XOR = 0w6 | arithOpToWord CMP = 0w7 fun arithOpRepr ADD = "Add" | arithOpRepr OR = "Or" | arithOpRepr AND = "And" | arithOpRepr SUB = "Sub" | arithOpRepr XOR = "Xor" | arithOpRepr CMP = "Cmp" datatype shiftType = SHL | SHR | SAR fun shiftTypeToWord SHL = 0w4: Word8.word | shiftTypeToWord SHR = 0w5 | shiftTypeToWord SAR = 0w7 fun shiftTypeRepr SHL = "Shift Left Logical" | shiftTypeRepr SHR = "Shift Right Logical" | shiftTypeRepr SAR = "Shift Right Arithemetic" datatype repOps = CMPS8 | MOVS8 | MOVS32 | STOS8 | STOS32 | MOVS64 | STOS64 fun repOpsToWord CMPS8 = 0wxa6: Word8.word | repOpsToWord MOVS8 = 0wxa4 | repOpsToWord MOVS32 = 0wxa5 | repOpsToWord MOVS64 = 0wxa5 (* Plus Rex.w *) | repOpsToWord STOS8 = 0wxaa | repOpsToWord STOS32 = 0wxab | repOpsToWord STOS64 = 0wxab (* Plus Rex.w *) fun repOpsRepr CMPS8 = "CompareBytes" | repOpsRepr MOVS8 = "MoveBytes" | repOpsRepr MOVS32 = "MoveWords32" | repOpsRepr MOVS64 = "MoveWords64" | repOpsRepr STOS8 = "StoreBytes" | repOpsRepr STOS32 = "StoreWords32" | repOpsRepr STOS64 = "StoreWords64" datatype fpOps = FADD | FMUL | FCOM | FCOMP | FSUB | FSUBR | FDIV | FDIVR fun fpOpToWord FADD = 0w0: Word8.word | fpOpToWord FMUL = 0w1 | fpOpToWord FCOM = 0w2 | fpOpToWord FCOMP = 0w3 | fpOpToWord FSUB = 0w4 | fpOpToWord FSUBR = 0w5 | fpOpToWord FDIV = 0w6 | fpOpToWord FDIVR = 0w7 fun fpOpRepr FADD = "FPAdd" | fpOpRepr FMUL = "FPMultiply" | fpOpRepr FCOM = "FPCompare" | fpOpRepr FCOMP = "FPCompareAndPop" | fpOpRepr FSUB = "FPSubtract" | fpOpRepr FSUBR = "FPReverseSubtract" | fpOpRepr FDIV = "FPDivide" | fpOpRepr FDIVR = "FPReverseDivide" datatype fpUnaryOps = FCHS | FABS | FLD1 | FLDZ fun fpUnaryToWords FCHS = {rm=0w0:Word8.word, nnn=0w4: Word8.word} | fpUnaryToWords FABS = {rm=0w1, nnn=0w4} | fpUnaryToWords FLD1 = {rm=0w0, nnn=0w5} | fpUnaryToWords FLDZ = {rm=0w6, nnn=0w5} fun fpUnaryRepr FCHS = "FPChangeSign" | fpUnaryRepr FABS = "FPAbs" | fpUnaryRepr FLD1 = "FPLoadOne" | fpUnaryRepr FLDZ = "FPLoadZero" datatype branchOps = JO | JNO | JE | JNE | JL | JGE | JLE | JG | JB | JNB | JNA | JA | JP | JNP fun branchOpToWord JO = 0wx0: Word8.word | branchOpToWord JNO = 0wx1 | branchOpToWord JB = 0wx2 | branchOpToWord JNB = 0wx3 | branchOpToWord JE = 0wx4 | branchOpToWord JNE = 0wx5 | branchOpToWord JNA = 0wx6 | branchOpToWord JA = 0wx7 | branchOpToWord JP = 0wxa | branchOpToWord JNP = 0wxb | branchOpToWord JL = 0wxc | branchOpToWord JGE = 0wxd | branchOpToWord JLE = 0wxe | branchOpToWord JG = 0wxf fun branchOpRepr JO = "Overflow" | branchOpRepr JNO = "NotOverflow" | branchOpRepr JE = "Equal" | branchOpRepr JNE = "NotEqual" | branchOpRepr JL = "Less" | branchOpRepr JGE = "GreaterOrEqual" | branchOpRepr JLE = "LessOrEqual" | branchOpRepr JG = "Greater" | branchOpRepr JB = "Before" | branchOpRepr JNB= "NotBefore" | branchOpRepr JNA = "NotAfter" | branchOpRepr JA = "After" | branchOpRepr JP = "Parity" | branchOpRepr JNP = "NoParity" (* Invert a test. This is used if we want to change the sense of a test from jumping if the condition is true to jumping if it is false. 
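For example (illustrative only, using the invertTest function defined immediately below), inverting a test lets the code generator fall through to the block it places next and branch only on the opposite condition:

    val _ = invertTest JE  = JNE    (* jump-if-equal becomes jump-if-not-equal *)
    val _ = invertTest JL  = JGE    (* signed less-than becomes greater-or-equal *)
    val _ = invertTest JNA = JA     (* unsigned not-above becomes above *)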
*) fun invertTest JE = JNE | invertTest JNE = JE | invertTest JA = JNA | invertTest JB = JNB | invertTest JNA = JA | invertTest JNB = JB | invertTest JL = JGE | invertTest JG = JLE | invertTest JLE = JG | invertTest JGE = JL | invertTest JO = JNO | invertTest JNO = JO | invertTest JP = JNP | invertTest JNP = JP datatype sse2Operations = SSE2MoveDouble | SSE2MoveFloat | SSE2CompDouble | SSE2AddDouble | SSE2SubDouble | SSE2MulDouble | SSE2DivDouble | SSE2Xor | SSE2And | SSE2FloatToDouble | SSE2DoubleToFloat | SSE2CompSingle | SSE2AddSingle | SSE2SubSingle | SSE2MulSingle | SSE2DivSingle fun sse2OpRepr SSE2MoveDouble = "SSE2MoveDouble" | sse2OpRepr SSE2MoveFloat = "SSE2MoveFloat" | sse2OpRepr SSE2CompDouble = "SSE2CompDouble" | sse2OpRepr SSE2AddDouble = "SSE2AddDouble" | sse2OpRepr SSE2SubDouble = "SSE2SubDouble" | sse2OpRepr SSE2MulDouble = "SSE2MulDouble" | sse2OpRepr SSE2DivDouble = "SSE2DivDouble" | sse2OpRepr SSE2Xor = "SSE2Xor" | sse2OpRepr SSE2And = "SSE2And" | sse2OpRepr SSE2CompSingle = "SSE2CompSingle" | sse2OpRepr SSE2AddSingle = "SSE2AddSingle" | sse2OpRepr SSE2SubSingle = "SSE2SubSingle" | sse2OpRepr SSE2MulSingle = "SSE2MulSingle" | sse2OpRepr SSE2DivSingle = "SSE2DivSingle" | sse2OpRepr SSE2FloatToDouble = "SSE2FloatToDouble" | sse2OpRepr SSE2DoubleToFloat = "SSE2DoubleToFloat" (* Primary opCodes. N.B. only opCodes actually used are listed here. If new instruction are added check they will be handled by the run-time system in the event of trap. *) datatype opCode = Group1_8_A32 | Group1_8_A64 | Group1_32_A32 | Group1_32_A64 | Group1_8_a | JMP_8 | JMP_32 | CALL_32 | MOVL_A_R32 | MOVL_A_R64 | MOVL_R_A32 | MOVL_R_A64 | MOVL_R_A16 | MOVB_R_A32 | MOVB_R_A64 of {forceRex: bool} | PUSH_R of Word8.word | POP_R of Word8.word | Group5 | NOP | LEAL32 | LEAL64 | MOVL_32_R of Word8.word | MOVL_64_R of Word8.word | MOVL_32_A32 | MOVL_32_A64 | MOVB_8_A | POP_A | RET | RET_16 | CondJump of branchOps | CondJump32 of branchOps | SetCC of branchOps | Arith32 of arithOp * Word8.word | Arith64 of arithOp * Word8.word | Group3_A32 | Group3_A64 | Group3_a | Group2_8_A32 | Group2_8_A64 | Group2_CL_A32 | Group2_CL_A64 | Group2_1_A32 | Group2_1_A64 | PUSH_8 | PUSH_32 | TEST_ACC8 | LOCK_XADD32 | LOCK_XADD64 | FPESC of Word8.word | XCHNG32 | XCHNG64 | REP (* Rep prefix *) | MOVZB (* Needs escape code. *) | MOVZW (* Needs escape code. *) | MOVSXB32 (* Needs escape code. *) | MOVSXW32 (* Needs escape code. *) | MOVSXB64 (* Needs escape code. *) | MOVSXW64 (* Needs escape code. *) | IMUL32 (* Needs escape code. *) | IMUL64 (* Needs escape code. *) | SSE2StoreSingle (* movss with memory destination - needs escape sequence. *) | SSE2StoreDouble (* movsd with memory destination - needs escape sequence. *) | CQO_CDQ32 (* Sign extend before divide.. *) | CQO_CDQ64 (* Sign extend before divide.. *) | SSE2Ops of sse2Operations (* SSE2 instructions. *) | CVTSI2SD32 | CVTSI2SD64 | HLT (* End of code marker. *) | IMUL_C8_32 | IMUL_C8_64 | IMUL_C32_32 | IMUL_C32_64 | MOVDFromXMM (* move 32 bit value from XMM to general reg. 
*) | MOVQToXMM (* move 64 bit value from general reg.to XMM *) | PSRLDQ (* Shift XMM register *) | LDSTMXCSR | CVTSD2SI32 (* Double to 32-bit int *) | CVTSD2SI64 (* Double to 64-bit int *) | CVTSS2SI32 (* Single to 32-bit int *) | CVTSS2SI64 (* Single to 64-bit int *) | CVTTSD2SI32 (* Double to 32-bit int - truncate towards zero *) | CVTTSD2SI64 (* Double to 64-bit int - truncate towards zero *) | CVTTSS2SI32 (* Single to 32-bit int - truncate towards zero *) | CVTTSS2SI64 (* Single to 64-bit int - truncate towards zero *) | MOVSXD | CMOV32 of branchOps | CMOV64 of branchOps + | PAUSE fun opToInt Group1_8_A32 = 0wx83 | opToInt Group1_8_A64 = 0wx83 | opToInt Group1_32_A32 = 0wx81 | opToInt Group1_32_A64 = 0wx81 | opToInt Group1_8_a = 0wx80 | opToInt JMP_8 = 0wxeb | opToInt JMP_32 = 0wxe9 | opToInt CALL_32 = 0wxe8 | opToInt MOVL_A_R32 = 0wx8b | opToInt MOVL_A_R64 = 0wx8b | opToInt MOVL_R_A32 = 0wx89 | opToInt MOVL_R_A64 = 0wx89 | opToInt MOVL_R_A16 = 0wx89 (* Also has an OPSIZE prefix. *) | opToInt MOVB_R_A32 = 0wx88 | opToInt (MOVB_R_A64 _) = 0wx88 | opToInt (PUSH_R reg) = 0wx50 + reg | opToInt (POP_R reg) = 0wx58 + reg | opToInt Group5 = 0wxff | opToInt NOP = 0wx90 | opToInt LEAL32 = 0wx8d | opToInt LEAL64 = 0wx8d | opToInt (MOVL_32_R reg) = 0wxb8 + reg | opToInt (MOVL_64_R reg) = 0wxb8 + reg | opToInt MOVL_32_A32 = 0wxc7 | opToInt MOVL_32_A64 = 0wxc7 | opToInt MOVB_8_A = 0wxc6 | opToInt POP_A = 0wx8f | opToInt RET = 0wxc3 | opToInt RET_16 = 0wxc2 | opToInt (CondJump opc) = 0wx70 + branchOpToWord opc | opToInt (CondJump32 opc) = 0wx80 + branchOpToWord opc (* Needs 0F prefix *) | opToInt (SetCC opc) = 0wx90 + branchOpToWord opc (* Needs 0F prefix *) | opToInt (Arith32 (ao,dw)) = arithOpToWord ao * 0w8 + dw | opToInt (Arith64 (ao,dw)) = arithOpToWord ao * 0w8 + dw | opToInt Group3_A32 = 0wxf7 | opToInt Group3_A64 = 0wxf7 | opToInt Group3_a = 0wxf6 | opToInt Group2_8_A32 = 0wxc1 | opToInt Group2_8_A64 = 0wxc1 | opToInt Group2_1_A32 = 0wxd1 | opToInt Group2_1_A64 = 0wxd1 | opToInt Group2_CL_A32 = 0wxd3 | opToInt Group2_CL_A64 = 0wxd3 | opToInt PUSH_8 = 0wx6a | opToInt PUSH_32 = 0wx68 | opToInt TEST_ACC8 = 0wxa8 | opToInt LOCK_XADD32 = 0wxC1 (* Needs lock and escape prefixes. *) | opToInt LOCK_XADD64 = 0wxC1 (* Needs lock and escape prefixes. *) | opToInt (FPESC n) = 0wxD8 orb8 n | opToInt XCHNG32 = 0wx87 | opToInt XCHNG64 = 0wx87 | opToInt REP = 0wxf3 | opToInt MOVZB = 0wxb6 (* Needs escape code. *) | opToInt MOVZW = 0wxb7 (* Needs escape code. *) | opToInt MOVSXB32 = 0wxbe (* Needs escape code. *) | opToInt MOVSXW32 = 0wxbf (* Needs escape code. *) | opToInt MOVSXB64 = 0wxbe (* Needs escape code. *) | opToInt MOVSXW64 = 0wxbf (* Needs escape code. *) | opToInt IMUL32 = 0wxaf (* Needs escape code. *) | opToInt IMUL64 = 0wxaf (* Needs escape code. *) | opToInt SSE2StoreSingle = 0wx11 (* Needs F3 0F escape. *) | opToInt SSE2StoreDouble = 0wx11 (* Needs F2 0F escape. *) | opToInt CQO_CDQ32 = 0wx99 | opToInt CQO_CDQ64 = 0wx99 | opToInt (SSE2Ops SSE2MoveDouble) = 0wx10 (* Needs F2 0F escape. *) | opToInt (SSE2Ops SSE2MoveFloat) = 0wx10 (* Needs F3 0F escape. *) | opToInt (SSE2Ops SSE2CompDouble) = 0wx2E (* Needs 66 0F escape. *) | opToInt (SSE2Ops SSE2AddDouble) = 0wx58 (* Needs F2 0F escape. *) | opToInt (SSE2Ops SSE2SubDouble) = 0wx5c (* Needs F2 0F escape. *) | opToInt (SSE2Ops SSE2MulDouble) = 0wx59 (* Needs F2 0F escape. *) | opToInt (SSE2Ops SSE2DivDouble) = 0wx5e (* Needs F2 0F escape. *) | opToInt (SSE2Ops SSE2CompSingle) = 0wx2E (* Needs 0F escape. 
*) | opToInt (SSE2Ops SSE2AddSingle) = 0wx58 (* Needs F3 0F escape. *) | opToInt (SSE2Ops SSE2SubSingle) = 0wx5c (* Needs F3 0F escape. *) | opToInt (SSE2Ops SSE2MulSingle) = 0wx59 (* Needs F3 0F escape. *) | opToInt (SSE2Ops SSE2DivSingle) = 0wx5e (* Needs F3 0F escape. *) | opToInt (SSE2Ops SSE2And) = 0wx54 (* Needs 66 0F escape. *) | opToInt (SSE2Ops SSE2Xor) = 0wx57 (* Needs 66 0F escape. *) | opToInt (SSE2Ops SSE2FloatToDouble) = 0wx5A (* Needs F3 0F escape. *) | opToInt (SSE2Ops SSE2DoubleToFloat) = 0wx5A (* Needs F2 0F escape. *) | opToInt CVTSI2SD32 = 0wx2a (* Needs F2 0F escape. *) | opToInt CVTSI2SD64 = 0wx2a (* Needs F2 0F escape. *) | opToInt HLT = 0wxf4 | opToInt IMUL_C8_32 = 0wx6b | opToInt IMUL_C8_64 = 0wx6b | opToInt IMUL_C32_32 = 0wx69 | opToInt IMUL_C32_64 = 0wx69 | opToInt MOVDFromXMM = 0wx7e (* Needs 66 0F escape. *) | opToInt MOVQToXMM = 0wx6e (* Needs 66 0F escape. *) | opToInt PSRLDQ = 0wx73 (* Needs 66 0F escape. *) | opToInt LDSTMXCSR = 0wxae (* Needs 0F prefix. *) | opToInt CVTSD2SI32 = 0wx2d (* Needs F2 0F prefix. *) | opToInt CVTSD2SI64 = 0wx2d (* Needs F2 0F prefix and rex.w. *) | opToInt CVTSS2SI32 = 0wx2d (* Needs F3 0F prefix. *) | opToInt CVTSS2SI64 = 0wx2d (* Needs F3 0F prefix and rex.w. *) | opToInt CVTTSD2SI32 = 0wx2c (* Needs F2 0F prefix. *) | opToInt CVTTSD2SI64 = 0wx2c (* Needs F2 0F prefix. *) | opToInt CVTTSS2SI32 = 0wx2c (* Needs F3 0F prefix. *) | opToInt CVTTSS2SI64 = 0wx2c (* Needs F3 0F prefix and rex.w. *) | opToInt MOVSXD = 0wx63 | opToInt (CMOV32 opc) = 0wx40 + branchOpToWord opc (* Needs 0F prefix *) | opToInt (CMOV64 opc) = 0wx40 + branchOpToWord opc (* Needs 0F prefix and rex.w *) + | opToInt PAUSE = 0wx90 (* Needs F3 prefix *) datatype mode = Based0 (* mod = 0 *) | Based8 (* mod = 1 *) | Based32 (* mod = 2 *) | Register (* mod = 3 *) ; (* Put together the three fields which make up the mod r/m byte. *) fun modrm (md : mode, rg: Word8.word, rm : Word8.word) : Word8.word = let val _ = if rg > 0w7 then raise InternalError "modrm: bad rg" else () val _ = if rm > 0w7 then raise InternalError "modrm: bad rm" else () val modField: Word8.word = case md of Based0 => 0w0 | Based8 => 0w1 | Based32 => 0w2 | Register => 0w3 in (modField <<- 0w6) orb8 (rg <<- 0w3) orb8 rm end (* REX prefix *) fun rex {w,r,x,b} = 0wx40 orb8 (if w then 0w8 else 0w0) orb8 (if r then 0w4 else 0w0) orb8 (if x then 0w2 else 0w0) orb8 (if b then 0w1 else 0w0) (* The X86 has the option to include an index register and to scale it. *) datatype indexType = NoIndex | Index1 of genReg | Index2 of genReg | Index4 of genReg | Index8 of genReg (* Lock, Opsize and REPNE prefixes come before the REX. *) fun opcodePrefix LOCK_XADD32 = [0wxF0] (* Requires LOCK prefix. *) | opcodePrefix LOCK_XADD64 = [0wxF0] (* Requires LOCK prefix. *) | opcodePrefix MOVL_R_A16 = [0wx66] (* Requires OPSIZE prefix. 
*) | opcodePrefix SSE2StoreSingle = [0wxf3] | opcodePrefix SSE2StoreDouble = [0wxf2] | opcodePrefix(SSE2Ops SSE2CompDouble) = [0wx66] | opcodePrefix(SSE2Ops SSE2And) = [0wx66] | opcodePrefix(SSE2Ops SSE2Xor) = [0wx66] | opcodePrefix(SSE2Ops SSE2CompSingle) = [] (* No prefix *) | opcodePrefix(SSE2Ops SSE2MoveDouble) = [0wxf2] | opcodePrefix(SSE2Ops SSE2AddDouble) = [0wxf2] | opcodePrefix(SSE2Ops SSE2SubDouble) = [0wxf2] | opcodePrefix(SSE2Ops SSE2MulDouble) = [0wxf2] | opcodePrefix(SSE2Ops SSE2DivDouble) = [0wxf2] | opcodePrefix(SSE2Ops SSE2DoubleToFloat) = [0wxf2] | opcodePrefix(SSE2Ops SSE2MoveFloat) = [0wxf3] | opcodePrefix(SSE2Ops SSE2AddSingle) = [0wxf3] | opcodePrefix(SSE2Ops SSE2SubSingle) = [0wxf3] | opcodePrefix(SSE2Ops SSE2MulSingle) = [0wxf3] | opcodePrefix(SSE2Ops SSE2DivSingle) = [0wxf3] | opcodePrefix(SSE2Ops SSE2FloatToDouble) = [0wxf3] | opcodePrefix CVTSI2SD32 = [0wxf2] | opcodePrefix CVTSI2SD64 = [0wxf2] | opcodePrefix MOVDFromXMM = [0wx66] | opcodePrefix MOVQToXMM = [0wx66] | opcodePrefix PSRLDQ = [0wx66] | opcodePrefix CVTSD2SI32 = [0wxf2] | opcodePrefix CVTSD2SI64 = [0wxf2] | opcodePrefix CVTSS2SI32 = [0wxf3] | opcodePrefix CVTSS2SI64 = [0wxf3] | opcodePrefix CVTTSD2SI32 = [0wxf2] | opcodePrefix CVTTSD2SI64 = [0wxf2] | opcodePrefix CVTTSS2SI32 = [0wxf3] | opcodePrefix CVTTSS2SI64 = [0wxf3] + | opcodePrefix PAUSE = [0wxf3] | opcodePrefix _ = [] (* A few instructions require an escape. Escapes come after the REX. *) fun escapePrefix MOVZB = [0wx0f] | escapePrefix MOVZW = [0wx0f] | escapePrefix MOVSXB32 = [0wx0f] | escapePrefix MOVSXW32 = [0wx0f] | escapePrefix MOVSXB64 = [0wx0f] | escapePrefix MOVSXW64 = [0wx0f] | escapePrefix LOCK_XADD32 = [0wx0f] | escapePrefix LOCK_XADD64 = [0wx0f] | escapePrefix IMUL32 = [0wx0f] | escapePrefix IMUL64 = [0wx0f] | escapePrefix(CondJump32 _) = [0wx0f] | escapePrefix(SetCC _) = [0wx0f] | escapePrefix SSE2StoreSingle = [0wx0f] | escapePrefix SSE2StoreDouble = [0wx0f] | escapePrefix(SSE2Ops _) = [0wx0f] | escapePrefix CVTSI2SD32 = [0wx0f] | escapePrefix CVTSI2SD64 = [0wx0f] | escapePrefix MOVDFromXMM = [0wx0f] | escapePrefix MOVQToXMM = [0wx0f] | escapePrefix PSRLDQ = [0wx0f] | escapePrefix LDSTMXCSR = [0wx0f] | escapePrefix CVTSD2SI32 = [0wx0f] | escapePrefix CVTSD2SI64 = [0wx0f] | escapePrefix CVTSS2SI32 = [0wx0f] | escapePrefix CVTSS2SI64 = [0wx0f] | escapePrefix CVTTSD2SI32 = [0wx0f] | escapePrefix CVTTSD2SI64 = [0wx0f] | escapePrefix CVTTSS2SI32 = [0wx0f] | escapePrefix CVTTSS2SI64 = [0wx0f] | escapePrefix(CMOV32 _) = [0wx0f] | escapePrefix(CMOV64 _) = [0wx0f] | escapePrefix _ = [] (* Generate an opCode byte after doing any pending operations. *) fun opCodeBytes(opb:opCode, rx) = let val rexByte = case rx of NONE => [] | SOME rxx => if hostIsX64 then [rex rxx] else raise InternalError "opCodeBytes: rex prefix in 32 bit mode"; in opcodePrefix opb @ rexByte @ escapePrefix opb @ [opToInt opb] end fun rexByte(opb, rrX, rbX, riX) = let (* We need a rex prefix if we need to set the length to 64-bit. *) val need64bit = case opb of Group1_8_A64 => true (* Arithmetic operations - must be 64-bit *) | Group1_32_A64 => true (* Arithmetic operations - must be 64-bit *) | Group2_1_A64 => true (* 1-bit shifts - must be 64-bit *) | Group2_8_A64 => true (* n-bit shifts - must be 64-bit *) | Group2_CL_A64 => true (* Shifts by value in CL *) | Group3_A64 => true (* Test, Not, Mul etc. 
*) | Arith64 (_, _) => true | MOVL_A_R64 => true (* Needed *) | MOVL_R_A64 => true (* Needed *) | XCHNG64 => true | LEAL64 => true (* Needed to ensure the result is 64-bits *) | MOVL_64_R _ => true (* Needed *) | MOVL_32_A64 => true (* Needed *) | IMUL64 => true (* Needed to ensure the result is 64-bits *) | LOCK_XADD64 => true (* Needed to ensure the result is 64-bits *) | CQO_CDQ64 => true (* It's only CQO if there's a Rex prefix. *) | CVTSI2SD64 => true (* This affects the size of the integer source. *) | IMUL_C8_64 => true | IMUL_C32_64 => true | MOVQToXMM => true | CVTSD2SI64 => true (* This affects the size of the integer source. *) | CVTSS2SI64 => true | CVTTSD2SI64 => true | CVTTSS2SI64 => true | MOVSXD => true | CMOV64 _ => true | MOVSXB64 => true | MOVSXW64 => true (* Group5 - We only use 2/4/6 and they don't need prefix *) | _ => false (* If we are using MOVB_R_A with SIL or DIL we need to force a REX prefix. That's only possible in 64-bit mode. This also applies with Test and SetCC but they are dealt with elsewhere. *) val forceRex = case opb of MOVB_R_A64 {forceRex=true} => true (* This is allowed in X86/64 but not in X86/32. *) | _ => false in if need64bit orelse rrX orelse rbX orelse riX orelse forceRex then [rex{w=need64bit, r=rrX, b=rbX, x = riX}] else [] end (* Register/register operation. *) fun opReg(opb:opCode, (*dest*)GeneralReg(rrC, rrX), (*source*)GeneralReg(rbC, rbX)) = let val pref = opcodePrefix opb (* Any opsize or lock prefix. *) val rex = rexByte(opb, rrX, rbX, false) val esc = escapePrefix opb (* Generate the ESCAPE code if needed. *) val opc = opToInt opb val mdrm = modrm(Register, rrC, rbC) in pref @ rex @ esc @ [opc, mdrm] end (* Operations on a register where the second "register" is actually an operation code. *) fun opRegPlus2(opb:opCode, rd: genReg, op2: Word8.word) = let val (rrC, rrX) = getReg rd val pref = opcodePrefix opb (* Any opsize or lock prefix. *) val rex = rexByte(opb, false, rrX, false) val opc = opToInt opb val mdrm = modrm(Register, op2, rrC) in pref @ rex @ [opc, mdrm] end local (* General instruction form with modrm and optional sib bytes. rb is an option since the base register may be omitted. This is used with LEA to tag integers. *) fun opIndexedGen (opb:opCode, offset: LargeInt.int, rb: genReg option, ri: indexType, (rrC, rrX)) = let (* Base encoding. (Based0, 0w5) means "no base" so if we need ebp as the base we have to use Based8 at least. *) val (offsetCode, rbC, rbX) = case rb of NONE => (Based0, 0w5 (* no base register *), false) | SOME rb => let val (rbC, rbX) = getReg rb val base = if offset = 0 andalso rbC <> 0wx5 (* Can't use ebp with Based0 *) then Based0 (* no disp field *) else if is8BitL offset then Based8 (* use 8-bit disp field *) else Based32 (* use 32-bit disp field *) in (base, rbC, rbX) end (* Index coding. esp can't be used as an index so (0w4, false) means "no index". But r12 (0w4, true) CAN be. *) val ((riC, riX), scaleFactor) = case ri of NoIndex => ((0w4, false), 0w0) | Index1 i => (getReg i, 0w0) | Index2 i => (getReg i, 0w1) | Index4 i => (getReg i, 0w2) | Index8 i => (getReg i, 0w3) (* If the base register is esp or r12 we have to use a sib byte even if there's no index. That's because 0w4 as a base register means "there's a SIB byte". 
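A worked encoding may make this concrete (illustrative only, not part of the patch): loading rax from 16(r12) needs a REX prefix for the extended base register, a mod-r/m byte that selects a SIB byte, and the SIB byte itself, because r12 shares the low three bits of esp.

    (* mov rax, [r12+16]   ==>   49 8B 44 24 10                                    *)
    (*   rex {w=true, r=false, x=false, b=true}        = 0wx49  (64-bit, base r12) *)
    (*   opToInt MOVL_A_R64                            = 0wx8b                     *)
    (*   modrm (Based8, 0w0 (* rax *), 0w4 (* SIB *))  = 0wx44                     *)
    (*   SIB: scale 0, index "none" (0w4), base 0w4    = 0wx24                     *)
    (*   disp8                                         = 0wx10                     *)
    val _ = modrm (Based8, 0w0, 0w4) = 0wx44
    val _ = rex {w=true, r=false, x=false, b=true} = 0wx49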
*) val modRmAndOptionalSib = if rbC = 0w4 (* Code for esp and r12 *) orelse riC <> 0w4 orelse riX then let val mdrm = modrm(offsetCode, rrC, 0w4 (* s-i-b *)) val sibByte = (scaleFactor <<- 0w6) orb8 (riC <<- 0w3) orb8 rbC in [mdrm, sibByte] end else [modrm(offsetCode, rrC, rbC)] (* Generate the disp field (if any) *) val dispField = case (offsetCode, rb) of (Based8, _) => [Word8.fromLargeInt offset] | (Based32, _) => int32Signed offset | (_, NONE) => (* 32 bit absolute used as base *) int32Signed offset | _ => [] in opcodePrefix opb @ rexByte(opb, rrX, rbX, riX) @ escapePrefix opb @ opToInt opb :: modRmAndOptionalSib @ dispField end in fun opEA(opb, offset, rb, r) = opIndexedGen(opb, offset, SOME rb, NoIndex, getReg r) (* Generate a opcode plus a second modrm byte but where the "register" field in the modrm byte is actually a code. *) and opPlus2(opb, offset, rb, op2) = opIndexedGen(opb, offset, SOME rb, NoIndex, (op2, false)) and opIndexedPlus2(opb, offset, rb, ri, op2) = opIndexedGen(opb, offset, SOME rb, ri, (op2, false)) fun opIndexed (opb, offset, rb, ri, rd) = opIndexedGen(opb, offset, rb, ri, getReg rd) fun opAddress(opb, offset, rb, ri, rd) = opIndexedGen (opb, offset, SOME rb, ri, getReg rd) and mMXAddress(opb, offset, rb, ri, SSE2Reg rrC) = opIndexedGen(opb, offset, SOME rb, ri, (rrC, false)) and opAddressPlus2(opb, offset, rb, ri, op2) = opIndexedGen(opb, offset, SOME rb, ri, (op2, false)) end (* An operation with an operand that needs to go in the constant area, or in the case of native 32-bit, where the constant is stored in an object and the address of the object is inline. This just puts in the instruction and the address. The details of the constant are dealt with in putConst. *) fun opConstantOperand(opb, (*dest*)GeneralReg(rrC, rrX)) = let val pref = opcodePrefix opb (* Any opsize or lock prefix. *) val rex = rexByte(opb, rrX, false, false) val esc = escapePrefix opb (* Generate the ESCAPE code if needed. *) val opc = opToInt opb val mdrm = modrm(Based0, rrC, 0w5 (* PC-relative or absolute *)) in pref @ rex @ esc @ [opc, mdrm] @ int32Signed(tag 0) end fun immediateOperand (opn: arithOp, rd: genReg, imm: LargeInt.int, opSize) = if is8BitL imm then (* Can use one byte immediate *) opRegPlus2(case opSize of OpSize64 => Group1_8_A64 | OpSize32 => Group1_8_A32, rd, arithOpToWord opn) @ [Word8.fromLargeInt imm] else if is32bit imm then (* Need 32 bit immediate. *) opRegPlus2(case opSize of OpSize64 => Group1_32_A64 | OpSize32 => Group1_32_A32, rd, arithOpToWord opn) @ int32Signed imm else (* It won't fit in the immediate; put it in the non-address area. *) let val opc = case opSize of OpSize64 => Arith64 | OpSize32 => Arith32 in opConstantOperand(opc(opn, 0w3 (* r/m to reg *)), rd) end fun arithOpReg(opn: arithOp, rd: genReg, rs: genReg, opIs64) = opReg ((if opIs64 then Arith64 else Arith32) (opn, 0w3 (* r/m to reg *)), rd, rs) type handlerLab = addrs ref fun floatingPtOp{escape, md, nnn, rm} = opCodeBytes(FPESC escape, NONE) @ [(md <<- 0w6) orb8 (nnn <<- 0w3) orb8 rm] datatype trapEntries = StackOverflowCall | StackOverflowCallEx | HeapOverflowCall (* RTS call. We need to save any registers that may contain addresses to the stack. All the registers are preserved but not seen by the GC. 
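To summarise how immediateOperand above chooses an encoding (illustrative only, not part of the patch): a sign-extendable 8-bit value uses the short Group1 form, anything that fits in a signed 32-bit field uses the long form, and on 64-bit targets a wider constant is moved out of line and referenced PC-relative through opConstantOperand.

    (* e.g. for an ADD into a register:                                         *)
    (*   immediateOperand (ADD, rax, 5, OpSize64)       -- 8-bit immediate      *)
    (*   immediateOperand (ADD, rax, 100000, OpSize64)  -- 32-bit immediate     *)
    (*   immediateOperand (ADD, rax, 0x100000000, OpSize64)                     *)
    (*       -- on a 64-bit host: Arith64 with the constant placed in the       *)
    (*          constant area and a 32-bit PC-relative reference                *)
    val _ = is8BitL 5 andalso not (is8BitL 100000)    (* selects the 8- vs 32-bit form *)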
*) fun rtsCall(rtsEntry, regSet) = let val entry = case rtsEntry of StackOverflowCall => memRegStackOverflowCall | StackOverflowCallEx => memRegStackOverflowCallEx | HeapOverflowCall => memRegHeapOverflowCall val regSet = List.foldl(fn (r, a) => (0w1 << Word.fromInt(nReg(GenReg r))) orb a) 0w0 regSet val callInstr = opPlus2(Group5, LargeInt.fromInt entry, ebp, 0w2 (* call *)) val regSetInstr = if regSet >= 0w256 then [0wxca, (* This is actually a FAR RETURN *) wordToWord8 regSet, (* Low byte*) wordToWord8 (regSet >> 0w8) (* High byte*)] else if regSet <> 0w0 then [0wxcd, (* This is actually INT n *) wordToWord8 regSet] else [] in callInstr @ regSetInstr end (* Operations. *) type cases = word * label type memoryAddress = { base: genReg, offset: int, index: indexType } datatype 'reg regOrMemoryArg = RegisterArg of 'reg | MemoryArg of memoryAddress | NonAddressConstArg of LargeInt.int | AddressConstArg of machineWord datatype moveSize = Move64 | Move32 | Move8 | Move16 | Move32X64 | Move8X32 | Move8X64 | Move16X32 | Move16X64 and fpSize = SinglePrecision | DoublePrecision datatype operation = Move of { source: genReg regOrMemoryArg, destination: genReg regOrMemoryArg, moveSize: moveSize } | PushToStack of genReg regOrMemoryArg | PopR of genReg | ArithToGenReg of { opc: arithOp, output: genReg, source: genReg regOrMemoryArg, opSize: opSize } | ArithMemConst of { opc: arithOp, address: memoryAddress, source: LargeInt.int, opSize: opSize } | ArithMemLongConst of { opc: arithOp, address: memoryAddress, source: machineWord } | ArithByteMemConst of { opc: arithOp, address: memoryAddress, source: Word8.word } | ShiftConstant of { shiftType: shiftType, output: genReg, shift: Word8.word, opSize: opSize } | ShiftVariable of { shiftType: shiftType, output: genReg, opSize: opSize } (* Shift amount is in ecx *) | ConditionalBranch of { test: branchOps, label: label } | SetCondition of { output: genReg, test: branchOps } | LoadAddress of { output: genReg, offset: int, base: genReg option, index: indexType, opSize: opSize } | TestByteBits of { arg: genReg regOrMemoryArg, bits: Word8.word } | CallRTS of {rtsEntry: trapEntries, saveRegs: genReg list } | AllocStore of { size: int, output: genReg, saveRegs: genReg list } | AllocStoreVariable of { size: genReg, output: genReg, saveRegs: genReg list } | StoreInitialised | CallAddress of genReg regOrMemoryArg | JumpAddress of genReg regOrMemoryArg | ReturnFromFunction of int | RaiseException of { workReg: genReg } | UncondBranch of label | ResetStack of { numWords: int, preserveCC: bool } | JumpLabel of label | LoadLabelAddress of { label: label, output: genReg } | RepeatOperation of repOps | DivideAccR of {arg: genReg, isSigned: bool, opSize: opSize } | DivideAccM of {base: genReg, offset: int, isSigned: bool, opSize: opSize } | AtomicXAdd of {address: memoryAddress, output: genReg, opSize: opSize } | FPLoadFromMemory of { address: memoryAddress, precision: fpSize } | FPLoadFromFPReg of { source: fpReg, lastRef: bool } | FPLoadFromConst of { constant: machineWord, precision: fpSize } | FPStoreToFPReg of { output: fpReg, andPop: bool } | FPStoreToMemory of { address: memoryAddress, precision: fpSize, andPop: bool } | FPArithR of { opc: fpOps, source: fpReg } | FPArithConst of { opc: fpOps, source: machineWord, precision: fpSize } | FPArithMemory of { opc: fpOps, base: genReg, offset: int, precision: fpSize } | FPUnary of fpUnaryOps | FPStatusToEAX | FPLoadInt of { base: genReg, offset: int, opSize: opSize } | FPFree of fpReg | MultiplyR of { source: genReg 
regOrMemoryArg, output: genReg, opSize: opSize } | XMMArith of { opc: sse2Operations, source: xmmReg regOrMemoryArg, output: xmmReg } | XMMStoreToMemory of { toStore: xmmReg, address: memoryAddress, precision: fpSize } | XMMConvertFromInt of { source: genReg, output: xmmReg, opSize: opSize } | SignExtendForDivide of opSize | XChng of { reg: genReg, arg: genReg regOrMemoryArg, opSize: opSize } | Negative of { output: genReg, opSize: opSize } | JumpTable of { cases: label list, jumpSize: jumpSize ref } | IndexedJumpCalc of { addrReg: genReg, indexReg: genReg, jumpSize: jumpSize ref } | MoveXMMRegToGenReg of { source: xmmReg, output: genReg } | MoveGenRegToXMMReg of { source: genReg, output: xmmReg } | XMMShiftRight of { output: xmmReg, shift: Word8.word } | FPLoadCtrlWord of memoryAddress (* Load FP control word. *) | FPStoreCtrlWord of memoryAddress (* Store FP control word. *) | XMMLoadCSR of memoryAddress (* Load combined control/status word. *) | XMMStoreCSR of memoryAddress (* Store combined control/status word. *) | FPStoreInt of memoryAddress | XMMStoreInt of { source: xmmReg regOrMemoryArg, output: genReg, precision: fpSize, isTruncate: bool } | CondMove of { test: branchOps, output: genReg, source: genReg regOrMemoryArg, opSize: opSize } | LoadAbsolute of { destination: genReg, value: machineWord } + | PauseForSpinLock and jumpSize = JumpSize2 | JumpSize8 type operations = operation list fun printOperation(operation, stream) = let fun printGReg r = stream(genRegRepr(r, sz32_64)) val printFPReg = stream o fpRegRepr and printXMMReg = stream o xmmRegRepr fun printBaseOffset(b, x, i) = ( stream(Int.toString i); stream "("; printGReg b; stream ")"; case x of NoIndex => () | Index1 x => (stream "["; printGReg x; stream "]") | Index2 x => (stream "["; printGReg x; stream "*2]") | Index4 x => (stream "["; printGReg x; stream "*4]") | Index8 x => (stream "["; printGReg x; stream "*8]") ) fun printMemAddress({ base, offset, index }) = printBaseOffset(base, index, offset) fun printRegOrMemoryArg printReg (RegisterArg r) = printReg r | printRegOrMemoryArg _ (MemoryArg{ base, offset, index }) = printBaseOffset(base, index, offset) | printRegOrMemoryArg _ (NonAddressConstArg c) = stream(LargeInt.toString c) | printRegOrMemoryArg _ (AddressConstArg c) = stream(Address.stringOfWord c) fun printOpSize OpSize32 = "32" | printOpSize OpSize64 = "64" in case operation of Move { source, destination, moveSize } => ( case moveSize of Move64 => stream "Move64 " | Move32 => stream "Move32 " | Move8 => stream "Move8 " | Move16 => stream "Move16 " | Move32X64 => stream "Move32X64 " | Move8X32 => stream "Move8X32 " | Move8X64 => stream "Move8X64 " | Move16X32 => stream "Move16X32 " | Move16X64 => stream "Move16X64 "; printRegOrMemoryArg printGReg destination; stream " <= "; printRegOrMemoryArg printGReg source ) | ArithToGenReg { opc, output, source, opSize } => (stream (arithOpRepr opc); stream "RR"; stream(printOpSize opSize); stream " "; printGReg output; stream " <= "; printRegOrMemoryArg printGReg source ) | ArithMemConst { opc, address, source, opSize } => ( stream (arithOpRepr opc); stream "MC"; stream(printOpSize opSize); stream " "; printMemAddress address; stream " "; stream(LargeInt.toString source) ) | ArithMemLongConst { opc, address, source } => ( stream (arithOpRepr opc ^ "MC "); printMemAddress address; stream " <= "; stream(Address.stringOfWord source) ) | ArithByteMemConst { opc, address, source } => ( stream (arithOpRepr opc); stream "MC8"; stream " "; printMemAddress address; stream " "; 
stream(Word8.toString source) ) | ShiftConstant { shiftType, output, shift, opSize } => ( stream(shiftTypeRepr shiftType); stream(printOpSize opSize); stream " "; printGReg output; stream " by "; stream(Word8.toString shift) ) | ShiftVariable { shiftType, output, opSize } => (* Shift amount is in ecx *) ( stream(shiftTypeRepr shiftType); stream(printOpSize opSize); stream " "; printGReg output; stream " by ECX" ) | ConditionalBranch { test, label=Label{labelNo, ...} } => ( stream "Jump"; stream(branchOpRepr test); stream " L"; stream(Int.toString labelNo) ) | SetCondition { output, test } => ( stream "SetCC"; stream(branchOpRepr test); stream " => "; printGReg output ) | PushToStack source => (stream "Push "; printRegOrMemoryArg printGReg source) | PopR dest => (stream "PopR "; printGReg dest) | LoadAddress{ output, offset, base, index, opSize } => ( stream "LoadAddress"; stream(printOpSize opSize); stream " "; case base of NONE => () | SOME r => (printGReg r; stream " + "); stream(Int.toString offset); case index of NoIndex => () | Index1 x => (stream " + "; printGReg x) | Index2 x => (stream " + "; printGReg x; stream "*2 ") | Index4 x => (stream " + "; printGReg x; stream "*4 ") | Index8 x => (stream " + "; printGReg x; stream "*8 "); stream " => "; printGReg output ) | TestByteBits { arg, bits } => ( stream "TestByteBits "; printRegOrMemoryArg printGReg arg; stream " 0x"; stream(Word8.toString bits) ) | CallRTS {rtsEntry, ...} => ( stream "CallRTS "; case rtsEntry of StackOverflowCall => stream "StackOverflowCall" | HeapOverflowCall => stream "HeapOverflow" | StackOverflowCallEx => stream "StackOverflowCallEx" ) | AllocStore { size, output, ... } => (stream "AllocStore "; stream(Int.toString size); stream " => "; printGReg output ) | AllocStoreVariable { output, size, ...} => (stream "AllocStoreVariable "; printGReg size; stream " => "; printGReg output ) | StoreInitialised => stream "StoreInitialised" | CallAddress source => (stream "CallAddress "; printRegOrMemoryArg printGReg source) | JumpAddress source => (stream "JumpAddress "; printRegOrMemoryArg printGReg source) | ReturnFromFunction argsToRemove => (stream "ReturnFromFunction "; stream(Int.toString argsToRemove)) | RaiseException { workReg } => (stream "RaiseException "; printGReg workReg) | UncondBranch(Label{labelNo, ...})=> (stream "UncondBranch L"; stream(Int.toString labelNo)) | ResetStack{numWords, preserveCC} => (stream "ResetStack "; stream(Int.toString numWords); if preserveCC then stream " preserve CC" else ()) | JumpLabel(Label{labelNo, ...}) => (stream "L"; stream(Int.toString labelNo); stream ":") | LoadLabelAddress{ label=Label{labelNo, ...}, output } => (stream "LoadLabelAddress L"; stream(Int.toString labelNo); stream "=>"; printGReg output) | RepeatOperation repOp => (stream "Repeat "; stream(repOpsRepr repOp)) | DivideAccR{arg, isSigned, opSize} => ( stream(if isSigned then "DivideSigned" else "DivideUnsigned"); stream(printOpSize opSize); stream " "; printGReg arg) | DivideAccM{base, offset, isSigned, opSize} => ( stream(if isSigned then "DivideSigned" else "DivideUnsigned"); stream(printOpSize opSize); stream " "; printBaseOffset(base, NoIndex, offset)) | AtomicXAdd{address, output, opSize} => (stream "LockedXAdd"; stream(printOpSize opSize); printMemAddress address; stream " <=> "; printGReg output) | FPLoadFromMemory{address, precision=DoublePrecision} => (stream "FPLoadDouble "; printMemAddress address) | FPLoadFromMemory{address, precision=SinglePrecision} => (stream "FPLoadSingle "; printMemAddress 
address) | FPLoadFromFPReg {source, lastRef} => (stream "FPLoad "; printFPReg source; if lastRef then stream " (LAST)" else()) | FPLoadFromConst{constant, precision} => ( case precision of DoublePrecision => stream "FPLoadD " | SinglePrecision => stream "FPLoadS"; stream(Address.stringOfWord constant) ) | FPStoreToFPReg{ output, andPop } => (if andPop then stream "FPStoreAndPop => " else stream "FPStore => "; printFPReg output) | FPStoreToMemory{ address, precision=DoublePrecision, andPop: bool } => ( if andPop then stream "FPStoreDoubleAndPop => " else stream "FPStoreDouble => "; printMemAddress address ) | FPStoreToMemory{ address, precision=SinglePrecision, andPop: bool } => ( if andPop then stream "FPStoreSingleAndPop => " else stream "FPStoreSingle => "; printMemAddress address ) | FPArithR{ opc, source } => (stream(fpOpRepr opc); stream " "; printFPReg source) | FPArithConst{ opc, source, precision } => (stream(fpOpRepr opc); case precision of DoublePrecision => stream "D " | SinglePrecision => stream "S "; stream(Address.stringOfWord source)) | FPArithMemory{ opc, base, offset, precision } => (stream(fpOpRepr opc); case precision of DoublePrecision => stream "D " | SinglePrecision => stream "S "; printBaseOffset(base, NoIndex, offset)) | FPUnary opc => stream(fpUnaryRepr opc) | FPStatusToEAX => (stream "FPStatus "; printGReg eax) | FPLoadInt { base, offset, opSize} => (stream "FPLoadInt"; stream(printOpSize opSize); stream " "; printBaseOffset(base, NoIndex, offset)) | FPFree reg => (stream "FPFree "; printFPReg reg) | MultiplyR {source, output, opSize } => (stream "MultiplyR"; stream(printOpSize opSize); stream " "; printRegOrMemoryArg printGReg source; stream " *=>"; printGReg output) | XMMArith { opc, source, output } => ( stream (sse2OpRepr opc ^ "RM "); printXMMReg output; stream " <= "; printRegOrMemoryArg printXMMReg source ) | XMMStoreToMemory { toStore, address, precision=DoublePrecision } => ( stream "MoveDouble "; printXMMReg toStore; stream " => "; printMemAddress address ) | XMMStoreToMemory { toStore, address, precision=SinglePrecision } => ( stream "MoveSingle "; printXMMReg toStore; stream " => "; printMemAddress address ) | XMMConvertFromInt { source, output, opSize } => ( stream "ConvertFromInt "; stream(printOpSize opSize); stream " "; printGReg source; stream " => "; printXMMReg output ) | SignExtendForDivide opSize => ( stream "SignExtendForDivide"; stream(printOpSize opSize) ) | XChng { reg, arg, opSize } => (stream "XChng"; stream(printOpSize opSize); stream " "; printGReg reg; stream " <=> "; printRegOrMemoryArg printGReg arg) | Negative { output, opSize } => (stream "Negative"; stream(printOpSize opSize); stream " "; printGReg output) | JumpTable{cases, ...} => List.app(fn(Label{labelNo, ...}) => (stream "UncondBranch L"; stream(Int.toString labelNo); stream "\n")) cases | IndexedJumpCalc { addrReg, indexReg, jumpSize=ref jumpSize } => ( stream "IndexedJumpCalc "; printGReg addrReg; stream " += "; printGReg indexReg; stream (case jumpSize of JumpSize2 => " * 2" | JumpSize8 => " * 8 ") ) | MoveXMMRegToGenReg { source, output } => ( stream "MoveXMMRegToGenReg "; printXMMReg source; stream " => "; printGReg output ) | MoveGenRegToXMMReg { source, output } => ( stream "MoveGenRegToXMMReg "; printGReg source; stream " => "; printXMMReg output ) | XMMShiftRight { output, shift } => ( stream "XMMShiftRight "; printXMMReg output; stream " by "; stream(Word8.toString shift) ) | FPLoadCtrlWord address => ( stream "FPLoadCtrlWord "; stream " => "; printMemAddress 
address ) | FPStoreCtrlWord address => ( stream "FPStoreCtrlWord "; stream " <= "; printMemAddress address ) | XMMLoadCSR address => ( stream "XMMLoadCSR "; stream " => "; printMemAddress address ) | XMMStoreCSR address => ( stream "XMMStoreCSR "; stream " <= "; printMemAddress address ) | FPStoreInt address => ( stream "FPStoreInt "; stream " <= "; printMemAddress address ) | XMMStoreInt{ source, output, precision, isTruncate } => ( stream "XMMStoreInt"; case precision of SinglePrecision => stream "Single" | DoublePrecision => stream "Double"; if isTruncate then stream "Truncate " else stream " "; printGReg output; stream " <= "; printRegOrMemoryArg printXMMReg source ) | CondMove { test, output, source, opSize } => ( stream "CondMove"; stream(branchOpRepr test); stream(printOpSize opSize); printGReg output; stream " <= "; printRegOrMemoryArg printGReg source ) | LoadAbsolute { destination, value } => ( stream "LoadAbsolute "; printGReg destination; stream " <= "; stream(Address.stringOfWord value) ) + | PauseForSpinLock => stream "PauseForSpinLock" ; stream "\n" end datatype implement = ImplementGeneral | ImplementLiteral of machineWord fun printLowLevelCode(ops, Code{printAssemblyCode, printStream, procName, ...}) = if printAssemblyCode then ( if procName = "" (* No name *) then printStream "?" else printStream procName; printStream ":\n"; List.app(fn i => printOperation(i, printStream)) ops; printStream "\n" ) else () (* val opLen = if isX64 then OpSize64 else OpSize32 *) (* Code generate a list of operations. The list is in reverse order i.e. last instruction first. *) fun codeGenerate ops = let fun cgOp(Move{source=RegisterArg source, destination=RegisterArg output, moveSize=Move64 }) = (* Move from one general register to another. N.B. Because we're using the "store" version of the Move the source and output are reversed. *) opReg(MOVL_R_A64, source, output) | cgOp(Move{source=RegisterArg source, destination=RegisterArg output, moveSize=Move32 }) = opReg(MOVL_R_A32, source, output) | cgOp(Move{ source=NonAddressConstArg source, destination=RegisterArg output, moveSize=Move64}) = if targetArch <> Native32Bit then ( (* N.B. There is related code in getConstant that deals with PC-relative values and also checks the range of constants that need to be in the constant area. *) if source >= 0 andalso source < 0x100000000 then (* Unsigned 32 bits. We can use a 32-bit instruction to set the value because it will zero extend to 64-bits. This may also allow us to save a rex byte. *) let val (rc, rx) = getReg output val opb = opCodeBytes(MOVL_32_R rc, if rx then SOME{w=false, r=false, b=rx, x=false} else NONE) in opb @ word32Unsigned(LargeWord.fromLargeInt source) end else if source >= ~0x80000000 andalso source < 0 then (* Signed 32-bits. *) (* This is not scanned in 64-bit mode because 32-bit values aren't big enough to contain addresses. *) opRegPlus2(MOVL_32_A64, output, 0w0) @ int32Signed source else (* Too big for 32-bits; put it in the non-word area. *) opConstantOperand(MOVL_A_R64, output) ) else (* 32-bit mode. *) ( (* The RTS scans for possible addresses in MOV instructions so we can only use MOV if this is a tagged value. If it isn't we have to use something else such as XOR/ADD. In particular this is used before LOCK XADD for atomic inc/dec. We expect Move to preserve the CC so shouldn't use anything that affects it. There was a previous comment that said that using LEA wasn't a good idea. Perhaps because it takes 6 bytes. 
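For example, a tagged ML integer n is represented here as 2*n+1, so its bottom bit is set: the tagged form of 3 is 7 and is safe in a MOV immediate, whereas an even, untagged value such as a raw count has to be loaded with the LEA form below so that the RTS address scan cannot mistake it for a pointer.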
*) if source mod 2 = 0 then opIndexed(LEAL32, source, NONE, NoIndex, output) else let val (rc, rx) = getReg output val opb = opCodeBytes(MOVL_32_R rc, if rx then SOME{w=false, r=false, b=rx, x=false} else NONE) in opb @ int32Signed source end ) | cgOp(Move{ source=NonAddressConstArg source, destination=RegisterArg output, moveSize=Move32}) = if targetArch <> Native32Bit then ( (* N.B. There is related code in getConstant that deals with PC-relative values and also checks the range of constants that need to be in the constant area. *) if source >= 0 andalso source < 0x100000000 then (* Unsigned 32 bits. We can use a 32-bit instruction to set the value because it will zero extend to 64-bits. This may also allow us to save a rex byte. *) let val (rc, rx) = getReg output val opb = opCodeBytes(MOVL_32_R rc, if rx then SOME{w=false, r=false, b=rx, x=false} else NONE) in opb @ word32Unsigned(LargeWord.fromLargeInt source) end else if source >= ~0x80000000 andalso source < 0 then (* Signed 32-bits. *) (* This is not scanned in 64-bit mode because 32-bit values aren't big enough to contain addresses. *) opRegPlus2(MOVL_32_A64, output, 0w0) @ int32Signed source else (* Too big for 32-bits; put it in the non-word area. *) opConstantOperand(MOVL_A_R64, output) ) else (* 32-bit mode. *) ( (* The RTS scans for possible addresses in MOV instructions so we can only use MOV if this is a tagged value. If it isn't we have to use something else such as XOR/ADD. In particular this is used before LOCK XADD for atomic inc/dec. We expect Move to preserve the CC so shouldn't use anything that affects it. There was a previous comment that said that using LEA wasn't a good idea. Perhaps because it takes 6 bytes. *) if source mod 2 = 0 then opIndexed(LEAL32, source, NONE, NoIndex, output) else let val (rc, rx) = getReg output val opb = opCodeBytes(MOVL_32_R rc, if rx then SOME{w=false, r=false, b=rx, x=false} else NONE) in opb @ int32Signed source end ) | cgOp(Move{ source=AddressConstArg _, destination=RegisterArg output, moveSize=Move64 }) = ( (* The constant area is currently PolyWords. That means we MUST use a 32-bit load in 32-in-64. *) targetArch = Native64Bit orelse raise InternalError "Move64 in 32-bit"; (* Put address constants in the constant area. *) opConstantOperand(MOVL_A_R64, output) ) | cgOp(Move{ source=AddressConstArg _, destination=RegisterArg output, moveSize=Move32 }) = ( case targetArch of Native64Bit => raise InternalError "Move32 - AddressConstArg" | ObjectId32Bit => (* Put address constants in the constant area. *) (* The constant area is currently PolyWords. That means we MUST use a 32-bit load in 32-in-64. *) opConstantOperand(MOVL_A_R32, output) | Native32Bit => (* Immediate constant *) let val (rc, _) = getReg output in opCodeBytes(MOVL_32_R rc, NONE) @ int32Signed(tag 0) end ) | cgOp(LoadAbsolute{ destination, ... }) = ( (* Immediate address constant. This is currently only used the special case of loading the address of PolyX86GetThreadData in a callback when we don't have rbx in 32-in-64. 
*) case targetArch of Native32Bit => let val (rc, _) = getReg destination in opCodeBytes(MOVL_32_R rc, NONE) @ int32Signed(tag 0) end | Native64Bit => opConstantOperand(MOVL_A_R64, destination) | ObjectId32Bit => let val (rc, rx) = getReg destination in opCodeBytes(MOVL_64_R rc, SOME{w=true, r=false, b=rx, x=false}) @ largeWordToBytes(LargeWord.fromLargeInt(tag 0), 8) end ) | cgOp(Move{source=MemoryArg{base, offset, index}, destination=RegisterArg output, moveSize=Move32 }) = opAddress(MOVL_A_R32, LargeInt.fromInt offset, base, index, output) | cgOp(Move{source=MemoryArg{base, offset, index}, destination=RegisterArg output, moveSize=Move64 }) = opAddress(MOVL_A_R64, LargeInt.fromInt offset, base, index, output) | cgOp(Move{source=MemoryArg{base, offset, index}, destination=RegisterArg output, moveSize=Move8 }) = (* We don't need a REX.W bit here because the top 32-bits of a 64-bit register will always be zeroed. *) opAddress(MOVZB, LargeInt.fromInt offset, base, index, output) | cgOp(Move{source=RegisterArg source, destination=RegisterArg output, moveSize=Move8 }) = let (* Zero extend an 8-bit value in a register to 32/64 bits. *) val (rrC, rrX) = getReg output val (rbC, rbX) = getReg source (* We don't need a REX.W bit here because the top 32-bits of a 64-bit register will always be zeroed but we may need a REX byte if we're using esi or edi. *) val rexByte = if rrC < 0w4 andalso not rrX andalso not rbX then NONE else if hostIsX64 then SOME {w=false, r=rrX, b=rbX, x=false} else raise InternalError "Move8 with esi/edi" in opCodeBytes(MOVZB, rexByte) @ [modrm(Register, rrC, rbC)] end | cgOp(Move{source=MemoryArg{base, offset, index}, destination=RegisterArg output, moveSize=Move8X32 }) = opAddress(MOVSXB32, LargeInt.fromInt offset, base, index, output) | cgOp(Move{source=MemoryArg{base, offset, index}, destination=RegisterArg output, moveSize=Move8X64 }) = (* But we will need a Rex.W here. *) opAddress(MOVSXB64, LargeInt.fromInt offset, base, index, output) | cgOp(Move{moveSize=Move16, source=MemoryArg{base, offset, index}, destination=RegisterArg output }) = (* No need for Rex.W *) opAddress(MOVZW, LargeInt.fromInt offset, base, index, output) | cgOp(Move{moveSize=Move16X32, source=MemoryArg{base, offset, index}, destination=RegisterArg output }) = opAddress(MOVSXW32, LargeInt.fromInt offset, base, index, output) | cgOp(Move{moveSize=Move16X64, source=MemoryArg{base, offset, index}, destination=RegisterArg output }) = (* But we do need Rex.W here *) opAddress(MOVSXW64, LargeInt.fromInt offset, base, index, output) | cgOp(Move{moveSize=Move32X64, source=RegisterArg source, destination=RegisterArg output }) = (* We should have a REX.W bit here. *) opReg(MOVSXD, output, source) | cgOp(Move{moveSize=Move32X64, source=MemoryArg{base, offset, index}, destination=RegisterArg output }) = (* We should have a REX.W bit here. *) opAddress(MOVSXD, LargeInt.fromInt offset, base, index, output) | cgOp(Move{moveSize=Move32X64, ...}) = raise InternalError "cgOp: LoadNonWord Size32Bit" | cgOp(LoadAddress{ offset, base, index, output, opSize }) = (* This provides a mixture of addition and multiplication in a single instruction. 
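For instance (illustrative operands), LoadAddress{output=eax, base=SOME ebx, index=Index4 ecx, offset=8, opSize=OpSize32} assembles to lea eax,[ebx+ecx*4+8], computing eax := ebx + 4*ecx + 8 in a single instruction and without touching the condition codes.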
*) opIndexed(case opSize of OpSize64 => LEAL64 | OpSize32 => LEAL32, LargeInt.fromInt offset, base, index, output) | cgOp(ArithToGenReg{ opc, output, source=RegisterArg source, opSize }) = arithOpReg (opc, output, source, opSize=OpSize64) | cgOp(ArithToGenReg{ opc, output, source=NonAddressConstArg source, opSize }) = let (* On the X86/32 we use CMP with literal sources to compare with an address and the RTS searches for them in the code. Any non-address constant must be tagged. Most will be but we might want to use this to compare with the contents of a LargeWord value. *) val _ = if hostIsX64 orelse is8BitL source orelse opc <> CMP orelse IntInf.andb(source, 1) = 1 then () else raise InternalError "CMP with constant that looks like an address" in immediateOperand(opc, output, source, opSize) end | cgOp(ArithToGenReg{ opc, output, source=AddressConstArg _, opSize }) = (* This is only used for opc=CMP to compare addresses for equality. *) if hostIsX64 then (* We use this in 32-in-64 as well as native 64-bit. *) opConstantOperand( (case opSize of OpSize64 => Arith64 | OpSize32 => Arith32) (opc, 0w3), output) else let val (rc, _) = getReg output val opb = opCodeBytes(Group1_32_A32 (* group1, 32 bit immediate *), NONE) val mdrm = modrm(Register, arithOpToWord opc, rc) in opb @ [mdrm] @ int32Signed(tag 0) end | cgOp(ArithToGenReg{ opc, output, source=MemoryArg{offset, base, index}, opSize }) = opAddress((case opSize of OpSize64 => Arith64 | OpSize32 => Arith32) (opc, 0w3), LargeInt.fromInt offset, base, index, output) | cgOp(ArithByteMemConst{ opc, address={offset, base, index}, source }) = opIndexedPlus2(Group1_8_a (* group1, 8 bit immediate *), LargeInt.fromInt offset, base, index, arithOpToWord opc) @ [source] | cgOp(ArithMemConst{ opc, address={offset, base, index}, source, opSize }) = if is8BitL source then (* Can use one byte immediate *) opIndexedPlus2(case opSize of OpSize64 => Group1_8_A64 | OpSize32 => Group1_8_A32 (* group1, 8 bit immediate *), LargeInt.fromInt offset, base, index, arithOpToWord opc) @ [Word8.fromLargeInt source] else (* Need 32 bit immediate. *) opIndexedPlus2(case opSize of OpSize64 => Group1_32_A64 | OpSize32 => Group1_32_A32(* group1, 32 bit immediate *), LargeInt.fromInt offset, base, index, arithOpToWord opc) @ int32Signed source | cgOp(ArithMemLongConst{ opc, address={offset, base, index}, ... }) = (* Currently this is always a comparison. It is only valid in 32-bit mode because the constant is only 32-bits. *) if hostIsX64 then raise InternalError "ArithMemLongConst in 64-bit mode" else let val opb = opIndexedPlus2 (Group1_32_A32, LargeInt.fromInt offset, base, index, arithOpToWord opc) in opb @ int32Signed(tag 0) end | cgOp(ShiftConstant { shiftType, output, shift, opSize }) = if shift = 0w1 then opRegPlus2(case opSize of OpSize64 => Group2_1_A64 | OpSize32 => Group2_1_A32, output, shiftTypeToWord shiftType) else opRegPlus2(case opSize of OpSize64 => Group2_8_A64 | OpSize32 => Group2_8_A32, output, shiftTypeToWord shiftType) @ [shift] | cgOp(ShiftVariable { shiftType, output, opSize }) = opRegPlus2(case opSize of OpSize64 => Group2_CL_A64 | OpSize32 => Group2_CL_A32, output, shiftTypeToWord shiftType) | cgOp(TestByteBits{arg=RegisterArg reg, bits}) = let (* Test the bottom bit and jump depending on its value. This is used for tag tests in arbitrary precision operations and also for testing for short/long values. *) val (regNum, rx) = getReg reg in if reg = eax then (* Special instruction for testing accumulator. Can use an 8-bit test. 
*) opCodeBytes(TEST_ACC8, NONE) @ [bits] else if hostIsX64 then let (* We can use a REX code to force it to always use the low order byte. *) val opb = opCodeBytes(Group3_a, if rx orelse regNum >= 0w4 then SOME{w=false, r=false, b=rx, x=false} else NONE) val mdrm = modrm (Register, 0w0 (* test *), regNum) in opb @ [mdrm, bits] end else if reg = ebx orelse reg = ecx orelse reg = edx (* can we use an 8-bit test? *) then (* Yes. The register value refers to low-order byte. *) let val opb = opCodeBytes(Group3_a, NONE) val mdrm = modrm(Register, 0w0 (* test *), regNum) in opb @ [mdrm, bits] end else let val opb = opCodeBytes(Group3_A32, NONE) val mdrm = modrm (Register, 0w0 (* test *), regNum) in opb @ mdrm :: word32Unsigned(Word8.toLarge bits) end end | cgOp(TestByteBits{arg=MemoryArg{base, offset, index}, bits}) = (* Test the tag bit and set the condition code. *) opIndexedPlus2(Group3_a, LargeInt.fromInt offset, base, index, 0w0 (* test *)) @ [ bits] | cgOp(TestByteBits _) = raise InternalError "cgOp: TestByteBits" | cgOp(ConditionalBranch{ test=opc, ... }) = opCodeBytes(CondJump32 opc, NONE) @ word32Unsigned 0w0 | cgOp(SetCondition{ output, test}) = let val (rrC, rx) = getReg output (* In 64-bit mode we can specify the low-order byte of RSI/RDI but we must use a REX prefix. This isn't possible in 32-bit mode. *) in if hostIsX64 orelse rrC < 0w4 then let val opb = opCodeBytes(SetCC test, if rx orelse rrC >= 0w4 then SOME{w=false, r=false, b=rx, x=false} else NONE) val mdrm = modrm (Register, 0w0, rrC) in opb @ [mdrm] end else raise InternalError "High byte register" end | cgOp(CallRTS{rtsEntry, saveRegs}) = rtsCall(rtsEntry, saveRegs) | cgOp(RepeatOperation repOp) = let (* We don't explicitly clear the direction flag. Should that be done? *) val opb = opCodeBytes(REP, NONE) (* Put in a rex prefix to force 64-bit mode. *) val optRex = if case repOp of STOS64 => true | MOVS64 => true | _ => false then [rex{w=true, r=false, b=false, x=false}] else [] val repOp = repOpsToWord repOp in opb @ optRex @ [repOp] end | cgOp(DivideAccR{arg, isSigned, opSize}) = opRegPlus2(case opSize of OpSize64 => Group3_A64 | OpSize32 => Group3_A32, arg, if isSigned then 0w7 else 0w6) | cgOp(DivideAccM{base, offset, isSigned, opSize}) = opPlus2(case opSize of OpSize64 => Group3_A64 | OpSize32 => Group3_A32, LargeInt.fromInt offset, base, if isSigned then 0w7 else 0w6) | cgOp(AtomicXAdd{address={offset, base, index}, output, opSize}) = (* Locked exchange-and-add. We need the lock prefix before the REX prefix. *) opAddress(case opSize of OpSize64 => LOCK_XADD64 | OpSize32 => LOCK_XADD32, LargeInt.fromInt offset, base, index, output) | cgOp(PushToStack(RegisterArg reg)) = let val (rc, rx) = getReg reg in (* Always 64-bit but a REX prefix may be needed for the register. *) opCodeBytes(PUSH_R rc, if rx then SOME{w=false, b = true, x=false, r = false } else NONE) end | cgOp(PushToStack(MemoryArg{base, offset, index})) = opAddressPlus2(Group5, LargeInt.fromInt offset, base, index, 0w6 (* push *)) | cgOp(PushToStack(NonAddressConstArg constnt)) = if is8BitL constnt then opCodeBytes(PUSH_8, NONE) @ [Word8.fromLargeInt constnt] else if is32bit constnt then opCodeBytes(PUSH_32, NONE) @ int32Signed constnt else (* It won't fit in the immediate; put it in the non-address area. *) let val opb = opCodeBytes(Group5, NONE) val mdrm = modrm(Based0, 0w6 (* push *), 0w5 (* PC rel *)) in opb @ [mdrm] @ int32Signed(tag 0) end | cgOp(PushToStack(AddressConstArg _)) = ( case targetArch of Native64Bit => (* Put it in the constant area. 
*) let val opb = opCodeBytes(Group5, NONE) val mdrm = modrm(Based0, 0w6 (* push *), 0w5 (* PC rel *)); in opb @ [mdrm] @ int32Signed(tag 0) end | Native32Bit => opCodeBytes(PUSH_32, NONE) @ int32Signed(tag 0) | ObjectId32Bit => (* We can't do this. The constant area contains 32-bit quantities and 32-bit literals are sign-extended rather than zero-extended. *) raise InternalError "PushToStack:AddressConstArg" ) | cgOp(PopR reg ) = let val (rc, rx) = getReg reg in (* Always 64-bit but a REX prefix may be needed for the register. Because the register is encoded in the instruction the rex bit for the register is b not r. *) opCodeBytes(POP_R rc, if rx then SOME{w=false, b = true, x=false, r = false } else NONE) end | cgOp(Move{source=RegisterArg toStore, destination=MemoryArg{offset, base, index}, moveSize=Move64}) = opAddress(MOVL_R_A64, LargeInt.fromInt offset, base, index, toStore) | cgOp(Move{source=RegisterArg toStore, destination=MemoryArg{offset, base, index}, moveSize=Move32}) = opAddress(MOVL_R_A32, LargeInt.fromInt offset, base, index, toStore) | cgOp(Move{source=NonAddressConstArg toStore, destination=MemoryArg{offset, base, index}, moveSize=Move64 }) = ( (* Short constant. In 32-bit mode this is scanned as a possible address. That means we can't have an untagged constant in it. That's not a problem in 64-bit mode. There's a special check for using this to set the length word on newly allocated memory. *) targetArch <> Native32Bit orelse toStore = 0 orelse toStore mod 2 = 1 orelse offset = ~ (Word.toInt wordSize) orelse raise InternalError "cgOp: StoreConstToMemory not tagged"; opAddressPlus2(MOVL_32_A64, LargeInt.fromInt offset, base, index, 0w0) @ int32Signed toStore ) | cgOp(Move{source=NonAddressConstArg toStore, destination=MemoryArg{offset, base, index}, moveSize=Move32 }) = ( (* Short constant. In 32-bit mode this is scanned as a possible address. That means we can't have an untagged constant in it. That's not a problem in 64-bit mode. There's a special check for using this to set the length word on newly allocated memory. *) targetArch <> Native32Bit orelse toStore = 0 orelse toStore mod 2 = 1 orelse offset = ~ (Word.toInt wordSize) orelse raise InternalError "cgOp: StoreConstToMemory not tagged"; opAddressPlus2(MOVL_32_A32, LargeInt.fromInt offset, base, index, 0w0) @ int32Signed toStore ) | cgOp(Move{source=AddressConstArg _, destination=MemoryArg{offset, base, index}, moveSize=Move32}) = (* This is not used for addresses even in 32-in-64. We don't scan for addresses after MOVL_32_A. *) if targetArch <> Native32Bit then raise InternalError "StoreLongConstToMemory in 64-bit mode" else opAddressPlus2(MOVL_32_A32, LargeInt.fromInt offset, base, index, 0w0) @ int32Signed (tag 0) | cgOp(Move{source=AddressConstArg _, destination=MemoryArg _, ...}) = raise InternalError "cgOp: Move - AddressConstArg => MemoryArg" | cgOp(Move{ moveSize = Move8, source=RegisterArg toStore, destination=MemoryArg{offset, base, index} }) = let val (rrC, _) = getReg toStore (* In 64-bit mode we can specify the low-order byte of RSI/RDI but we must use a REX prefix. This isn't possible in 32-bit mode. 
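For example, mov byte ptr [rdi],sil is encoded as 40 88 37: the otherwise-empty REX prefix 40 makes register number 6 select SIL, whereas without a REX prefix the same ModRM byte would select DH instead.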
*) val opcode = if hostIsX64 then MOVB_R_A64{forceRex= rrC >= 0w4} else if rrC < 0w4 then MOVB_R_A32 else raise InternalError "High byte register" in opAddress(opcode, LargeInt.fromInt offset, base, index, toStore) end | cgOp(Move{ moveSize = Move16, source=RegisterArg toStore, destination=MemoryArg{offset, base, index}}) = opAddress(MOVL_R_A16, LargeInt.fromInt offset, base, index, toStore) | cgOp(Move{ moveSize = Move8, source=NonAddressConstArg toStore, destination=MemoryArg{offset, base, index}}) = opAddressPlus2(MOVB_8_A, LargeInt.fromInt offset, base, index, 0w0) @ [Word8.fromLargeInt toStore] | cgOp(Move _) = raise InternalError "Move: Unimplemented arguments" (* Allocation is dealt with by expanding the code. *) | cgOp(AllocStore _) = raise InternalError "cgOp: AllocStore" | cgOp(AllocStoreVariable _) = raise InternalError "cgOp: AllocStoreVariable" | cgOp StoreInitialised = raise InternalError "cgOp: StoreInitialised" | cgOp(CallAddress(NonAddressConstArg _)) = (* Call to the start of the code. Offset is patched in later. *) opCodeBytes (CALL_32, NONE) @ int32Signed 0 | cgOp(CallAddress(AddressConstArg _)) = if targetArch = Native64Bit then let val opc = opCodeBytes(Group5, NONE) val mdrm = modrm(Based0, 0w2 (* call *), 0w5 (* PC rel *)) in opc @ [mdrm] @ int32Signed(tag 0) end (* Because this is a relative branch we need to point this at itself. Until it is set to the relative offset of the destination it needs to contain an address within the code and this could be the last instruction. *) else opCodeBytes (CALL_32, NONE) @ int32Signed ~5 | cgOp(CallAddress(RegisterArg reg)) = opRegPlus2(Group5, reg, 0w2 (* call *)) | cgOp(CallAddress(MemoryArg{base, offset, index})) = opAddressPlus2(Group5, LargeInt.fromInt offset, base, index, 0w2 (* call *)) | cgOp(JumpAddress(NonAddressConstArg _)) = (* Jump to the start of the current function. Offset is patched in later. *) opCodeBytes (JMP_32, NONE) @ int32Signed 0 | cgOp(JumpAddress (AddressConstArg _)) = if targetArch = Native64Bit then let val opb = opCodeBytes (Group5, NONE) val mdrm = modrm(Based0, 0w4 (* jmp *), 0w5 (* PC rel *)) in opb @ [mdrm] @ int32Signed(tag 0) end else opCodeBytes (JMP_32, NONE) @ int32Signed ~5 (* As with Call. *) | cgOp(JumpAddress (RegisterArg reg)) = (* Used as part of indexed case - not for entering a function. *) opRegPlus2(Group5, reg, 0w4 (* jmp *)) | cgOp(JumpAddress(MemoryArg{base, offset, index})) = opAddressPlus2(Group5, LargeInt.fromInt offset, base, index, 0w4 (* jmp *)) | cgOp(ReturnFromFunction args) = if args = 0 then opCodeBytes(RET, NONE) else let val offset = Word.fromInt args * nativeWordSize in opCodeBytes(RET_16, NONE) @ [wordToWord8 offset, wordToWord8(offset >> 0w8)] end | cgOp (RaiseException { workReg }) = opEA(if hostIsX64 then MOVL_A_R64 else MOVL_A_R32, LargeInt.fromInt memRegHandlerRegister, ebp, workReg) @ opAddressPlus2(Group5, 0, workReg, NoIndex, 0w4 (* jmp *)) | cgOp(UncondBranch _) = opToInt JMP_32 :: word32Unsigned 0w0 | cgOp(ResetStack{numWords, preserveCC}) = let val bytes = Word.toLargeInt(Word.fromInt numWords * nativeWordSize) in (* If we don't need to preserve the CC across the reset we use ADD since it's shorter. *) if preserveCC then opEA(if hostIsX64 then LEAL64 else LEAL32, bytes, esp, esp) else immediateOperand(ADD, esp, bytes, if hostIsX64 then OpSize64 else OpSize32) end | cgOp(JumpLabel _) = [] (* No code. *) | cgOp(LoadLabelAddress{ output, ... }) = (* Load the address of a label. Used when setting up an exception handler or in indexed cases. 
*) (* On X86/64 we can use pc-relative addressing to set the start of the handler. On X86/32 we have to load the address of the start of the code and add an offset. *) if hostIsX64 then opConstantOperand(LEAL64, output) else let val (rc, _) = getReg output in opCodeBytes(MOVL_32_R rc , NONE) @ int32Signed(tag 0) @ opRegPlus2(Group1_32_A32, output, arithOpToWord ADD) @ int32Signed 0 end | cgOp (FPLoadFromMemory {address={ base, offset, index }, precision}) = let val loadInstr = case precision of DoublePrecision => FPESC 0w5 | SinglePrecision => FPESC 0w1 in opAddressPlus2(loadInstr, LargeInt.fromInt offset, base, index, 0wx0) end | cgOp (FPLoadFromFPReg{source=FloatingPtReg fp, ...}) = (* Assume there's nothing currently on the stack. *) floatingPtOp({escape=0w1, md=0w3, nnn=0w0, rm= fp + 0w0}) (* FLD ST(r1) *) | cgOp (FPLoadFromConst {precision, ...} ) = (* The real constant here is actually the address of a memory object. FLD takes the address as the argument and in 32-bit mode we use an absolute address. In 64-bit mode we need to put the constant at the end of the code segment and use PC-relative addressing which happens to be encoded in the same way. There are special cases for zero and one but it's probably too much work to detect them. *) let val esc = case precision of SinglePrecision => 0w1 | DoublePrecision => 0w5 val opb = opCodeBytes(FPESC esc, NONE) (* FLD [Constant] *) val mdrm = modrm (Based0, 0w0, 0w5 (* constant address/PC-relative *)) in opb @ [mdrm] @ int32Signed(tag 0) end | cgOp (FPStoreToFPReg{ output=FloatingPtReg dest, andPop }) = (* Assume there's one item on the stack. *) floatingPtOp({escape=0w5, md=0w3, nnn=if andPop then 0wx3 else 0wx2, rm = dest+0w1(* One item *)}) (* FSTP ST(n+1) *) | cgOp (FPStoreToMemory{address={ base, offset, index}, precision, andPop }) = let val storeInstr = case precision of DoublePrecision => FPESC 0w5 | SinglePrecision => FPESC 0w1 val subInstr = if andPop then 0wx3 else 0wx2 in opAddressPlus2(storeInstr, LargeInt.fromInt offset, base, index, subInstr) end | cgOp (FPArithR{ opc, source = FloatingPtReg src}) = floatingPtOp({escape=0w0, md=0w3, nnn=fpOpToWord opc, rm=src + 0w1 (* One item already there *)}) | cgOp (FPArithConst{ opc, precision, ... }) = (* See comment on FPLoadFromConst *) let val fpesc = case precision of DoublePrecision => 0w4 | SinglePrecision => 0w0 val opb = opCodeBytes(FPESC fpesc, NONE) (* FADD etc [constnt] *) val mdrm = modrm (Based0, fpOpToWord opc, 0w5 (* constant address *)) in opb @ [mdrm] @ int32Signed(tag 0) end | cgOp (FPArithMemory{ opc, base, offset, precision }) = let val fpesc = case precision of DoublePrecision => 0w4 | SinglePrecision => 0w0 in opPlus2(FPESC fpesc, LargeInt.fromInt offset, base, fpOpToWord opc) (* FADD/FMUL etc [r2] *) end | cgOp (FPUnary opc ) = let val {rm, nnn} = fpUnaryToWords opc in floatingPtOp({escape=0w1, md=0w3, nnn=nnn, rm=rm}) (* FCHS etc *) end | cgOp (FPStatusToEAX ) = opCodeBytes(FPESC 0w7, NONE) @ [0wxe0] (* FNSTSW AX *) | cgOp (FPFree(FloatingPtReg reg)) = floatingPtOp({escape=0w5, md=0w3, nnn=0w0, rm=reg}) (* FFREE FP(n) *) | cgOp (FPLoadInt{base, offset, opSize=OpSize64}) = (* fildl (esp) in 32-bit mode or fildq (esp) in 64-bit mode. *) opPlus2(FPESC 0w7, LargeInt.fromInt offset, base, 0w5) | cgOp (FPLoadInt{base, offset, opSize=OpSize32}) = (* fildl (esp) in 32-bit mode or fildq (esp) in 64-bit mode. 
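This OpSize32 case is the 32-bit form, fild dword ptr, using the DB escape with reg field 0.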
*) opPlus2(FPESC 0w3, LargeInt.fromInt offset, base, 0w0) | cgOp (MultiplyR {source=RegisterArg srcReg, output, opSize}) = (* We use the 0F AF form of IMUL rather than the Group3 MUL or IMUL because the former allows us to specify the destination register. The Group3 forms produce double length results in RAX:RDX/EAX:EDX but we only ever want the low-order half. *) opReg(case opSize of OpSize64 => IMUL64 | OpSize32 => IMUL32 (* 2 byte opcode *), output, srcReg) | cgOp (MultiplyR {source=MemoryArg{base, offset, index}, output, opSize}) = (* This may be used for large-word multiplication. *) opAddress(case opSize of OpSize64 => IMUL64 | OpSize32 => IMUL32 (* 2 byte opcode *), LargeInt.fromInt offset, base, index, output) | cgOp(MultiplyR {source=NonAddressConstArg constnt, output, opSize}) = (* If the constant is an 8-bit or 32-bit value we are actually using a three-operand instruction where the argument can be a register or memory and the destination register does not need to be the same as the source. *) if is8BitL constnt then opReg(case opSize of OpSize64 => IMUL_C8_64 | OpSize32 => IMUL_C8_32, output, output) @ [Word8.fromLargeInt constnt] else if is32bit constnt then opReg(case opSize of OpSize64 => IMUL_C32_64 | OpSize32 => IMUL_C32_32, output, output) @ int32Signed constnt else opConstantOperand(case opSize of OpSize64 => IMUL64 | OpSize32 => IMUL32, output) | cgOp(MultiplyR {source=AddressConstArg _, ...}) = raise InternalError "Multiply - address constant" | cgOp (XMMArith { opc, source=MemoryArg{base, offset, index}, output }) = mMXAddress(SSE2Ops opc, LargeInt.fromInt offset, base, index, output) | cgOp (XMMArith { opc, source=AddressConstArg _, output=SSE2Reg rrC }) = let (* The real constant here is actually the address of an 8-byte memory object. In 32-bit mode we put this address into the code and retain this memory object. In 64-bit mode we copy the real value out of the memory object into the non-address constant area and use PC-relative addressing. These happen to be encoded the same way. *) val opb = opCodeBytes(SSE2Ops opc, NONE) val mdrm = modrm (Based0, rrC, 0w5 (* constant address/PC-relative *)) in opb @ [mdrm] @ int32Signed(tag 0) end | cgOp (XMMArith { opc, source=RegisterArg(SSE2Reg rrS), output=SSE2Reg rrC }) = let val oper = SSE2Ops opc val pref = opcodePrefix oper val esc = escapePrefix oper val opc = opToInt oper val mdrm = modrm(Register, rrC, rrS) in pref @ esc @ [opc, mdrm] end | cgOp (XMMArith { opc, source=NonAddressConstArg _, output=SSE2Reg rrC }) = let val _ = hostIsX64 orelse raise InternalError "XMMArith-NonAddressConstArg in 32-bit mode" (* This is currently used for 32-bit float arguments but can equally be used for 64-bit values since the actual argument will always be put in the 64-bit constant area. *) val opb = opCodeBytes(SSE2Ops opc, NONE) val mdrm = modrm (Based0, rrC, 0w5 (* constant address/PC-relative *)) in opb @ [mdrm] @ int32Signed(tag 0) end | cgOp (XMMStoreToMemory { toStore, address={base, offset, index}, precision }) = let val oper = case precision of DoublePrecision => SSE2StoreDouble | SinglePrecision => SSE2StoreSingle in mMXAddress(oper, LargeInt.fromInt offset, base, index, toStore) end | cgOp (XMMConvertFromInt { source, output=SSE2Reg rrC, opSize }) = let (* The source is a general register and the output a XMM register. *) (* TODO: The source can be a memory location. 
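cvtsi2sd takes a signed 32- or 64-bit integer from a general register and converts it to a double in the destination XMM register, which is why both register files appear in the encoding below.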
*) val (rbC, rbX) = getReg source val oper = case opSize of OpSize64 => CVTSI2SD64 | OpSize32 => CVTSI2SD32 in (* This is a special case with both an XMM and general register. *) opcodePrefix oper @ rexByte(oper, false, rbX, false) @ escapePrefix oper @ [opToInt oper, modrm(Register, rrC, rbC)] end | cgOp (SignExtendForDivide OpSize64) = opCodeBytes(CQO_CDQ64, SOME {w=true, r=false, b=false, x=false}) | cgOp (SignExtendForDivide OpSize32) = opCodeBytes(CQO_CDQ32, NONE) | cgOp (XChng { reg, arg=RegisterArg regY, opSize }) = opReg(case opSize of OpSize64 => XCHNG64 | OpSize32 => XCHNG32, reg, regY) | cgOp (XChng { reg, arg=MemoryArg{offset, base, index}, opSize }) = opAddress(case opSize of OpSize64 => XCHNG64 | OpSize32 => XCHNG32, LargeInt.fromInt offset, base, index, reg) | cgOp (XChng _) = raise InternalError "cgOp: XChng" | cgOp (Negative {output, opSize}) = opRegPlus2(case opSize of OpSize64 => Group3_A64 | OpSize32 => Group3_A32, output, 0w3 (* neg *)) | cgOp (JumpTable{cases, jumpSize=ref jumpSize}) = let val _ = jumpSize = JumpSize8 orelse raise InternalError "cgOp: JumpTable" (* Make one jump for each case and pad it 8 bytes with Nops. *) fun makeJump (_, l) = opToInt JMP_32 :: word32Unsigned 0w0 @ [opToInt NOP, opToInt NOP, opToInt NOP] @ l in List.foldl makeJump [] cases end | cgOp(IndexedJumpCalc{ addrReg, indexReg, jumpSize=ref jumpSize }) = ( jumpSize = JumpSize8 orelse raise InternalError "cgOp: IndexedJumpCalc"; (* Should currently be JumpSize8 which requires a multiplier of 4 and 4 to be subtracted to remove the shifted tag. *) opAddress(if hostIsX64 then LEAL64 else LEAL32, ~4, addrReg, Index4 indexReg, addrReg) ) | cgOp(MoveXMMRegToGenReg { source=SSE2Reg rrC, output }) = let (* The source is a XMM register and the output a general register. *) val (rbC, rbX) = getReg output val oper = MOVDFromXMM in (* This is a special case with both an XMM and general register. *) opcodePrefix oper @ rexByte(oper, false, rbX, false) @ escapePrefix oper @ [opToInt oper, modrm(Register, rrC, rbC)] end | cgOp(MoveGenRegToXMMReg { source, output=SSE2Reg rrC }) = let (* The source is a general register and the output a XMM register. *) val (rbC, rbX) = getReg source val oper = MOVQToXMM in (* This is a special case with both an XMM and general register. *) (* This needs to move the whole 64-bit value. TODO: This is inconsistent with MoveXMMRegToGenReg *) opcodePrefix oper @ rexByte(oper, false, rbX, false) @ escapePrefix oper @ [opToInt oper, modrm(Register, rrC, rbC)] end | cgOp(XMMShiftRight { output=SSE2Reg rrC, shift }) = let val oper = PSRLDQ in opcodePrefix oper @ escapePrefix oper @ [opToInt oper, modrm(Register, 0w3, rrC), shift] end | cgOp(FPLoadCtrlWord {base, offset, index}) = opIndexedPlus2(FPESC 0w1, LargeInt.fromInt offset, base, index, 0w5) | cgOp(FPStoreCtrlWord {base, offset, index}) = opIndexedPlus2(FPESC 0w1, LargeInt.fromInt offset, base, index, 0w7) | cgOp(XMMLoadCSR {base, offset, index}) = opIndexedPlus2(LDSTMXCSR, LargeInt.fromInt offset, base, index, 0w2) | cgOp(XMMStoreCSR {base, offset, index}) = opIndexedPlus2(LDSTMXCSR, LargeInt.fromInt offset, base, index, 0w3) | cgOp(FPStoreInt {base, offset, index}) = (* fistp dword ptr [esp] in 32-bit mode or fistp qword ptr [rsp] in 64-bit mode. *) if hostIsX64 then opIndexedPlus2(FPESC 0w7, LargeInt.fromInt offset, base, index, 0w7) else opIndexedPlus2(FPESC 0w3, LargeInt.fromInt offset, base, index, 0w3) | cgOp(XMMStoreInt {source, output, precision, isTruncate}) = let (* The destination is a general register. 
The source is an XMM register or memory. *) val (rbC, rbX) = getReg output val oper = case (hostIsX64, precision, isTruncate) of (false, DoublePrecision, false) => CVTSD2SI32 | (true, DoublePrecision, false) => CVTSD2SI64 | (false, SinglePrecision, false) => CVTSS2SI32 | (true, SinglePrecision, false) => CVTSS2SI64 | (false, DoublePrecision, true) => CVTTSD2SI32 | (true, DoublePrecision, true) => CVTTSD2SI64 | (false, SinglePrecision, true) => CVTTSS2SI32 | (true, SinglePrecision, true) => CVTTSS2SI64 in case source of MemoryArg{base, offset, index} => opAddress(oper, LargeInt.fromInt offset, base, index, output) | RegisterArg(SSE2Reg rrS) => opcodePrefix oper @ rexByte(oper, rbX, false, false) @ escapePrefix oper @ [opToInt oper, modrm(Register, rbC, rrS)] | _ => raise InternalError "XMMStoreInt: Not register or memory" end | cgOp(CondMove { test, output, source=RegisterArg source, opSize=OpSize32 }) = opReg(CMOV32 test, output, source) | cgOp(CondMove { test, output, source=RegisterArg source, opSize=OpSize64 }) = opReg(CMOV64 test, output, source) | cgOp(CondMove { test, output, source=NonAddressConstArg _, opSize }) = ( (* We currently support only native-64 bit and put the constant in the non-address constant area. These are 64-bit values both in native 64-bit and in 32-in-64. To support it in 32-bit mode we'd have to put the constant in a single-word object and put its absolute address into the code. *) targetArch <> Native32Bit orelse raise InternalError "CondMove: constant in 32-bit mode"; opConstantOperand((case opSize of OpSize32 => CMOV32 | OpSize64 => CMOV64) test, output) ) | cgOp(CondMove { test, output, source=AddressConstArg _, opSize=OpSize64 }) = (* An address constant. The opSize must match the size of a polyWord since the value it going into the constant area. *) ( targetArch = Native64Bit orelse raise InternalError "CondMove: AddressConstArg"; opConstantOperand(CMOV64 test, output) ) | cgOp(CondMove { test, output, source=AddressConstArg _, opSize=OpSize32 }) = ( (* We only support address constants in 32-in-64. *) targetArch = ObjectId32Bit orelse raise InternalError "CondMove: AddressConstArg"; opConstantOperand(CMOV32 test, output) ) | cgOp(CondMove { test, output, source=MemoryArg{base, offset, index}, opSize=OpSize32 }) = opAddress(CMOV32 test, LargeInt.fromInt offset, base, index, output) | cgOp(CondMove { test, output, source=MemoryArg{base, offset, index}, opSize=OpSize64 }) = opAddress(CMOV64 test, LargeInt.fromInt offset, base, index, output) + + | cgOp PauseForSpinLock = opCodeBytes(PAUSE, NONE) in List.rev(List.foldl (fn (c, list) => Word8Vector.fromList(cgOp c) :: list) [] ops) end (* General function to process the code. ic is the byte counter within the original code. *) fun foldCode foldFn n (ops, byteList) = let fun doFold(oper :: operList, bytes :: byteList, ic, acc) = doFold(operList, byteList, ic + Word.fromInt(Word8Vector.length bytes), foldFn(oper, bytes, ic, acc)) | doFold(_, _, _, n) = n in doFold(ops, byteList, 0w0, n) end (* Go through the code and update branch and similar instructions with the destinations of the branches. Long branches are converted to short where possible and the code is reprocessed. That might repeat if the effect of shorting one branch allows another to be shortened. *) fun fixupLabels(ops, bytesList, labelCount) = let (* Label array - initialise to 0wxff... . Every label should be defined but just in case, this is more likely to be detected in int32Signed. 
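Since ~0w1 is the all-ones word, a branch through a label that was never defined yields an offset far outside the 32-bit range, so the error is likely to be caught in int32Signed rather than producing a silently wrong branch.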
*) val labelArray = Array.array(labelCount, ~ 0w1) (* First pass - Set the addresses of labels. *) fun setLabelAddresses(oper :: operList, bytes :: byteList, ic) = ( case oper of JumpLabel(Label{labelNo, ...}) => Array.update(labelArray, labelNo, ic) | _ => (); setLabelAddresses(operList, byteList, ic + Word.fromInt(Word8Vector.length bytes)) ) | setLabelAddresses(_, _, ic) = ic (* Return the length of the code. *) fun fixup32(destination, bytes, ic) = let val brLength = Word8Vector.length bytes (* The offset is relative to the end of the branch instruction. *) val diff = Word.toInt destination - Word.toInt ic - brLength in Word8VectorSlice.concat[ Word8VectorSlice.slice(bytes, 0, SOME(brLength-4)), (* The original opcode. *) Word8VectorSlice.full(Word8Vector.fromList(int32Signed(LargeInt.fromInt diff))) ] end fun fixupAddress(UncondBranch(Label{labelNo, ...}), bytes, ic, list) = let val destination = Array.sub(labelArray, labelNo) val brLength = Word8Vector.length bytes (* The offset is relative to the end of the branch instruction. *) val diff = Word.toInt destination - Word.toInt ic - brLength in if brLength = 2 then (* It's a short branch. Take the original operand and set the relative offset. *) Word8Vector.fromList [opToInt JMP_8, byteSigned diff] :: list else if brLength <> 5 then raise InternalError "fixupAddress" else (* 32-bit offset. If it will fit in a byte we can use a short branch. If this is a reverse branch we can actually use values up to -131 here because we've calculated using the end of the long branch. *) if diff <= 127 andalso diff >= ~(128 + 3) then Word8Vector.fromList [opToInt JMP_8, 0w0 (* Fixed on next pass *)] :: list else Word8Vector.fromList(opToInt JMP_32 :: int32Signed(LargeInt.fromInt diff)) :: list end | fixupAddress(ConditionalBranch{label=Label{labelNo, ...}, test, ...}, bytes, ic, list) = let val destination = Array.sub(labelArray, labelNo) val brLength = Word8Vector.length bytes (* The offset is relative to the end of the branch instruction. *) val diff = Word.toInt destination - Word.toInt ic - brLength in if brLength = 2 then (* It's a short branch. Take the original operand and set the relative offset. *) Word8Vector.fromList [opToInt(CondJump test), byteSigned diff] :: list else if brLength <> 6 then raise InternalError "fixupAddress" else if diff <= 127 andalso diff >= ~(128+4) then Word8Vector.fromList[opToInt(CondJump test), 0w0 (* Fixed on next pass *)] :: list else Word8Vector.fromList(opCodeBytes(CondJump32 test, NONE) @ int32Signed(LargeInt.fromInt diff)) :: list end | fixupAddress(LoadLabelAddress{ label=Label{labelNo, ...}, ... }, brCode, ic, list) = let val destination = Array.sub(labelArray, labelNo) in if hostIsX64 then (* This is a relative offset on the X86/64. *) fixup32(destination, brCode, ic) :: list else (* On X86/32 the address is relative to the start of the code so we simply put in the destination address. *) Word8VectorSlice.concat[ Word8VectorSlice.slice(brCode, 0, SOME(Word8Vector.length brCode-4)), Word8VectorSlice.full(Word8Vector.fromList(int32Signed(Word.toLargeInt destination)))] :: list end | fixupAddress(JumpTable{cases, jumpSize as ref JumpSize8}, brCode: Word8Vector.vector, ic, list) = let (* Each branch is a 32-bit jump padded up to 8 bytes. 
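Concretely, each slot is E9 xx xx xx xx 90 90 90, a 5-byte jmp rel32 followed by three NOPs, so the table can be indexed at a fixed 8 bytes per case; the xx offset bytes are patched during fixup.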
*) fun processCase(Label{labelNo, ...} :: cases, offset, ic) = fixup32(Array.sub(labelArray, labelNo), Word8VectorSlice.vector(Word8VectorSlice.slice(brCode, offset, SOME 5)), ic) :: Word8VectorSlice.vector(Word8VectorSlice.slice(brCode, offset+5, SOME 3)) :: processCase(cases, offset+8, ic+0w8) | processCase _ = [] (* Could we use short branches? If all of the branches were short the table would be smaller so the offsets we use would be less. Ignore backwards branches - could only occur if we have linked labels in a loop. *) val newStartOfCode = ic + Word.fromInt(List.length cases * 6) fun tryShort(Label{labelNo, ...} :: cases, ic) = let val destination = Array.sub(labelArray, labelNo) in if destination > ic + 0w2 andalso destination - ic - 0w2 < 0w127 then tryShort(cases, ic+0w2) else false end | tryShort _ = true val newCases = if tryShort(cases, newStartOfCode) then ( jumpSize := JumpSize2; (* Generate a short branch table. *) List.map(fn _ => Word8Vector.fromList [opToInt JMP_8, 0w0 (* Fixed on next pass *)]) cases ) else processCase(cases, 0, ic) in Word8Vector.concat newCases :: list end | fixupAddress(JumpTable{cases, jumpSize=ref JumpSize2}, _, ic, list) = let (* Each branch is a short jump. *) fun processCase(Label{labelNo, ...} :: cases, offset, ic) = let val destination = Array.sub(labelArray, labelNo) val brLength = 2 val diff = Word.toInt destination - Word.toInt ic - brLength in Word8Vector.fromList[opToInt JMP_8, byteSigned diff] :: processCase(cases, offset+2, ic+0w2) end | processCase _ = [] in Word8Vector.concat(processCase(cases, 0, ic)) :: list end (* If we've shortened a jump table we have to change the indexing. *) | fixupAddress(IndexedJumpCalc{ addrReg, indexReg, jumpSize=ref JumpSize2 }, _, _, list) = (* On x86/32 it might be shorter to use DEC addrReg; ADD addrReg, indexReg. *) Word8Vector.fromList(opAddress(if hostIsX64 then LEAL64 else LEAL32, ~1, addrReg, Index1 indexReg, addrReg)) :: list | fixupAddress(CallAddress(NonAddressConstArg _), brCode, ic, list) = let val brLen = Word8Vector.length brCode in (* Call to the start of the code. Offset is -(bytes to start). *) Word8VectorSlice.concat[ Word8VectorSlice.slice(brCode, 0, SOME(brLen-4)), (* The original opcode. *) Word8VectorSlice.full(Word8Vector.fromList(int32Signed(LargeInt.fromInt(~(Word.toInt ic+brLen))))) ] :: list end | fixupAddress(JumpAddress(NonAddressConstArg _), brCode, ic, list) = let val brLen = Word8Vector.length brCode in (* Call to the start of the code. Offset is -(bytes to start). *) Word8VectorSlice.concat[ Word8VectorSlice.slice(brCode, 0, SOME(brLen-4)), (* The original opcode. *) Word8VectorSlice.full(Word8Vector.fromList(int32Signed(LargeInt.fromInt(~(Word.toInt ic+brLen))))) ] :: list end | fixupAddress(_, bytes, _, list) = bytes :: list fun reprocess(bytesList, lastCodeSize) = let val fixedList = List.rev(foldCode fixupAddress [] (ops, bytesList)) val newCodeSize = setLabelAddresses(ops, fixedList, 0w0) in if newCodeSize = lastCodeSize then (fixedList, lastCodeSize) else if newCodeSize > lastCodeSize then raise InternalError "reprocess - size increased" else reprocess(fixedList, newCodeSize) end in reprocess(bytesList, setLabelAddresses(ops, bytesList, 0w0)) end (* The handling of constants generally differs between 32- and 64-bits. In 32-bits we put all constants inline and the GC processes the code to find the addresss. For real values the "constant" is actually the address of the boxed real value. 
In 64-bit mode inline constants were used with the MOV instruction but this has now been removed. All constants are stored in one of two areas at the end of the code segment. Non-addresses, including the actual values of reals, are stored in the non-address area and addresses go in the address area. Only the latter is scanned by the GC. The address area is also used in 32-bit mode but only has the address of the function name and the address of the profile ref in it. *) datatype inline32constants = SelfAddress (* The address of the start of the code - inline absolute address 32-bit only *) | InlineAbsoluteAddress of machineWord (* An address in the code: 32-bit only *) | InlineRelativeAddress of machineWord (* A relative address: 32-bit only. *) local (* Turn an integer constant into an 8-byte vector. *) fun intConst ival = LargeWord.fromLargeInt ival (* Copy a real constant from memory into an 8-byte vector. *) fun realConst c = let val cAsAddr = toAddress c (* This may be a boxed real or, in 32-in-64 mode, a boxed float. *) val cLength = length cAsAddr * wordSize val _ = ((cLength = 0w8 orelse cLength = 0w4) andalso flags cAsAddr = F_bytes) orelse raise InternalError "realConst: Not a real number" fun getBytes(i, a) = if i = 0w0 then a else getBytes(i-0w1, a*0w256 + Word8.toLargeWord(loadByte(cAsAddr, i-0w1))) in getBytes(cLength, 0w0) end fun getConstant(Move{ source=NonAddressConstArg source, moveSize=Move32, ...}, bytes, ic, (inl, addr, na)) = if targetArch <> Native32Bit then ( if source >= ~0x80000000 andalso source < 0x100000000 then (* Signed or unsigned 32-bits. *) (inl, addr, na) else (* Too big for 32-bits. *) (inl, addr, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, intConst source) :: na) ) else (inl, addr, na) (* 32-bit mode. The constant will always be inline even if we've had to use LEA r,c *) | getConstant(Move{ source=NonAddressConstArg source, moveSize=Move64, ...}, bytes, ic, (inl, addr, na)) = if targetArch <> Native32Bit then ( if source >= ~0x80000000 andalso source < 0x100000000 then (* Signed or unsigned 32-bits. *) (inl, addr, na) else (* Too big for 32-bits. *) (inl, addr, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, intConst source) :: na) ) else (inl, addr, na) (* 32-bit mode. The constant will always be inline even if we've had to use XOR r,r; ADD r,c *) | getConstant(Move{ source=AddressConstArg source, ... }, bytes, ic, (inl, addr, na)) = if targetArch <> Native32Bit then (* Address constants go in the constant area. *) (inl, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, source) :: addr, na) else ((ic + Word.fromInt(Word8Vector.length bytes) - wordSize, InlineAbsoluteAddress source) :: inl, addr, na) | getConstant(LoadAbsolute{value, ...}, bytes, ic, (inl, addr, na)) = if targetArch = Native64Bit then (* Address constants go in the constant area. *) (inl, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, value) :: addr, na) (* This is the only case of an inline constant in 32-in-64 *) else ((ic + Word.fromInt(Word8Vector.length bytes) - nativeWordSize, InlineAbsoluteAddress value) :: inl, addr, na) | getConstant(ArithToGenReg{ source=NonAddressConstArg source, ... }, bytes, ic, (inl, addr, na)) = if is32bit source then (inl, addr, na) else (inl, addr, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, intConst source) :: na) | getConstant(ArithToGenReg{ source=AddressConstArg source, ... 
}, bytes, ic, (inl, addr, na)) = if hostIsX64 then (inl, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, source) :: addr, na) else ((ic + Word.fromInt(Word8Vector.length bytes) - 0w4, InlineAbsoluteAddress source) :: inl, addr, na) | getConstant(ArithMemLongConst{ source, ... }, bytes, ic, (inl, addr, na)) = (* 32-bit only. *) ((ic + Word.fromInt(Word8Vector.length bytes) - 0w4, InlineAbsoluteAddress source) :: inl, addr, na) | getConstant(PushToStack(NonAddressConstArg constnt), bytes, ic, (inl, addr, na)) = if is32bit constnt then (inl, addr, na) else (inl, addr, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, intConst constnt) :: na) | getConstant(PushToStack(AddressConstArg constnt), bytes, ic, (inl, addr, na)) = if targetArch = Native64Bit then (inl, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, constnt) :: addr, na) else ((ic + Word.fromInt(Word8Vector.length bytes) - 0w4, InlineAbsoluteAddress constnt) :: inl, addr, na) | getConstant(CallAddress(AddressConstArg w), bytes, ic, (inl, addr, na)) = if targetArch = Native64Bit then (inl, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, w) :: addr, na) else ((ic + Word.fromInt(Word8Vector.length bytes) - 0w4, InlineRelativeAddress w) :: inl, addr, na) | getConstant(JumpAddress(AddressConstArg w), bytes, ic, (inl, addr, na)) = if targetArch = Native64Bit then (inl, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, w) :: addr, na) else ((ic + Word.fromInt(Word8Vector.length bytes) - 0w4, InlineRelativeAddress w) :: inl, addr, na) | getConstant(LoadLabelAddress _, _, ic, (inl, addr, na)) = (* We need the address of the code itself but it's in the first of a pair of instructions. *) if hostIsX64 then (inl, addr, na) else ((ic + 0w1, SelfAddress) :: inl, addr, na) | getConstant(FPLoadFromConst{constant, ...}, bytes, ic, (inl, addr, na)) = if hostIsX64 then (inl, addr, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, realConst constant) :: na) else ((ic + Word.fromInt(Word8Vector.length bytes) - 0w4, InlineAbsoluteAddress constant) :: inl, addr, na) | getConstant(FPArithConst{ source, ... }, bytes, ic, (inl, addr, na)) = if hostIsX64 then (inl, addr, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, realConst source) :: na) else ((ic + Word.fromInt(Word8Vector.length bytes) - 0w4, InlineAbsoluteAddress source) :: inl, addr, na) | getConstant(XMMArith { source=AddressConstArg constVal, ... }, bytes, ic, (inl, addr, na)) = (* Real.real constant or, with 32-bit words, a Real32.real constant. *) if hostIsX64 then (inl, addr, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, realConst constVal) :: na) else ((ic + Word.fromInt(Word8Vector.length bytes) - 0w4, InlineAbsoluteAddress constVal) :: inl, addr, na) | getConstant(XMMArith { source=NonAddressConstArg constVal, ... }, bytes, ic, (inl, addr, na)) = (* Real32.real constant in native 64-bit. *) (inl, addr, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, intConst constVal) :: na) | getConstant(MultiplyR{ source=NonAddressConstArg source, ... }, bytes, ic, (inl, addr, na)) = if is32bit source then (inl, addr, na) else (inl, addr, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, intConst source) :: na) | getConstant(CondMove{ source=NonAddressConstArg source, ... }, bytes, ic, (inl, addr, na)) = if targetArch <> Native32Bit then (inl, addr, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, intConst source) :: na) else (inl, addr, na) (* 32-bit mode. The constant will always be inline. *) | getConstant(CondMove{ source=AddressConstArg source, ... 
}, bytes, ic, (inl, addr, na)) = if targetArch <> Native32Bit then (* Address constants go in the constant area. *) (inl, (ic + Word.fromInt(Word8Vector.length bytes) - 0w4, source) :: addr, na) else ((ic + Word.fromInt(Word8Vector.length bytes) - wordSize, InlineAbsoluteAddress source) :: inl, addr, na) | getConstant(_, _, _, l) = l in val getConstants = foldCode getConstant ([], [], []) end (* It is convenient to have AllocStore and AllocStoreVariable as primitives at the higher level but at this point it's better to expand them into their basic instructions. *) fun expandComplexOperations(instrs, oldLabelCount) = let val labelCount = ref oldLabelCount fun mkLabel() = Label{labelNo= !labelCount} before labelCount := !labelCount + 1 (* On X86/64 the local pointer is in r15. On X86/32 it's in memRegs. *) val localPointer = if hostIsX64 then RegisterArg r15 else MemoryArg{base=ebp, offset=memRegLocalMPointer, index=NoIndex} val nativeWordOpSize = if hostIsX64 then OpSize64 else OpSize32 fun allocStoreCommonCode (resultReg, isVarAlloc, regSaveSet: genReg list) = let val compare = ArithToGenReg{opc=CMP, output=resultReg, source=MemoryArg{base=ebp, offset=memRegLocalMbottom, index=NoIndex}, opSize=nativeWordOpSize} (* Normally we won't have run out of store so we want the default branch prediction to skip the test here. However doing that involves adding an extra branch which lengthens the code so it's probably not worth while. *) (* Just checking against the lower limit can fail in the situation where the heap pointer is at the low end of the address range and the store required is so large that the subtraction results in a negative number. In that case it will be > (unsigned) lower_limit so in addition we have to check that the result is < (unsigned) heap_pointer. This actually happened on Windows with X86-64. In theory this can happen with fixed-size allocations as well as variable allocations but in practice fixed-size allocations are going to be small enough that it's not a problem. *) val destLabel = mkLabel() val branches = if isVarAlloc then let val extraLabel = mkLabel() in [ConditionalBranch{test=JB, label=extraLabel}, ArithToGenReg{opc=CMP, output=resultReg, source=localPointer, opSize=nativeWordOpSize}, ConditionalBranch{test=JB, label=destLabel}, JumpLabel extraLabel] end else [ConditionalBranch{test=JNB, label=destLabel}] val callRts = CallRTS{rtsEntry=HeapOverflowCall, saveRegs=regSaveSet} val fixup = JumpLabel destLabel (* Update the heap pointer now we have the store. This is also used by the RTS in the event of a trap to work out how much store was being allocated. *) val update = if hostIsX64 then Move{source=RegisterArg resultReg, destination=RegisterArg r15, moveSize=Move64} else Move{source=RegisterArg resultReg, destination=MemoryArg{base=ebp, offset=memRegLocalMPointer, index=NoIndex}, moveSize=Move32} in compare :: branches @ [callRts, fixup, update] end fun doExpansion([], code, _) = code | doExpansion(AllocStore {size, output, saveRegs} :: instrs, code, inAllocation) = let val _ = inAllocation andalso raise InternalError "doExpansion: Allocation started but not complete" val () = if List.exists (fn r => r = output) saveRegs then raise InternalError "AllocStore: in set" else () val startCode = case targetArch of Native64Bit => let val bytes = (size + 1) * Word.toInt wordSize in [LoadAddress{output=output, offset = ~ bytes, base=SOME r15, index=NoIndex, opSize=OpSize64}] (* TODO: What if it's too big to fit? 
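Presumably the limit is the signed 32-bit displacement field of the LEA instruction; in practice a fixed-size allocation is always far smaller than 2Gbytes so the offset cannot overflow.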
*) end | Native32Bit => let val bytes = (size + 1) * Word.toInt wordSize in [Move{source=MemoryArg{base=ebp, offset=memRegLocalMPointer, index=NoIndex}, destination=RegisterArg output, moveSize=Move32}, LoadAddress{output=output, offset = ~ bytes, base=SOME output, index=NoIndex, opSize=OpSize32}] end | ObjectId32Bit => let (* We must allocate an even number of words. *) val heapWords = if Int.rem(size, 2) = 1 then size+1 else size+2 val bytes = heapWords * Word.toInt wordSize in [LoadAddress{output=output, offset = ~ bytes, base=SOME r15, index=NoIndex, opSize=OpSize64}] end val resultCode = startCode @ allocStoreCommonCode(output, false, saveRegs) in doExpansion(instrs, (List.rev resultCode) @ code, true) end | doExpansion(AllocStoreVariable {size, output, saveRegs} :: instrs, code, inAllocation) = let (* Allocates memory. The "size" register contains the number of words as a tagged int. *) val _ = inAllocation andalso raise InternalError "doExpansion: Allocation started but not complete" val () = if List.exists (fn r => r = output) saveRegs then raise InternalError "AllocStore: in set" else () (* Negate the length and add it to the current heap pointer. *) (* Compute the number of bytes into dReg. The length in sReg is the number of words as a tagged value so we need to multiply it, add wordSize to include one word for the header then subtract the, multiplied, tag. We use LEA here but want to avoid having an empty base register. *) val _ = size = output andalso raise InternalError "AllocStoreVariable : same register for size and output" val startCode = if wordSize = 0w8 (* 8-byte words *) then [ ArithToGenReg{opc=XOR, output=output, source=RegisterArg output, opSize=OpSize32 (* Rest is zeroed *)}, ArithToGenReg{opc=SUB, output=output, source=RegisterArg size, opSize=OpSize64}, LoadAddress{output=output, base=SOME r15, offset= ~(Word.toInt wordSize-4), index=Index4 output, opSize=OpSize64 } ] else (* 4 byte words *) [ LoadAddress{output=output, base=SOME size, offset=Word.toInt wordSize-2, index=Index1 size, opSize=nativeWordOpSize }, Negative{output=output, opSize=nativeWordOpSize}, ArithToGenReg{opc=ADD, output=output, source=localPointer, opSize=nativeWordOpSize} ] (* If this is 32-in-64 we need to round down to the next 8-byte boundary. *) val roundCode = if targetArch = ObjectId32Bit then [ArithToGenReg{opc=AND, output=output, source=NonAddressConstArg ~8, opSize=OpSize64 }] else [] val resultCode = startCode @ roundCode @ allocStoreCommonCode(output, true, saveRegs) in doExpansion(instrs, (List.rev resultCode) @ code, true) end | doExpansion(StoreInitialised :: instrs, code, _) = doExpansion(instrs, code, false) | doExpansion(instr :: instrs, code, inAlloc) = doExpansion(instrs, instr::code, inAlloc) val expanded = List.rev(doExpansion(instrs, [], false)) in (expanded, !labelCount) end fun printCode (Code{procName, printStream, ...}, seg) = let val print = printStream val ptr = ref 0w0; (* prints a string representation of a number *) fun printValue v = if v < 0 then (print "-"; print(LargeInt.toString (~ v))) else print(LargeInt.toString v) infix 3 +:= ; fun (x +:= y) = (x := !x + (y:word)); fun get16s (a, seg) : int = let val b0 = Word8.toInt (codeVecGet (seg, a)); val b1 = Word8.toInt (codeVecGet (seg, a + 0w1)); val b1' = if b1 >= 0x80 then b1 - 0x100 else b1; in (b1' * 0x100) + b0 end fun get16u(a, seg) : int = Word8.toInt (codeVecGet (seg, a + 0w1)) * 0x100 + Word8.toInt (codeVecGet (seg, a)) (* Get 1 unsigned byte from the given offset in the segment. 
*) fun get8u (a, seg) : Word8.word = codeVecGet (seg, a); (* Get 1 signed byte from the given offset in the segment. *) fun get8s (a, seg) : int = Word8.toIntX (codeVecGet (seg, a)); (* Get 1 signed 32 bit word from the given offset in the segment. *) fun get32s (a, seg) : LargeInt.int = let val b0 = Word8.toLargeInt (codeVecGet (seg, a)); val b1 = Word8.toLargeInt (codeVecGet (seg, a + 0w1)); val b2 = Word8.toLargeInt (codeVecGet (seg, a + 0w2)); val b3 = Word8.toLargeInt (codeVecGet (seg, a + 0w3)); val b3' = if b3 >= 0x80 then b3 - 0x100 else b3; val topHw = (b3' * 0x100) + b2; val bottomHw = (b1 * 0x100) + b0; in (topHw * exp2_16) + bottomHw end fun get64s (a, seg) : LargeInt.int = let val b0 = Word8.toLargeInt (codeVecGet (seg, a)); val b1 = Word8.toLargeInt (codeVecGet (seg, a + 0w1)); val b2 = Word8.toLargeInt (codeVecGet (seg, a + 0w2)); val b3 = Word8.toLargeInt (codeVecGet (seg, a + 0w3)); val b4 = Word8.toLargeInt (codeVecGet (seg, a + 0w4)); val b5 = Word8.toLargeInt (codeVecGet (seg, a + 0w5)); val b6 = Word8.toLargeInt (codeVecGet (seg, a + 0w6)); val b7 = Word8.toLargeInt (codeVecGet (seg, a + 0w7)); val b7' = if b7 >= 0x80 then b7 - 0x100 else b7; in ((((((((b7' * 0x100 + b6) * 0x100 + b5) * 0x100 + b4) * 0x100 + b3) * 0x100 + b2) * 0x100) + b1) * 0x100) + b0 end fun print32 () = printValue (get32s (!ptr, seg)) before (ptr +:= 0w4) and print64 () = printValue (get64s (!ptr, seg)) before (ptr +:= 0w8) and print16 () = printValue (LargeInt.fromInt(get16s (!ptr, seg)) before (ptr +:= 0w2)) and print8 () = printValue (LargeInt.fromInt(get8s (!ptr, seg)) before (ptr +:= 0w1)) fun printJmp () = let val valu = get8s (!ptr, seg) before ptr +:= 0w1 in print (Word.fmt StringCvt.HEX (Word.fromInt valu + !ptr)) end (* Print an effective address. The register field may designate a general register or an xmm register depending on the instruction. *) fun printEAGeneral printRegister (rex, sz) = let val modrm = codeVecGet (seg, !ptr) val () = ptr +:= 0w1 (* Decode the Rex prefix if present. *) val rexX = (rex andb8 0wx2) <> 0w0 val rexB = (rex andb8 0wx1) <> 0w0 val prefix = case sz of SZByte => "byte ptr " | SZWord => "word ptr " | SZDWord => "dword ptr " | SZQWord => "qword ptr " in case (modrm >>- 0w6, modrm andb8 0w7, hostIsX64) of (0w3, rm, _) => printRegister(rm, rexB, sz) | (md, 0w4, _) => let (* s-i-b present. *) val sib = codeVecGet (seg, !ptr) val () = ptr +:= 0w1 val ss = sib >>- 0w6 val index = (sib >>- 0w3) andb8 0w7 val base = sib andb8 0w7 in print prefix; case (md, base, hostIsX64) of (0w1, _, _) => print8 () | (0w2, _, _) => print32 () | (0w0, 0w5, _) => print32 () (* Absolute in 32-bit mode. PC-relative in 64-bit ?? *) | _ => (); print "["; if md <> 0w0 orelse base <> 0w5 then ( print (genRegRepr (mkReg (base, rexB), sz32_64)); if index = 0w4 then () else print "," ) else (); if index = 0w4 andalso not rexX (* No index. *) then () else print (genRegRepr (mkReg(index, rexX), sz32_64) ^ (if ss = 0w0 then "*1" else if ss = 0w1 then "*2" else if ss = 0w2 then "*4" else "*8")); print "]" end | (0w0, 0w5, false) => (* Absolute address.*) (print prefix; print32 ()) | (0w0, 0w5, _) => (* PC-relative in 64-bit *) (print prefix; print ".+"; print32 ()) | (md, rm, _) => (* register plus offset. *) ( print prefix; if md = 0w1 then print8 () else if md = 0w2 then print32 () else (); print ("[" ^ genRegRepr (mkReg(rm, rexB), sz32_64) ^ "]") ) end (* For most instructions we want to print a general register. 
*) val printEA = printEAGeneral (fn (rm, rexB, sz) => print (genRegRepr (mkReg(rm, rexB), sz))) and printEAxmm = printEAGeneral (fn (rm, _, _) => print (xmmRegRepr(SSE2Reg rm))) fun printArith opc = print (case opc of 0 => "add " | 1 => "or " | 2 => "adc " | 3 => "sbb " | 4 => "and " | 5 => "sub " | 6 => "xor " | _ => "cmp " ) fun printGvEv (opByte, rex, rexR, sz) = let (* Register is in next byte. *) val nb = codeVecGet (seg, !ptr) val reg = (nb >>- 0w3) andb8 0w7 in printArith(Word8.toInt((opByte div 0w8) mod 0w8)); print "\t"; print (genRegRepr (mkReg(reg, rexR), sz)); print ","; printEA(rex, sz) end fun printMovCToR (opByte, sz, rexB) = ( print "mov \t"; print(genRegRepr (mkReg (opByte mod 0w8, rexB), sz)); print ","; case sz of SZDWord => print32 () | SZQWord => print64 () | _ => print "???" ) fun printShift (opByte, rex, sz) = let (* Opcode is determined by next byte. *) val nb = Word8.toInt (codeVecGet (seg, !ptr)) val opc = (nb div 8) mod 8 in print (case opc of 4 => "shl " | 5 => "shr " | 7 => "sar " | _ => "???" ); print "\t"; printEA(rex, sz); print ","; if opByte = opToInt Group2_1_A32 then print "1" else if opByte = opToInt Group2_CL_A32 then print "cl" else print8 () end fun printFloat (opByte, rex) = let (* Opcode is in next byte. *) val opByte2 = codeVecGet (seg, !ptr) val nnn = (opByte2 >>- 0w3) andb8 0w7 val escNo = opByte andb8 0wx7 in if (opByte2 andb8 0wxC0) = 0wxC0 then (* mod = 11 *) ( case (escNo, nnn, opByte2 andb8 0wx7 (* modrm *)) of (0w1, 0w4, 0w0) => print "fchs" | (0w1, 0w4, 0w1) => print "fabs" | (0w1, 0w5, 0w6) => print "fldz" | (0w1, 0w5, 0w1) => print "flf1" | (0w7, 0w4, 0w0) => print "fnstsw\tax" | (0w1, 0w5, 0w0) => print "fld1" | (0w1, 0w6, 0w3) => print "fpatan" | (0w1, 0w7, 0w2) => print "fsqrt" | (0w1, 0w7, 0w6) => print "fsin" | (0w1, 0w7, 0w7) => print "fcos" | (0w1, 0w6, 0w7) => print "fincstp" | (0w1, 0w6, 0w6) => print "fdecstp" | (0w3, 0w4, 0w2) => print "fnclex" | (0w5, 0w2, rno) => print ("fst \tst(" ^ Word8.toString rno ^ ")") | (0w5, 0w3, rno) => print ("fstp\tst(" ^ Word8.toString rno ^ ")") | (0w1, 0w0, rno) => print ("fld \tst(" ^ Word8.toString rno ^ ")") | (0w1, 0w1, rno) => print ("fxch\tst(" ^ Word8.toString rno ^ ")") | (0w0, 0w3, rno) => print ("fcomp\tst(" ^ Word8.toString rno ^ ")") | (0w0, 0w0, rno) => print ("fadd\tst,st(" ^ Word8.toString rno ^ ")") | (0w0, 0w1, rno) => print ("fmul\tst,st(" ^ Word8.toString rno ^ ")") | (0w0, 0w4, rno) => print ("fsub\tst,st(" ^ Word8.toString rno ^ ")") | (0w0, 0w5, rno) => print ("fsubr\tst,st(" ^ Word8.toString rno ^ ")") | (0w0, 0w6, rno) => print ("fdiv\tst,st(" ^ Word8.toString rno ^ ")") | (0w0, 0w7, rno) => print ("fdivr\tst,st(" ^ Word8.toString rno ^ ")") | (0w5, 0w0, rno) => print ("ffree\tst(" ^ Word8.toString rno ^ ")") | _ => (printValue(Word8.toLargeInt opByte); printValue(Word8.toLargeInt opByte2)); ptr +:= 0w1 ) else (* mod = 00, 01, 10 *) ( case (escNo, nnn) of (0w0, 0w0) => (print "fadd\t"; printEA(rex, SZDWord)) (* Single precision. 
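The escape-0 group, opcode D8, operates on a 32-bit memory operand; the corresponding double-precision forms are in the escape-4 group, opcode DC, below.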
*) | (0w0, 0w1) => (print "fmul\t"; printEA(rex, SZDWord)) | (0w0, 0w3) => (print "fcomp\t"; printEA(rex, SZDWord)) | (0w0, 0w4) => (print "fsub\t"; printEA(rex, SZDWord)) | (0w0, 0w5) => (print "fsubr\t"; printEA(rex, SZDWord)) | (0w0, 0w6) => (print "fdiv\t"; printEA(rex, SZDWord)) | (0w0, 0w7) => (print "fdivr\t"; printEA(rex, SZDWord)) | (0w1, 0w0) => (print "fld \t"; printEA(rex, SZDWord)) | (0w1, 0w2) => (print "fst\t"; printEA(rex, SZDWord)) | (0w1, 0w3) => (print "fstp\t"; printEA(rex, SZDWord)) | (0w1, 0w5) => (print "fldcw\t"; printEA(rex, SZWord)) (* Control word is 16 bits *) | (0w1, 0w7) => (print "fstcw\t"; printEA(rex, SZWord)) (* Control word is 16 bits *) | (0w3, 0w0) => (print "fild\t"; printEA(rex, SZDWord)) (* 32-bit int. *) | (0w7, 0w5) => (print "fild\t"; printEA(rex, SZQWord)) (* 64-bit int. *) | (0w3, 0w3) => (print "fistp\t"; printEA(rex, SZDWord)) (* 32-bit int. *) | (0w7, 0w7) => (print "fistp\t"; printEA(rex, SZQWord)) (* 64-bit int. *) | (0w4, 0w0) => (print "fadd\t"; printEA(rex, SZQWord)) (* Double precision. *) | (0w4, 0w1) => (print "fmul\t"; printEA(rex, SZQWord)) | (0w4, 0w3) => (print "fcomp\t"; printEA(rex, SZQWord)) | (0w4, 0w4) => (print "fsub\t"; printEA(rex, SZQWord)) | (0w4, 0w5) => (print "fsubr\t"; printEA(rex, SZQWord)) | (0w4, 0w6) => (print "fdiv\t"; printEA(rex, SZQWord)) | (0w4, 0w7) => (print "fdivr\t"; printEA(rex, SZQWord)) | (0w5, 0w0) => (print "fld \t"; printEA(rex, SZQWord)) | (0w5, 0w2) => (print "fst\t"; printEA(rex, SZQWord)) | (0w5, 0w3) => (print "fstp\t"; printEA(rex, SZQWord)) | _ => (printValue(Word8.toLargeInt opByte); printValue(Word8.toLargeInt opByte2)) ) end fun printJmp32 oper = let val valu = get32s (!ptr, seg) before (ptr +:= 0w4) in print oper; print "\t"; print (Word.fmt StringCvt.HEX (!ptr + Word.fromLargeInt valu)) end fun printMask mask = let val wordMask = Word.fromInt mask fun printAReg n = if n = regs then () else ( if (wordMask andb (0w1 << Word.fromInt n)) <> 0w0 then (print(regRepr(regN n)); print " ") else (); printAReg(n+1) ) in printAReg 0 end in if procName = "" (* No name *) then print "?" else print procName; print ":\n"; while get8u (!ptr, seg) <> 0wxf4 (* HLT. *) do let val () = print (Word.fmt StringCvt.HEX (!ptr)) (* The address in hex. *) val () = print "\t" (* See if we have a lock prefix. *) val () = if get8u (!ptr, seg) = 0wxF0 then (print "lock "; ptr := !ptr + 0w1) else () val legacyPrefix = let val p = get8u (!ptr, seg) in if p = 0wxF2 orelse p = 0wxF3 orelse p = 0wx66 then (ptr := !ptr + 0w1; p) else 0wx0 end (* See if we have a REX byte. *) val rex = let val b = get8u (!ptr, seg); in if b >= 0wx40 andalso b <= 0wx4f then (ptr := !ptr + 0w1; b) else 0w0 end val rexW = (rex andb8 0wx8) <> 0w0 val rexR = (rex andb8 0wx4) <> 0w0 val rexB = (rex andb8 0wx1) <> 0w0 val opByte = get8u (!ptr, seg) before ptr +:= 0w1 val sizeFromRexW = if rexW then SZQWord else SZDWord in case opByte of 0wx03 => printGvEv (opByte, rex, rexR, sizeFromRexW) | 0wx0b => printGvEv (opByte, rex, rexR, sizeFromRexW) | 0wx0f => (* ESCAPE *) let (* Opcode is in next byte. *) val opByte2 = codeVecGet (seg, !ptr) val () = (ptr +:= 0w1) fun printcmov movop = let val nb = codeVecGet (seg, !ptr) val reg = (nb >>- 0w3) andb8 0w7 in print movop; print "\t"; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA(rex, sizeFromRexW) end in case legacyPrefix of 0w0 => ( case opByte2 of 0wx2e => let (* ucomiss doesn't have a prefix. 
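The double-precision form, ucomisd, is decoded under the 66 prefix below.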
*) val nb = codeVecGet (seg, !ptr) val reg = SSE2Reg((nb >>- 0w3) andb8 0w7) in print "ucomiss\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZDWord) end | 0wx40 => printcmov "cmovo" | 0wx41 => printcmov "cmovno" | 0wx42 => printcmov "cmovb" | 0wx43 => printcmov "cmovnb" | 0wx44 => printcmov "cmove" | 0wx45 => printcmov "cmovne" | 0wx46 => printcmov "cmovna" | 0wx47 => printcmov "cmova" | 0wx48 => printcmov "cmovs" | 0wx49 => printcmov "cmovns" | 0wx4a => printcmov "cmovp" | 0wx4b => printcmov "cmovnp" | 0wx4c => printcmov "cmovl" | 0wx4d => printcmov "cmovge" | 0wx4e => printcmov "cmovle" | 0wx4f => printcmov "cmovg" | 0wxC1 => let val nb = codeVecGet (seg, !ptr); val reg = (nb >>- 0w3) andb8 0w7 in (* The address argument comes first in the assembly code. *) print "xadd\t"; printEA (rex, sizeFromRexW); print ","; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)) end | 0wxB6 => let val nb = codeVecGet (seg, !ptr); val reg = (nb >>- 0w3) andb8 0w7 in print "movzx\t"; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA (rex, SZByte) end | 0wxB7 => let val nb = codeVecGet (seg, !ptr); val reg = (nb >>- 0w3) andb8 0w7 in print "movzx\t"; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA (rex, SZWord) end | 0wxBE => let val nb = codeVecGet (seg, !ptr); val reg = (nb >>- 0w3) andb8 0w7 in print "movsx\t"; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA (rex, SZByte) end | 0wxBF => let val nb = codeVecGet (seg, !ptr); val reg = (nb >>- 0w3) andb8 0w7 in print "movsx\t"; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA (rex, SZWord) end | 0wxAE => let (* Opcode is determined by the next byte. *) val opByte2 = codeVecGet (seg, !ptr); val nnn = (opByte2 >>- 0w3) andb8 0w7 in case nnn of 0wx2 => (print "ldmxcsr\t"; printEA(rex, SZDWord)) | 0wx3 => (print "stmxcsr\t"; printEA(rex, SZDWord)) | _ => (printValue(Word8.toLargeInt opByte); printValue(Word8.toLargeInt opByte2)) end | 0wxAF => let val nb = codeVecGet (seg, !ptr); val reg = (nb >>- 0w3) andb8 0w7 in print "imul\t"; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA (rex, sizeFromRexW) end | 0wx80 => printJmp32 "jo " | 0wx81 => printJmp32 "jno " | 0wx82 => printJmp32 "jb " | 0wx83 => printJmp32 "jnb " | 0wx84 => printJmp32 "je " | 0wx85 => printJmp32 "jne " | 0wx86 => printJmp32 "jna " | 0wx87 => printJmp32 "ja " | 0wx88 => printJmp32 "js " | 0wx89 => printJmp32 "jns " | 0wx8a => printJmp32 "jp " | 0wx8b => printJmp32 "jnp " | 0wx8c => printJmp32 "jl " | 0wx8d => printJmp32 "jge " | 0wx8e => printJmp32 "jle " | 0wx8f => printJmp32 "jg " | 0wx90 => (print "seto\t"; printEA (rex, SZByte)) | 0wx91 => (print "setno\t"; printEA (rex, SZByte)) | 0wx92 => (print "setb\t"; printEA (rex, SZByte)) | 0wx93 => (print "setnb\t"; printEA (rex, SZByte)) | 0wx94 => (print "sete\t"; printEA (rex, SZByte)) | 0wx95 => (print "setne\t"; printEA (rex, SZByte)) | 0wx96 => (print "setna\t"; printEA (rex, SZByte)) | 0wx97 => (print "seta\t"; printEA (rex, SZByte)) | 0wx98 => (print "sets\t"; printEA (rex, SZByte)) | 0wx99 => (print "setns\t"; printEA (rex, SZByte)) | 0wx9a => (print "setp\t"; printEA (rex, SZByte)) | 0wx9b => (print "setnp\t"; printEA (rex, SZByte)) | 0wx9c => (print "setl\t"; printEA (rex, SZByte)) | 0wx9d => (print "setge\t"; printEA (rex, SZByte)) | 0wx9e => (print "setle\t"; printEA (rex, SZByte)) | 0wx9f => (print "setg\t"; printEA (rex, SZByte)) | _ => (print "esc\t"; printValue(Word8.toLargeInt opByte2)) ) | 0wxf2 => (* SSE2 
instruction *) let val nb = codeVecGet (seg, !ptr) val rr = (nb >>- 0w3) andb8 0w7 val reg = SSE2Reg rr in case opByte2 of 0wx10 => ( print "movsd\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZQWord) ) | 0wx11 => ( print "movsd\t"; printEAxmm(rex, SZQWord); print ","; print(xmmRegRepr reg) ) | 0wx2a => ( print "cvtsi2sd\t"; print(xmmRegRepr reg); print ","; printEA(rex, sizeFromRexW) ) | 0wx2c => ( print "cvttsd2si\t"; print (genRegRepr (mkReg(rr, rexR), sizeFromRexW)); print ","; printEAxmm(rex, sizeFromRexW) ) | 0wx2d => ( print "cvtsd2si\t"; print (genRegRepr (mkReg(rr, rexR), sizeFromRexW)); print ","; printEAxmm(rex, sizeFromRexW) ) | 0wx58 => ( print "addsd\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZQWord) ) | 0wx59 => ( print "mulsd\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZQWord) ) | 0wx5a => ( print "cvtsd2ss\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZQWord) ) | 0wx5c => ( print "subsd\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZQWord) ) | 0wx5e => ( print "divsd\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZQWord) ) | b => (print "F2\n"; print "0F\n"; print(Word8.fmt StringCvt.HEX b)) end | 0wxf3 => (* SSE2 instruction. *) let val nb = codeVecGet (seg, !ptr) val rr = (nb >>- 0w3) andb8 0w7 val reg = SSE2Reg rr in case opByte2 of 0wx10 => ( print "movss\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZDWord) ) | 0wx11 => ( print "movss\t"; printEAxmm(rex, SZDWord); print ","; print(xmmRegRepr reg) ) | 0wx2c => ( print "cvttss2si\t"; print (genRegRepr (mkReg(rr, rexR), sizeFromRexW)); print ","; printEAxmm(rex, sizeFromRexW) ) | 0wx2d => ( print "cvtss2si\t"; print (genRegRepr (mkReg(rr, rexR), sizeFromRexW)); print ","; printEAxmm(rex, sizeFromRexW) ) | 0wx5a => ( print "cvtss2sd\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZDWord) ) | 0wx58 => ( print "addss\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZDWord) ) | 0wx59 => ( print "mulss\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZDWord) ) | 0wx5c => ( print "subss\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZDWord) ) | 0wx5e => ( print "divss\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZDWord) ) | b => (print "F3\n"; print "0F\n"; print(Word8.fmt StringCvt.HEX b)) end | 0wx66 => (* SSE2 instruction *) let val nb = codeVecGet (seg, !ptr) val reg = SSE2Reg((nb >>- 0w3) andb8 0w7) in case opByte2 of 0wx2e => ( print "ucomisd\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZQWord) ) | 0wx54 => ( print "andpd\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZQWord) ) | 0wx57 => ( print "xorpd\t"; print(xmmRegRepr reg); print ","; printEAxmm(rex, SZQWord) ) | 0wx6e => ( print (if rexW then "movq\t" else "movd\t"); print(xmmRegRepr reg); print ","; printEA(rex, sizeFromRexW) ) | 0wx7e => ( print (if rexW then "movq\t" else "movd\t"); printEA(rex, sizeFromRexW); print ","; print(xmmRegRepr reg) ) | 0wx73 => ( print "psrldq\t"; printEAxmm(rex, SZQWord); print ","; print8 ()) | b => (print "66\n"; print "0F\n"; print(Word8.fmt StringCvt.HEX b)) end | _ => (print "esc\t"; printValue(Word8.toLargeInt opByte2)) end (* ESCAPE *) | 0wx13 => printGvEv (opByte, rex, rexR, sizeFromRexW) | 0wx1b => printGvEv (opByte, rex, rexR, sizeFromRexW) | 0wx23 => printGvEv (opByte, rex, rexR, sizeFromRexW) | 0wx2b => printGvEv (opByte, rex, rexR, sizeFromRexW) | 0wx33 => printGvEv (opByte, rex, rexR, sizeFromRexW) | 0wx3b => printGvEv (opByte, rex, rexR, sizeFromRexW) (* Push and Pop. 
These are 64-bit on X86/64 whether there is REX prefix or not. *) | 0wx50 => print ("push\t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx51 => print ("push\t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx52 => print ("push\t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx53 => print ("push\t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx54 => print ("push\t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx55 => print ("push\t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx56 => print ("push\t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx57 => print ("push\t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx58 => print ("pop \t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx59 => print ("pop \t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx5a => print ("pop \t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx5b => print ("pop \t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx5c => print ("pop \t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx5d => print ("pop \t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx5e => print ("pop \t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx5f => print ("pop \t" ^ genRegRepr (mkReg (opByte mod 0w8, rexB), sz32_64)) | 0wx63 => (* MOVSXD. This is ARPL in 32-bit mode but that's never used here. *) let val nb = codeVecGet (seg, !ptr) val reg = (nb >>- 0w3) andb8 0w7 in print "movsxd\t"; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA(rex, SZDWord) end | 0wx68 => (print "push\t"; print32 ()) | 0wx69 => let (* Register is in next byte. *) val nb = codeVecGet (seg, !ptr) val reg = (nb >>- 0w3) andb8 0w7 in print "imul\t"; print(genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA(rex, sizeFromRexW); print ","; print32 () end | 0wx6a => (print "push\t"; print8 ()) | 0wx6b => let (* Register is in next byte. *) val nb = codeVecGet (seg, !ptr) val reg = (nb >>- 0w3) andb8 0w7 in print "imul\t"; print(genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA(rex, sizeFromRexW); print ","; print8 () end | 0wx70 => (print "jo \t"; printJmp()) | 0wx71 => (print "jno \t"; printJmp()) | 0wx72 => (print "jb \t"; printJmp()) | 0wx73 => (print "jnb \t"; printJmp()) | 0wx74 => (print "je \t"; printJmp()) | 0wx75 => (print "jne \t"; printJmp()) | 0wx76 => (print "jna \t"; printJmp()) | 0wx77 => (print "ja \t"; printJmp()) | 0wx78 => (print "js \t"; printJmp()) | 0wx79 => (print "jns \t"; printJmp()) | 0wx7a => (print "jp \t"; printJmp()) | 0wx7b => (print "jnp \t"; printJmp()) | 0wx7c => (print "jl \t"; printJmp()) | 0wx7d => (print "jge \t"; printJmp()) | 0wx7e => (print "jle \t"; printJmp()) | 0wx7f => (print "jg \t"; printJmp()) | 0wx80 => (* Group1_8_a *) let (* Memory, byte constant *) (* Opcode is determined by next byte. *) val nb = Word8.toInt (codeVecGet (seg, !ptr)) in printArith ((nb div 8) mod 8); print "\t"; printEA(rex, SZByte); print ","; print8 () end | 0wx81 => let (* Memory, 32-bit constant *) (* Opcode is determined by next byte. *) val nb = Word8.toInt (codeVecGet (seg, !ptr)) in printArith ((nb div 8) mod 8); print "\t"; printEA(rex, sizeFromRexW); print ","; print32 () end | 0wx83 => let (* Word memory, 8-bit constant *) (* Opcode is determined by next byte. 
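The 8-bit immediate is sign-extended to the full operand size.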
*) val nb = Word8.toInt (codeVecGet (seg, !ptr)) in printArith ((nb div 8) mod 8); print "\t"; printEA(rex, sizeFromRexW); print ","; print8 () end | 0wx87 => let (* xchng *) (* Register is in next byte. *) val nb = codeVecGet (seg, !ptr) val reg = (nb >>- 0w3) andb8 0w7 in print "xchng \t"; printEA(rex, sizeFromRexW); print ","; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)) end | 0wx88 => let (* mov eb,gb i.e a store *) (* Register is in next byte. *) val nb = Word8.toInt (codeVecGet (seg, !ptr)); val reg = (nb div 8) mod 8; in print "mov \t"; printEA(rex, SZByte); print ","; if rexR then print ("r" ^ Int.toString(reg+8) ^ "B") else case reg of 0 => print "al" | 1 => print "cl" | 2 => print "dl" | 3 => print "bl" (* If there is a REX byte these select the low byte of the registers. *) | 4 => print (if rex = 0w0 then "ah" else "sil") | 5 => print (if rex = 0w0 then "ch" else "dil") | 6 => print (if rex = 0w0 then "dh" else "bpl") | 7 => print (if rex = 0w0 then "bh" else "spl") | _ => print ("r" ^ Int.toString reg) end | 0wx89 => let (* mov ev,gv i.e. a store *) (* Register is in next byte. *) val nb = codeVecGet (seg, !ptr) val reg = (nb >>- 0w3) andb8 0w7 in print "mov \t"; (* This may have an opcode prefix. *) printEA(rex, if legacyPrefix = 0wx66 then SZWord else sizeFromRexW); print ","; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)) end | 0wx8b => let (* mov gv,ev i.e. a load *) (* Register is in next byte. *) val nb = codeVecGet (seg, !ptr) val reg = (nb >>- 0w3) andb8 0w7 in print "mov \t"; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA(rex, sizeFromRexW) end | 0wx8d => let (* lea gv.M *) (* Register is in next byte. *) val nb = codeVecGet (seg, !ptr) val reg = (nb >>- 0w3) andb8 0w7 in print "lea \t"; print (genRegRepr (mkReg(reg, rexR), sizeFromRexW)); print ","; printEA(rex, sizeFromRexW) end | 0wx8f => (print "pop \t"; printEA(rex, sz32_64)) - | 0wx90 => print "nop" + + | 0wx90 => if legacyPrefix = 0wxf3 then print "pause" else print "nop" | 0wx99 => if rexW then print "cqo" else print "cdq" | 0wx9e => print "sahf\n" | 0wxa4 => (if legacyPrefix = 0wxf3 then print "rep " else (); print "movsb") | 0wxa5 => (if legacyPrefix = 0wxf3 then print "rep " else (); print "movsl") | 0wxa6 => (if legacyPrefix = 0wxf3 then print "repe " else (); print "cmpsb") | 0wxa8 => (print "test\tal,"; print8 ()) | 0wxaa => (if legacyPrefix = 0wxf3 then print "rep " else (); print "stosb") | 0wxab => ( if legacyPrefix = 0wxf3 then print "rep " else (); if rexW then print "stosq" else print "stosl" ) | 0wxb8 => printMovCToR (opByte, sizeFromRexW, rexB) | 0wxb9 => printMovCToR (opByte, sizeFromRexW, rexB) | 0wxba => printMovCToR (opByte, sizeFromRexW, rexB) | 0wxbb => printMovCToR (opByte, sizeFromRexW, rexB) | 0wxbc => printMovCToR (opByte, sizeFromRexW, rexB) | 0wxbd => printMovCToR (opByte, sizeFromRexW, rexB) | 0wxbe => printMovCToR (opByte, sizeFromRexW, rexB) | 0wxbf => printMovCToR (opByte, sizeFromRexW, rexB) | 0wxc1 => (* Group2_8_A *) printShift (opByte, rex, sizeFromRexW) | 0wxc2 => (print "ret \t"; print16 ()) | 0wxc3 => print "ret" | 0wxc6 => (* move 8-bit constant to memory *) ( print "mov \t"; printEA(rex, SZByte); print ","; print8 () ) | 0wxc7 => (* move 32/64-bit constant to memory *) ( print "mov \t"; printEA(rex, sizeFromRexW); print ","; print32 () ) | 0wxca => (* Register mask *) let val mask = get16u (!ptr, seg) before (ptr +:= 0w2) in print "SAVE\t"; printMask mask end | 0wxcd => (* Register mask *) let val mask = get8u (!ptr, seg) before (ptr 
+:= 0w1) in print "SAVE\t"; printMask(Word8.toInt mask) end | 0wxd1 => (* Group2_1_A *) printShift (opByte, rex, sizeFromRexW) | 0wxd3 => (* Group2_CL_A *) printShift (opByte, rex, sizeFromRexW) | 0wxd8 => printFloat (opByte, rex) (* Floating point escapes *) | 0wxd9 => printFloat (opByte, rex) | 0wxda => printFloat (opByte, rex) | 0wxdb => printFloat (opByte, rex) | 0wxdc => printFloat (opByte, rex) | 0wxdd => printFloat (opByte, rex) | 0wxde => printFloat (opByte, rex) | 0wxdf => printFloat (opByte, rex) | 0wxe8 => let (* 32-bit relative call. *) val valu = get32s (!ptr, seg) before (ptr +:= 0w4) in print "call\t"; print (Word.fmt StringCvt.HEX (!ptr + Word.fromLargeInt valu)) end | 0wxe9 => let (* 32-bit relative jump. *) val valu = get32s (!ptr, seg) before (ptr +:= 0w4) in print "jmp \t"; print (Word.fmt StringCvt.HEX (!ptr + Word.fromLargeInt valu)) end | 0wxeb => (print "jmp \t"; printJmp()) | 0wxf4 => print "hlt" (* Marker to indicate end-of-code. *) | 0wxf6 => (* Group3_a *) let (* Opcode is determined by next byte. *) val nb = Word8.toInt (codeVecGet (seg, !ptr)) val opc = (nb div 8) mod 8 in print (case opc of 0 => "test" | 3 => "neg" | _ => "???" ); print "\t"; printEA(rex, SZByte); if opc = 0 then (print ","; print8 ()) else () end | 0wxf7 => (* Group3_A *) let (* Opcode is determined by next byte. *) val nb = Word8.toInt (codeVecGet (seg, !ptr)) val opc = (nb div 8) mod 8 in print (case opc of 0 => "test" | 3 => "neg " | 4 => "mul " | 5 => "imul" | 6 => "div " | 7 => "idiv" | _ => "???" ); print "\t"; printEA(rex, sizeFromRexW); (* Test has an immediate operand. It's 32-bits even in 64-bit mode. *) if opc = 0 then (print ","; print32 ()) else () end | 0wxff => (* Group5 *) let (* Opcode is determined by next byte. *) val nb = Word8.toInt (codeVecGet (seg, !ptr)) val opc = (nb div 8) mod 8 in print (case opc of 2 => "call" | 4 => "jmp " | 6 => "push" | _ => "???" ); print "\t"; printEA(rex, sz32_64) (* None of the cases we use need a prefix. *) end | _ => print(Word8.fmt StringCvt.HEX opByte); print "\n" end; (* end of while loop *) print "\n" end (* printCode *); (* Although this is used locally it must be defined at the top level otherwise a new RTS function will be compiler every time the containing function is called *) val sortFunction: (machineWord * word) array -> bool = RunCall.rtsCallFast1 "PolySortArrayOfAddresses" (* This actually does the final code-generation. *) fun generateCode {ops=operations, code=cvec as Code{procName, printAssemblyCode, printStream, profileObject, ...}, labelCount, resultClosure} : unit = let val (expanded, newLabelCount) = expandComplexOperations (operations, labelCount) val () = printLowLevelCode(expanded, cvec) local val initialBytesList = codeGenerate expanded in (* Fixup labels and shrink long branches to short. *) val (bytesList, codeSize) = fixupLabels(expanded, initialBytesList, newLabelCount) end local (* Extract the constants and the location of the references from the code. *) val (inlineConstants, addressConstants, nonAddressConstants) = getConstants(expanded, bytesList) (* Sort the non-address constants to remove duplicates. There don't seem to be many in practice. Since we're not actually interested in the order but only sorting to remove duplicates we can use a stripped-down Quicksort. 
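For example, offsets o1, o2 and o3 paired with the values 3, 7 and 3 come out as ([o1, o3], 3) and ([o2], 7) in some order, so each distinct value appears exactly once together with every offset that refers to it.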
*) fun sort([], out) = out | sort((addr, median) :: tl, out) = partition(median, tl, [addr], [], [], out) and partition(median, [], addrs, less, greater, out) = sort(less, sort(greater, (addrs, median) :: out)) | partition(median, (entry as (addr, value)) :: tl, addrs, less, greater, out) = if value = median then partition(median, tl, addr::addrs, less, greater, out) else if value < median then partition(median, tl, addrs, entry :: less, greater, out) else partition(median, tl, addrs, less, entry :: greater, out) (* Non-address constants. We can't use any ordering on them because a GC could change the values half way through the sort. Instead we use a simple search for a small number of constants and use an RTS call for larger numbers. We want to avoid quadratic cost when there are large numbers. *) val sortedConstants = if List.length addressConstants < 10 then let fun findDups([], out) = out | findDups((addr, value) :: tl, out) = let fun partition(e as (a, v), (eq, neq)) = if PolyML.pointerEq(value, v) then (a :: eq, neq) else (eq, e :: neq) val (eqAddr, neq) = List.foldl partition ([addr], []) tl in findDups(neq, (eqAddr, value) :: out) end in findDups(addressConstants, []) end else let fun swap (a, b) = (b, a) val arrayToSort: (machineWord * word) array = Array.fromList (List.map swap addressConstants) val _ = sortFunction arrayToSort fun makeList((v, a), []) = [([a], v)] | makeList((v, a), l as (aa, vv) :: tl) = if PolyML.pointerEq(v, vv) then (a :: aa, vv) :: tl else ([a], v) :: l in Array.foldl makeList [] arrayToSort end in val inlineConstants = inlineConstants and addressConstants = sortedConstants and nonAddressConstants = sort(nonAddressConstants, []) end (* Get the number of constants that need to be added to the address area. *) val constsInConstArea = List.length addressConstants local (* Add one byte for the HLT and round up to a number of words. *) val endOfCode = (codeSize+nativeWordSize) div nativeWordSize * (nativeWordSize div wordSize) val numOfNonAddrWords = Word.fromInt(List.length nonAddressConstants) (* Each entry in the non-address constant area is 8 bytes. *) val intSize = 0w8 div wordSize in val endOfByteArea = endOfCode + numOfNonAddrWords * intSize (* +4 for no of consts. function name, profile object and offset to start of consts. *) val segSize = endOfByteArea + Word.fromInt constsInConstArea + 0w4 end (* Create a byte vector and copy the data in. This is a byte area and not scanned by the GC so cannot contain any addresses. *) val byteVec = byteVecMake segSize val ic = ref 0w0 local fun genByte (ival: Word8.word) = set8u (ival, !ic, byteVec) before ic := !ic + 0w1 in fun genBytes l = Word8Vector.app (fn i => genByte i) l val () = List.app (fn b => genBytes b) bytesList val () = genBytes(Word8Vector.fromList(opCodeBytes(HLT, NONE))) (* Marker - this is used by ScanConstants in the RTS. *) end (* Align ic onto a fullword boundary. *) val () = ic := ((!ic + nativeWordSize - 0w1) andb ~nativeWordSize) (* Copy the non-address constants. These are only used in 64-bit mode and are either real constants or integers that are too large to fit in a 32-bit inline constants. We don't use this for real constants in 32-bit mode because we don't have relative addressing. Instead a real constant is the inline address of a boxed real number. *) local fun putNonAddrConst(addrs, constant) = let val addrOfConst = ! 
ic val () = genBytes(Word8Vector.fromList(largeWordToBytes(constant, 8))) fun setAddr addr = set32s(Word.toLargeInt(addrOfConst - addr - 0w4), addr, byteVec) in List.app setAddr addrs end in val () = List.app putNonAddrConst nonAddressConstants end val _ = bytesToWords(! ic) = endOfByteArea orelse raise InternalError "mismatch" (* Put in the number of constants. This must go in before we actually put in any constants. In 32-bit mode there are only two constants: the function name and the profile object. All other constants are in the code. *) local val lastWord = wordsToBytes(endOfByteArea + 0w3 + Word.fromInt constsInConstArea) fun setBytes(_, _, 0) = () | setBytes(ival, offset, count) = ( byteVecSet(byteVec, offset, Word8.fromLargeInt(ival mod 256)); setBytes(ival div 256, offset+0w1, count-1) ) in val () = setBytes(LargeInt.fromInt(2 + constsInConstArea), wordsToBytes endOfByteArea, Word.toInt wordSize) (* Set the last word of the code to the (negative) byte offset of the start of the code area from the end of this word. *) val () = setBytes(Word.toLargeIntX(wordsToBytes endOfByteArea - lastWord), lastWord, Word.toInt wordSize) end; (* We've put in all the byte data so it is safe to convert this to a mutable code cell that can contain addresses and will be scanned by the GC. *) val codeSeg = byteVecToCodeVec(byteVec, resultClosure) (* Various RTS functions assume that the first constant is the function name. The profiler assumes that the second word is the address of the mutable that contains the profile count. *) val () = codeVecPutWord (codeSeg, endOfByteArea + 0w1, toMachineWord procName) (* Next the profile object. *) val () = codeVecPutWord (codeSeg, endOfByteArea + 0w2, profileObject) in let fun setBytes(_, _, 0w0) = () | setBytes(b, addr, count) = ( codeVecSet (codeSeg, addr, wordToWord8 b); setBytes(b >> 0w8, addr+0w1, count-0w1) ) (* Inline constants - native 32-bit only plus one special case in 32-in-64 *) fun putInlConst (addrs, SelfAddress) = (* Self address goes inline. *) codeVecPutConstant (codeSeg, addrs, toMachineWord(codeVecAddr codeSeg), ConstAbsolute) | putInlConst (addrs, InlineAbsoluteAddress m) = codeVecPutConstant (codeSeg, addrs, m, ConstAbsolute) | putInlConst (addrs, InlineRelativeAddress m) = codeVecPutConstant (codeSeg, addrs, m, ConstX86Relative) val _ = List.app putInlConst inlineConstants (* Address constants - native 64-bit and 32-in-64. *) fun putAddrConst ((addrs, m), constAddr) = (* Put the constant in the constant area and set the original address to be the relative offset to the constant itself. *) ( codeVecPutWord (codeSeg, constAddr, m); (* Put in the 32-bit offset - always unsigned since the destination is after the reference. *) List.app(fn addr => setBytes(constAddr * wordSize - addr - 0w4, addr, 0w4)) addrs; constAddr+0w1 ) (* Put the constants. Any values in the constant area start at +3 i.e. after the profile. *) val _ = List.foldl putAddrConst (endOfByteArea+0w3) addressConstants val () = if printAssemblyCode then (* print out the code *) ( printCode(cvec, codeSeg); printStream "\n\n" ) else () in (* Finally lock the code. 
*) codeVecLock(codeSeg, resultClosure) end (* the result *) end (* generateCode *) structure Sharing = struct type code = code and reg = reg and genReg = genReg and fpReg = fpReg and addrs = addrs and operation = operation and regSet = RegSet.regSet and label = label and branchOps = branchOps and arithOp = arithOp and shiftType = shiftType and repOps = repOps and fpOps = fpOps and fpUnaryOps = fpUnaryOps and sse2Operations = sse2Operations and opSize = opSize and closureRef = closureRef end end (* struct *) (* CODECONS *); diff --git a/mlsource/MLCompiler/CodeTree/X86Code/X86PushRegisters.ML b/mlsource/MLCompiler/CodeTree/X86Code/X86PushRegisters.ML index e2211f81..a9122525 100644 --- a/mlsource/MLCompiler/CodeTree/X86Code/X86PushRegisters.ML +++ b/mlsource/MLCompiler/CodeTree/X86Code/X86PushRegisters.ML @@ -1,1534 +1,1535 @@ (* - Copyright David C. J. Matthews 2016-19 + Copyright David C. J. Matthews 2016-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) functor X86PushRegisters( structure ICODE: ICodeSig structure INTSET: INTSETSIG structure IDENTIFY: X86IDENTIFYREFSSIG sharing ICODE.Sharing = IDENTIFY.Sharing = INTSET ) : X86PUSHREGISTERSIG = struct open ICODE open INTSET open IDENTIFY (* Curried subscript functions *) fun asub a i = Array.sub(a, i) and vsub v i = Vector.sub(v, i) exception InternalError = Misc.InternalError (* Each preg in the input is mapped to either a new preg or the stack. *) datatype pregMapType = Unset | ToPReg of preg | ToStack of int * stackLocn (* The stack contains both entries in the input code and entries added here. It is really used to ensure that the stack at run time is the same size at the start of a block whichever block has jumped to it. *) datatype stackEntry = NewEntry of {pregNo: int} (* pregNo is the original preg that has been pushed here. *) | OriginalEntry of { stackLoc: stackLocn } | HandlerEntry fun addRegisterPushes{code: extendedBasicBlock vector, pushVec: bool vector, pregProps, firstPass} = let val maxPRegs = Vector.length pregProps val numberOfBlocks = Vector.length code (* Output registers and properties. *) val pregCounter = ref 0 val pregPropList = ref [] val pregMap = Array.array(maxPRegs, Unset) (* Cache registers. *) datatype cacheType = CacheStack of { rno: int }(* Original preg or stack loc. *) (* Cache memory location. This allows for general base/index/offset addressing but currently we only cache either NoMemIndex or ObjectIndex. *) | CacheMemory of { base: preg, offset: int, index: memoryIndex } (* CacheTagged is used if we tag a value to see if we can use the original untagged value somewhere. *) | CacheTagged of { reg: preg, isSigned: bool, opSize: opSize } (* CacheFloat is used if we tag a float (Real32.real). Double-precision reals (Real.real) are handled as CacheMemory *) | CacheFloat of { reg: preg } local (* The number of active cache entries is likely to be small and is at most proportional to the number of instructions in the block. 
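A plain list searched linearly is therefore sufficient.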
Any function call will clear it. For memory entries we need to know if the value is tagged and what kind of move we're using. Stack entries always will be tagged and MoveWord. *) val cache: {cacheFor: cacheType, cacheReg: preg, isTagged: bool, kind: moveKind } list ref = ref [] fun isStack n {cacheFor, ...} = cacheFor = CacheStack{rno = n} and isMemory (r, off, index) {cacheFor, ...} = cacheFor = CacheMemory {base = r, offset = off, index=index} and isTagCache(r, s, os) {cacheFor, ...} = cacheFor = CacheTagged{reg = r, isSigned = s, opSize = os} and isFloatCache r {cacheFor, ...} = cacheFor =CacheFloat{reg = r } fun findCache f = List.find f (! cache) fun removeCache f = cache := List.filter (not o f) (! cache) in fun clearCache() = cache := [] fun findCachedStack n = Option.map (#cacheReg) (findCache (isStack n)) and findCachedMemory (r, off, index, kind) = ( case findCache(isMemory (r, off, index)) of SOME {cacheReg, isTagged, kind=cacheKind, ...} => (* Must check the size of the operand. In particular we could have loaded the low order 32-bits in 32-in-64 but later want all 64-bits because it's a large-word. See Test182. *) if kind = cacheKind then SOME (cacheReg, isTagged, kind) else NONE | NONE => NONE ) and findCachedTagged (r, s, os) = Option.map #cacheReg (findCache(isTagCache (r, s, os))) and findCachedFloat r = Option.map #cacheReg (findCache(isFloatCache r)) fun removeStackCache n = removeCache (isStack n) and removeMemoryCache (r, off, index) = removeCache (isMemory (r, off, index)) and removeTagCache (r, s, os) = removeCache (isTagCache (r, s, os)) and removeFloatCache r = removeCache (isFloatCache r) fun clearMemoryCache() = cache := List.filter(fn {cacheFor=CacheMemory _,...} => false | _ => true) (!cache) fun setStackCache(n, new) = ( removeStackCache n; cache := {cacheFor=CacheStack{rno=n}, cacheReg=new, isTagged=true, kind=moveNativeWord} :: ! cache ) and setMemoryCache(r, off, index, new, isTagged, kind) = ( removeMemoryCache (r, off, index); cache := {cacheFor=CacheMemory{base=r, offset=off, index=index}, cacheReg=new, isTagged=isTagged, kind=kind} :: ! cache ) and setTagCache(r, s, os, new) = ( removeTagCache (r, s, os); cache := {cacheFor=CacheTagged{reg=r, isSigned=s, opSize=os}, cacheReg=new, isTagged=true, kind=moveNativeWord} :: ! cache ) and setFloatCache(r, new) = ( removeFloatCache r; cache := {cacheFor=CacheFloat{reg=r}, cacheReg=new, isTagged=true, kind=MoveFloat} :: ! cache ) fun getCache () = ! cache (* Merge the cache states *) fun setCommonCacheState [] = clearCache() | setCommonCacheState [single] = cache := single | setCommonCacheState (many as first :: rest) = let (* Generally we will either be unable to merge and have an empty cache or will have just one or two entries. *) (* Find the shortest. If it's empty we're done. *) fun findShortest(_, [], _) = [] | findShortest(_, shortest, []) = shortest | findShortest(len, shortest, hd::tl) = let val hdLen = length hd in if hdLen < len then findShortest(hdLen, hd, tl) else findShortest(len, shortest, tl) end val shortest = findShortest(length first, first, rest) (* Find the item we're caching for. If it is in a different register we can't use it. *) fun findItem search (hd::tl) = if #cacheFor hd = #cacheFor search then #cacheReg hd = #cacheReg search else findItem search tl | findItem _ [] = false (* It's present if it's in all the sources. 
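For example, if one predecessor block leaves a stack location cached in one register and another predecessor caches it in a different register, the entry is dropped because findItem requires the same cache register in every source.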
*) fun present search = List.all(findItem search) many val filtered = List.foldl (fn (search, l) => if present search then search :: l else l) [] shortest in cache := filtered end end val maxStack = ref 0 (* The stack size we've assumed for the block. Also indicates if a block has already been processed. *) val inputStackSizes = Array.array(numberOfBlocks, NONE: {expectedInput:int, reqCC: bool} option) (* The result of processing a block. *) val blockOutput = Array.array(numberOfBlocks, {code=[], cache=[], stackCount=0}) (* Extra blocks to adjust the stack are added here. *) val extraBlocks: basicBlock list ref = ref [] val blockCounter = ref numberOfBlocks (* Get the blocks that are inputs for each one. *) local val blockRefs = Array.array(numberOfBlocks, []) fun setReferences fromBlock = let val ExtendedBasicBlock{ flow, ...} = vsub code fromBlock val refs = successorBlocks flow fun setRefs toBlock = let val oldRefs = asub blockRefs toBlock in Array.update(blockRefs, toBlock, fromBlock :: oldRefs); if null oldRefs then setReferences toBlock else () end in List.app setRefs refs end val () = setReferences 0 in val blockRefs = blockRefs end (* Recursive scan of the blocks. For each block we produce an input and output state. The input state is the output state of the predecessor i.e. some block that jumps to this, but with any entries removed that are not used in this block. It is then necessary to match the input state, if necessary by adding extra blocks that just do the matching. *) local val haveProcessed = isSome o asub inputStackSizes fun processBlocks toDo = case List.filter (fn (n, _) => not(haveProcessed n)) toDo of [] => () (* Nothing left to do *) | stillToDo as head :: _ => let (* Try to find a block all of whose predecessors have been processed. That increases the chances that we will have cached items. *) fun available(dest, _) = List.all haveProcessed (Array.sub(blockRefs, dest)) val (blockNo, lastOutputState) = case List.find available stillToDo of SOME c => c | NONE => head (* This is the first time we've come to this block. *) val ExtendedBasicBlock{ block, flow, imports, passThrough, loopRegs, inCCState, initialStacks, ...} = vsub code blockNo val requiresCC = isSome inCCState (* Remove any items from the input state that are no longer needed for this block. They could be local to the previous block or needed by a different successor. Although the values in loopRegs are not required the stack space is so that they can be updated. *) fun removeItems(result as {stack=[], stackCount=0}) = result | removeItems{stack=[], ...} = raise InternalError "removeItems - stack size" | removeItems (thisStack as {stack=NewEntry{pregNo} :: rest, stackCount}) = if member(pregNo, imports) orelse member(pregNo, passThrough) orelse member(pregNo, loopRegs) then thisStack else removeItems{stack=rest, stackCount=stackCount-1} | removeItems (thisStack as {stack=OriginalEntry{stackLoc=StackLoc{rno, size}, ...} :: rest, stackCount}) = if member(rno, initialStacks) then thisStack else removeItems{stack=rest, stackCount=stackCount-size} | removeItems result = result val {stackCount=newSp, stack=newStack} = removeItems lastOutputState (* References to hold the current stack count (number of words on the stack) and the list of items on the stack. The list is not used directly to map stack addresses. Instead it is used to match the stack at the beginning and end of a block. 
*) val stackCount = ref newSp val stack = ref newStack (* Items from the stack that have been marked as deleted but not yet removed. We only remove items from the top of the stack to avoid quadratic behaviour with a very deep stack. *) val deletedItems = ref [] (* Save the stack size in case we come by a different route. *) val () = Array.update(inputStackSizes, blockNo, SOME{expectedInput=newSp, reqCC=requiresCC}) fun pushItemToStack item = let val size = case item of NewEntry _ => 1 | OriginalEntry{stackLoc=StackLoc{size, ...}, ...} => size | HandlerEntry => 2 in stackCount := ! stackCount+size; stack := item :: ! stack; maxStack := Int.max(!maxStack, !stackCount) end fun newPReg propKind = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := propKind :: !pregPropList in PReg regNo end and newStackLoc size = let val regNo = !pregCounter before pregCounter := !pregCounter + 1 val () = pregPropList := RegPropStack size :: !pregPropList in StackLoc{size=size, rno=regNo} end (* Map a source register. This always loads the argument. *) fun mapSrcRegEx(PReg n) = case Array.sub(pregMap, n) of Unset => raise InternalError "mapSrcReg - unset" | ToPReg preg => (preg, [], []) | ToStack(stackLoc, container as StackLoc{size, ...}) => let (* Make a new untagged register. That will prevent us pushing it if we have to spill registers. *) val newReg = newPReg RegPropUntagged val sourceCache = findCachedStack n val stackSource = StackLocation{wordOffset= !stackCount-stackLoc-size, container=container, field=0, cache=sourceCache} (* Because this is in a register we can copy it to a cache register. *) val newCacheReg = newPReg RegPropCacheTagged val () = setStackCache(n, newCacheReg) in (newReg, [LoadArgument{source=stackSource, dest=newReg, kind=moveNativeWord}], [CopyToCache{source=newReg, dest=newCacheReg, kind=moveNativeWord}]) end fun mapSrcReg srcReg = let val (newReg, codePre, codePost) = mapSrcRegEx srcReg in (newReg, codePost @ codePre) end fun mapDestReg(PReg n) = let val currentLocation = Array.sub(pregMap, n) val kind = Vector.sub(pregProps, n) in if Vector.sub(pushVec, n) then let (* This should not have been seen before. *) val _ = case currentLocation of Unset => () | _ => raise InternalError "mapDestReg - already set" val newReg = newPReg kind val newContainer = newStackLoc 1 val () = Array.update(pregMap, n, ToStack (!stackCount, newContainer)) val () = pushItemToStack(NewEntry{pregNo=n}) in (newReg, [PushValue{arg=RegisterArgument newReg, container=newContainer}]) end else let (* See if we already have a number for it. We may encounter the same preg as a destination when returning the result from a conditional in which case we have to use the same number. We shouldn't have pushed it. *) val newReg = case (currentLocation, kind) of (Unset, _) => let val newReg = newPReg kind val () = Array.update(pregMap, n, ToPReg newReg) in newReg end | (ToPReg preg, RegPropMultiple) => preg | _ => raise InternalError "mapDestReg - multiply defined non-merge reg" in (newReg, []) end end (* A work register must be a normal register. 
*) fun mapWorkReg(PReg n) = let val currentLocation = Array.sub(pregMap, n) val _ = Vector.sub(pushVec, n) andalso raise InternalError "mapWorkReg - MustPush" in case currentLocation of Unset => let val kind = Vector.sub(pregProps, n) val newReg = newPReg kind val () = Array.update(pregMap, n, ToPReg newReg) in newReg end | ToPReg preg => preg | ToStack _ => raise InternalError "mapWorkReg - on stack" end fun mapIndexEx(NoMemIndex) = (NoMemIndex, [], []) | mapIndexEx(MemIndex1 r) = let val (sreg, c1, c2) = mapSrcRegEx r in (MemIndex1 sreg, c1, c2) end | mapIndexEx(MemIndex2 r) = let val (sreg, c1, c2) = mapSrcRegEx r in (MemIndex2 sreg, c1, c2) end | mapIndexEx(MemIndex4 r) = let val (sreg, c1, c2) = mapSrcRegEx r in (MemIndex4 sreg, c1, c2) end | mapIndexEx(MemIndex8 r) = let val (sreg, c1, c2) = mapSrcRegEx r in (MemIndex8 sreg, c1, c2) end | mapIndexEx(ObjectIndex) = (ObjectIndex, [], []) fun mapIndex index = let val (newIndex, codePre, codePost) = mapIndexEx index in (newIndex, codePost @ codePre) end (* Adjust a stack offset from the old state to the new state. *) fun mapContainerAndStack(StackLoc{rno, size}, field) = let val (newStackAddr, newContainer) = case Array.sub(pregMap, rno) of Unset => raise InternalError "mapContainer - unset" | ToPReg _ => raise InternalError "mapContainer - ToPReg" | ToStack stackContainer => stackContainer val newOffset = !stackCount-(newStackAddr+size) + field in (newOffset, newContainer) end (* Add an entry for an existing stack entry. *) fun mapDestContainer(StackLoc{rno, size}, locn) = ( case Array.sub(pregMap, rno) of Unset => let val newContainer = newStackLoc size val () = Array.update(pregMap, rno, ToStack(locn, newContainer)) in newContainer end | _ => raise InternalError "mapDestContainer: already set" ) fun mapSourceEx(RegisterArgument(PReg r), _) = ( case Array.sub(pregMap, r) of Unset => raise InternalError "mapSource - unset" | ToPReg preg => (RegisterArgument preg, [], []) | ToStack(stackLoc, container as StackLoc{size, ...}) => let val sourceCache = findCachedStack r val stackLoc = StackLocation{wordOffset= !stackCount-stackLoc-size, container=container, field=0, cache=sourceCache} (* If this is cached we need to make a new cache register and copy it there. *) val cacheCode = case sourceCache of NONE => [] | SOME cacheR => let val newCacheReg = newPReg RegPropCacheTagged val () = setStackCache(r, newCacheReg) in [CopyToCache{source=cacheR, dest=newCacheReg, kind=moveNativeWord}] end in (stackLoc, [], cacheCode) end ) | mapSourceEx(a as AddressConstant _, _) = (a, [], []) | mapSourceEx(i as IntegerConstant _, _) = (i, [], []) | mapSourceEx(MemoryLocation{base, offset, index, cache, ...}, kind) = if (case index of NoMemIndex => true | ObjectIndex => true | _ => false) then let val (baseReg, baseCodePre, baseCodePost) = mapSrcRegEx base (* We can cache this if it is the first pass or if we have previously cached it and we haven't marked it as pushed. *) val newCache = case cache of NONE => if firstPass then findCachedMemory(base, offset, index, kind) else NONE | SOME (PReg c) => if Vector.sub(pushVec, c) then NONE (* We had marked this as to be pushed - we can't use a cache here. *) else findCachedMemory(base, offset, index, kind) val memLoc = MemoryLocation{base=baseReg, offset=offset, index=index, cache=Option.map #1 newCache} val cacheCode = case newCache of NONE => (removeMemoryCache(base, offset, index); []) | SOME (oldCacheReg, isTagged, kind) => let (* Set the cache kind. 
If this is the first pass we will have a general or untagged register. *) val cacheKind = if isTagged then RegPropCacheTagged else RegPropCacheUntagged val newCacheReg = newPReg cacheKind val () = setMemoryCache(base, offset, index, newCacheReg, isTagged, kind) in [CopyToCache{source=oldCacheReg, dest=newCacheReg, kind=kind}] end in (memLoc, baseCodePre, baseCodePost @ cacheCode) end else let val (baseReg, baseCodePre, baseCodePost) = mapSrcRegEx base val (indexValue, indexCodePre, indexCodePost) = mapIndexEx index in (MemoryLocation{base=baseReg, offset=offset, index=indexValue, cache=NONE}, baseCodePre @ indexCodePre, baseCodePost @ indexCodePost) end | mapSourceEx(StackLocation{container as StackLoc{rno, ...}, field, cache, ...}, _) = let val (newOffset, newContainer) = mapContainerAndStack(container, field) (* Was the item previously cached? If it wasn't or the cache reg has been marked as "must push" we can't use a cache. *) val newCache = case cache of NONE => NONE | SOME (PReg c) => if Vector.sub(pushVec, c) then NONE (* We had marked this as to be pushed - we can't use a cache here. *) else findCachedStack rno val stackLoc = StackLocation{wordOffset=newOffset, container=newContainer, field=field, cache=newCache} val cacheCode = case newCache of NONE => (removeStackCache rno; []) | SOME oldCacheReg => let val newCacheReg = newPReg RegPropCacheTagged val () = setStackCache(rno, newCacheReg) in [CopyToCache{source=oldCacheReg, dest=newCacheReg, kind=moveNativeWord}] end in (stackLoc, [], cacheCode) end | mapSourceEx(ContainerAddr{container, ...}, _) = let val (newOffset, newContainer) = mapContainerAndStack(container, 0) in (ContainerAddr{container=newContainer, stackOffset=newOffset}, [], []) end fun mapSource(src, kind) = let val (sourceVal, sourceCodePre, sourceCodePost) = mapSourceEx(src, kind) in (sourceVal, sourceCodePost @ sourceCodePre) end (* Force a load of the source into a register if it is on the stack. This is used in cases where a register or literal is allowed but not a memory location. If we do load it we can cache the register. *) fun mapAndLoad(source as RegisterArgument(PReg r), kind) = let val (sourceVal, sourceCodePre, sourceCodePost) = mapSourceEx(source, kind) in case sourceVal of stack as StackLocation _ => let val newReg = newPReg RegPropUntagged val newCacheReg = newPReg RegPropCacheTagged val _ = setStackCache(r, newCacheReg) in (RegisterArgument newReg, CopyToCache{source=newReg, dest=newCacheReg, kind=moveNativeWord} :: sourceCodePost @ LoadArgument{source=stack, dest=newReg, kind=moveNativeWord} :: sourceCodePre) end | _ => (sourceVal, sourceCodePost @ sourceCodePre) end | mapAndLoad(StackLocation _, _) = raise InternalError "mapAndLoad - already a stack loc" | mapAndLoad(MemoryLocation _, _) = raise InternalError "mapAndLoad - already a mem loc" | mapAndLoad(source, kind) = mapSource(source, kind) fun opSizeToMoveKind OpSize32 = Move32Bit | opSizeToMoveKind OpSize64 = Move64Bit (* Rewrite the code, replacing any registers that need to be pushed with references to the stack. The result is built up in reverse order and then reversed. *) fun pushRegisters({instr=LoadArgument{source, dest=PReg dReg, kind}, ...}, code) = if Vector.sub(pushVec, dReg) then (* We're going to push this. *) let val (sourceVal, sourceCode) = mapSource(source, kind) (* If we have to push the value we don't have to first load it into a register. 
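The mapped source can normally be pushed directly with PushValue; the only exception, handled below, is the ObjectId32Bit target where memory locations and address constants have to be loaded into an untagged register first.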
*) val _ = case Array.sub(pregMap, dReg) of Unset => () | _ => raise InternalError "LoadArgument - already set" val container = newStackLoc 1 val () = Array.update(pregMap, dReg, ToStack(! stackCount, container)) val () = pushItemToStack(NewEntry{pregNo=dReg}) in if targetArch = ObjectId32Bit andalso (case sourceVal of MemoryLocation _ => true | AddressConstant _ => true | _ => false) then let (* Push will always push a 64-bit value. We have to put it in a register first. For MemoryLocations that's because it would push 8 bytes; for AddressConstants that's because we don't have a way of pushing an unsigned 32-bit constant. *) val newReg = newPReg RegPropUntagged in PushValue{arg=RegisterArgument newReg, container=container} :: LoadArgument{source=sourceVal, dest=newReg, kind=movePolyWord} :: sourceCode @ code end else PushValue{arg=sourceVal, container=container} :: sourceCode @ code end else (* We're not going to push this. *) let val (sourceVal, sourceCodePre, sourceCodePost) = mapSourceEx(source, kind) val dKind = Vector.sub(pregProps, dReg) val destReg = case (Array.sub(pregMap, dReg), dKind) of (Unset, _) => let val newReg = newPReg dKind val () = Array.update(pregMap, dReg, ToPReg newReg) in newReg end | (ToPReg preg, RegPropMultiple) => preg | _ => raise InternalError "LoadArgument - multiply defined non-merge reg" (* Can we cache this? . *) val cacheCode = case source of MemoryLocation{base, offset, index, ...} => (* Only cache if we have a fixed offset (not indexed). *) if (case index of NoMemIndex => true | ObjectIndex => true | _ => false) then let (* The cache kind must match the kind of register we're loading. If the value is untagged it must not be marked to be examined by the GC if we allocate anything. The move kind has to be suitable for a register to register move. *) val (cacheType, isTagged) = case dKind of RegPropGeneral => (RegPropCacheTagged, true) (* Generally there's no point in caching a multiply-defined register because it is only used once but allow it in case the other definitions have been optimised out. *) | RegPropMultiple => (RegPropCacheTagged, true) | RegPropUntagged => (RegPropCacheUntagged, false) | _ => raise InternalError "cacheKind" val newCacheReg = newPReg cacheType val _ = setMemoryCache(base, offset, index, newCacheReg, isTagged, kind) val moveKind = case kind of Move64Bit => Move64Bit | MoveByte => Move32Bit | Move16Bit => Move32Bit | Move32Bit => Move32Bit | MoveFloat => MoveFloat | MoveDouble => MoveDouble in [CopyToCache{source=destReg, dest=newCacheReg, kind=moveKind}] end else [] | _ => [] val destCode = LoadArgument{source=sourceVal, dest=destReg, kind=kind} in cacheCode @ sourceCodePost @ destCode :: sourceCodePre @ code end | pushRegisters({instr=StoreArgument{source, offset, base, index, kind, isMutable}, ...}, code) = let val (loadedSource, sourceCode) = mapAndLoad(source, kind) (* We can't have a memory-memory store so we have to load the source if it's now on the stack. *) val (baseReg, baseCode) = mapSrcReg(base) val (indexValue, indexCode) = mapIndex(index) (* If we're assigning to a mutable we can no longer rely on the memory cache. Clear it completely in that case although we could be more selective. 
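The problem is aliasing: a store through one base register may update a location we have cached under a different base or offset, so after a mutable store none of the cached memory values can be trusted.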
*) val () = if isMutable then clearMemoryCache() else () in StoreArgument{source=loadedSource, base=baseReg, offset=offset, index=indexValue, kind=kind, isMutable=isMutable} :: indexCode @ baseCode @ sourceCode @ code end | pushRegisters({instr=LoadMemReg { offset, dest, kind}, ...}, code) = let val (destVal, destCode) = mapDestReg dest in destCode @ LoadMemReg { offset=offset, dest=destVal, kind=kind} :: code end | pushRegisters({instr=StoreMemReg { offset, source, kind}, ...}, code) = let val (sourceValue, sourceCode) = mapSrcReg source in StoreMemReg { offset=offset, source=sourceValue, kind=kind} :: sourceCode @ code end | pushRegisters({instr=BeginFunction {regArgs, stackArgs}, ...}, code) = let (* Create a new container list. The offsets begin at -numArgs. *) fun newContainers(src :: srcs, offset) = let val newContainer = mapDestContainer(src, offset) in newContainer :: newContainers(srcs, offset+1) end | newContainers _ = [] val newStackArgs = newContainers(stackArgs, ~ (List.length stackArgs)) (* Push any registers that need to be pushed. *) fun pushReg((preg, rreg), (others, code)) = let val (newReg, newCode) = mapDestReg(preg) in ((newReg, rreg) :: others, newCode @ code) end val (newRegArgs, pushCode) = List.foldl pushReg ([], []) regArgs in pushCode @ BeginFunction {regArgs=newRegArgs, stackArgs=newStackArgs} :: code end | pushRegisters({instr=FunctionCall{callKind, regArgs, stackArgs, dest, realDest, ...}, ...}, code) = let (* It's possible that this could lead to having to spill registers in order to load others. Leave that problem for the moment. *) fun loadStackArg (arg, (otherLoads, otherArgs)) = let val (argVal, loadCode) = mapSource(arg, movePolyWord) in (loadCode @ otherLoads, argVal :: otherArgs) end val (stackArgLoads, newStackArgs) = List.foldr loadStackArg ([], []) stackArgs fun loadRegArg ((arg, reg), (otherLoads, otherArgs)) = let val (argVal, loadCode) = mapSource(arg, movePolyWord) in (loadCode @ otherLoads, (argVal, reg) :: otherArgs) end val (regArgLoads, newRegArgs) = List.foldr loadRegArg ([], []) regArgs val (destVal, destCode) = mapDestReg dest (* Now clear the cache table. *) val () = clearCache() in destCode @ FunctionCall{ callKind=callKind, regArgs=newRegArgs, stackArgs=newStackArgs, dest=destVal, realDest=realDest, saveRegs=[]} :: regArgLoads @ stackArgLoads @ code end | pushRegisters({instr=TailRecursiveCall{callKind, regArgs, stackArgs, stackAdjust, workReg, ...}, ...}, code) = let val newWorkReg = mapWorkReg workReg val newStackOffset = !stackCount fun loadStackArg ({src, stack}, (otherLoads, otherArgs)) = let val (argVal, loadCode) = case mapSource(src, movePolyWord) of (source as StackLocation{wordOffset, ...}, loadCode) => (* If we're leaving it in its old location or we're pushing it above the current top we're ok. We're also ok if we're moving it from a somewhere above the last argument. Otherwise we have to load it. It goes into a normal tagged register which may mean that it could be pushed onto the stack in a subsequent pass. 
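In other words an argument can stay where it is if its current wordOffset is exactly the destination slot, or if it lives outside the area that the new arguments will overwrite; anything else is loaded into a fresh general register first.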
*) if wordOffset = stack+newStackOffset orelse stack+newStackOffset < 0 orelse newStackOffset-wordOffset > ~ stackAdjust then (source, loadCode) else let val preg = newPReg RegPropGeneral in (RegisterArgument preg, LoadArgument{source=source, dest=preg, kind=moveNativeWord} :: loadCode) end | argCode => argCode in (loadCode @ otherLoads, {src=argVal, stack=stack} :: otherArgs) end val (stackArgLoads, newStackArgs) = List.foldr loadStackArg ([], []) stackArgs fun loadRegArg ((arg, reg), (otherLoads, otherArgs)) = let val (argVal, loadCode) = mapSource(arg, movePolyWord) in (loadCode @ otherLoads, (argVal, reg) :: otherArgs) end val (regArgLoads, newRegArgs) = List.foldr loadRegArg ([], []) regArgs in TailRecursiveCall{ callKind=callKind, regArgs=newRegArgs, stackArgs=newStackArgs, stackAdjust=stackAdjust, currStackSize=newStackOffset, workReg=newWorkReg} :: regArgLoads @ stackArgLoads @ code end | pushRegisters({instr=AllocateMemoryOperation{size, flags, dest, ...}, ...}, code) = let val (destVal, destCode) = mapDestReg dest in destCode @ AllocateMemoryOperation{size=size, flags=flags, dest=destVal, saveRegs=[]} :: code end | pushRegisters({instr=AllocateMemoryVariable{size, dest, ...}, ...}, code) = let val (sizeVal, sizeCode) = mapSrcReg size val (destVal, destCode) = mapDestReg dest in destCode @ AllocateMemoryVariable{size=sizeVal, dest=destVal, saveRegs=[]} :: sizeCode @ code end | pushRegisters({instr=InitialiseMem{size, addr, init}, ...}, code) = let val (sizeVal, sizeCode) = mapSrcReg size val (addrVal, addrCode) = mapSrcReg addr val (initVal, initCode) = mapSrcReg init in InitialiseMem{size=sizeVal, addr=addrVal, init=initVal} :: initCode @ addrCode @ sizeCode @ code end | pushRegisters({instr=InitialisationComplete, ...}, code) = InitialisationComplete :: code | pushRegisters({instr=BeginLoop, ...}, code) = BeginLoop :: code | pushRegisters({instr=JumpLoop{regArgs, stackArgs, checkInterrupt, workReg}, ...}, code) = let (* Normally JumpLoop will be the last item in a block but it is possible that we've added a reset-stack after it. *) fun getValues [] = ([], [], []) | getValues ((source, PReg n) :: rest) = let val (otherRegArgs, otherStackArgs, otherCode) = getValues rest in case Array.sub(pregMap, n) of ToPReg lReg => let val (sourceVal, sourceCode) = mapSource(source, movePolyWord) in ((sourceVal, lReg) :: otherRegArgs, otherStackArgs, sourceCode @ otherCode) end | ToStack(stackLoc, stackC as StackLoc{size, ...}) => let val (sourceVal, sourceCode) = mapSource(source, movePolyWord) val stackOff = !stackCount - stackLoc - size in (otherRegArgs, (sourceVal, stackOff, stackC) :: otherStackArgs, sourceCode @ otherCode) end | Unset => (* Drop it. It's never used. This can happen if we are folding a function over a list such that it always returns the last value and then discard the result of the fold. *) (otherRegArgs, otherStackArgs, otherCode) end val (newRegArguments, newStackArgs, sourceCode) = getValues regArgs fun loadStackArg((source, _, destC), (otherLoads, otherArgs)) = let val (sourceVal, sourceCode) = mapSource(source, movePolyWord) val (newOffset, newContainer) = mapContainerAndStack(destC, 0) in (sourceCode @ otherLoads, (sourceVal, newOffset, newContainer) :: otherArgs) end val (stackArgLoads, oldStackArgs) = List.foldr loadStackArg ([], []) stackArgs val check = case checkInterrupt of NONE => NONE | SOME _ => SOME [] (* Map the work reg if it exists already but get a new one if we now have stack args. 
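JumpLoop only needs a work register if there are stack arguments to store, so a loop that previously had none may have no work register; if this pass has produced stack arguments we allocate a fresh general register for it.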
*) val newWorkReg = case (workReg, newStackArgs) of (SOME r, _) => SOME(mapWorkReg r) | (NONE, []) => NONE | _ => SOME(newPReg RegPropGeneral) in JumpLoop{ regArgs=newRegArguments, stackArgs=oldStackArgs @ newStackArgs, checkInterrupt=check, workReg=newWorkReg} :: sourceCode @ stackArgLoads @ code end | pushRegisters({instr=RaiseExceptionPacket{packetReg}, ...}, code) = let val (packetVal, packetCode) = mapSrcReg packetReg in RaiseExceptionPacket{packetReg=packetVal} :: packetCode @ code end | pushRegisters({instr=ReserveContainer{size, container}, ...}, code) = let val newContainer = mapDestContainer(container, !stackCount) val () = pushItemToStack(OriginalEntry{stackLoc=container}) in ReserveContainer{size=size, container=newContainer} :: code end | pushRegisters({instr=IndexedCaseOperation{testReg, workReg}, ...}, code) = let val (srcVal, srcCode) = mapSrcReg(testReg) val newWorkReg = mapWorkReg workReg in (* This is an unconditional branch. *) IndexedCaseOperation{testReg=srcVal, workReg=newWorkReg} :: srcCode @ code end | pushRegisters({instr=LockMutable{addr}, ...}, code) = let val (addrVal, addrCode) = mapSrcReg(addr) in LockMutable{addr=addrVal} :: addrCode @ code end | pushRegisters({instr=WordComparison{arg1, arg2, ccRef, opSize}, ...}, code) = let val (loadedOp1, op1Code) = mapSrcReg arg1 val (op2Val, op2Code) = mapSource(arg2, movePolyWord) in WordComparison{arg1=loadedOp1, arg2=op2Val, ccRef=ccRef, opSize=opSize} :: op2Code @ op1Code @ code end | pushRegisters({instr=CompareLiteral{arg1, arg2, opSize, ccRef}, ...}, code) = let val (op1Val, op1Code) = mapSource(arg1, movePolyWord) in CompareLiteral{arg1=op1Val, arg2=arg2, opSize=opSize, ccRef=ccRef} :: op1Code @ code end | pushRegisters({instr=CompareByteMem{arg1={base, offset, index, ...}, arg2, ccRef}, ...}, code) = let val (baseReg, baseCode) = mapSrcReg base val (indexValue, indexCode) = mapIndex index val newArg1 = {base=baseReg, offset=offset, index=indexValue} in CompareByteMem{arg1=newArg1, arg2=arg2, ccRef=ccRef} :: indexCode @ baseCode @ code end | pushRegisters({instr=PushExceptionHandler{workReg}, ...}, code) = let val newWorkReg = mapWorkReg workReg (* Add a handler entry to the stack. *) val () = pushItemToStack HandlerEntry in PushExceptionHandler{workReg=newWorkReg} :: code end | pushRegisters({instr=PopExceptionHandler{workReg, ...}, ...}, code) = let val newWorkReg = mapWorkReg workReg (* Appears at the end of the block whose exceptions are being handled. Delete the handler and anything above it. *) (* Get the state after removing the handler. *) fun popContext ([], _) = raise InternalError "pushRegisters - pop handler" | popContext (HandlerEntry :: tl, new) = (tl, new-2) | popContext (OriginalEntry{stackLoc=StackLoc{size, ...}, ...} :: tl, new) = popContext(tl, new-size) | popContext (NewEntry _ :: tl, new) = popContext(tl, new-1) val (newStack, nnCount) = popContext(!stack, !stackCount) val () = stack := newStack val oldStackPtr = ! stackCount val () = stackCount := nnCount (* Reset the stack to just above the two words of the handler. *) val resetCode = if oldStackPtr <> nnCount+2 then [ResetStackPtr{numWords=oldStackPtr-nnCount-2, preserveCC=false}] else [] in PopExceptionHandler{workReg=newWorkReg} :: resetCode @ code end | pushRegisters({instr=BeginHandler{packetReg, workReg, ...}, ...}, code) = let (* Clear the cache. This may not be necessary if we are only handling locally generated exceptions but keep it for the moment. *) val () = clearCache() (* Start of a handler. 
The top active entry should be the handler. *) val () = case !stack of HandlerEntry :: tl => stack := tl | _ => raise InternalError "pushRegisters: BeginHandler" val () = stackCount := !stackCount - 2 val newWorkReg = mapWorkReg workReg val (pktReg, pktCode) = mapDestReg(packetReg) in pktCode @ BeginHandler{packetReg=pktReg, workReg=newWorkReg} :: code end | pushRegisters({instr=ReturnResultFromFunction{resultReg, realReg, numStackArgs}, ...}, code) = let val (resultValue, loadResult) = mapSrcReg resultReg val resetCode = if !stackCount = 0 then [] else [ResetStackPtr{numWords= !stackCount, preserveCC=false}] in ReturnResultFromFunction{resultReg=resultValue, realReg=realReg, numStackArgs=numStackArgs} :: resetCode @ loadResult @ code end | pushRegisters({instr=ArithmeticFunction{oper, resultReg, operand1, operand2, ccRef, opSize}, ...}, code) = let val (loadedOp1, op1Code) = mapSrcReg operand1 val (op2Val, op2Code) = mapSource(operand2, opSizeToMoveKind opSize) val (destVal, destCode) = mapDestReg resultReg in destCode @ ArithmeticFunction{oper=oper, resultReg=destVal, operand1=loadedOp1, operand2=op2Val, ccRef=ccRef, opSize=opSize} :: op2Code @ op1Code @ code end | pushRegisters({instr=TestTagBit{arg, ccRef}, ...}, code) = let val (sourceVal, sourceCode) = mapSource(arg, movePolyWord) in TestTagBit{arg=sourceVal, ccRef=ccRef} :: sourceCode @ code end | pushRegisters({instr=PushValue{arg, container, ...}, ...}, code) = let val (sourceVal, sourceCode) = mapSource(arg, movePolyWord) (* This was a push from a previous pass. Treat as a container of size 1. *) val newContainer = mapDestContainer(container, !stackCount) val () = pushItemToStack(OriginalEntry{stackLoc=container}) in PushValue{arg=sourceVal, container=newContainer} :: sourceCode @ code end | pushRegisters({instr=CopyToCache _, ...}, code) = code (* This was added on a previous pass. Discard it. If we are going to cache this again we'll add new CopyToCache instructions. *) | pushRegisters({instr=ResetStackPtr _, ...}, code) = code (* Added in a previous pass - discard it. *) | pushRegisters({instr=StoreToStack{source, container, field, ...}, ...}, code) = let val (loadedSource, sourceCode) = mapAndLoad(source, movePolyWord) (* We can't have a memory-memory store so we have to load the source if it's now on the stack. *) val (newOffset, newContainer) = mapContainerAndStack(container, field) in StoreToStack{source=loadedSource, container=newContainer, field=field, stackOffset=newOffset} :: sourceCode @ code end | pushRegisters({instr=TagValue{source, dest, isSigned, opSize}, ...}, code) = let val (sourceVal, sourceCode) = mapSrcReg source val (destVal, destCode) = mapDestReg dest val _ = setTagCache(dest, isSigned, opSize, sourceVal) in destCode @ TagValue{source=sourceVal, dest=destVal, isSigned=isSigned, opSize=opSize} :: sourceCode @ code end | pushRegisters({instr=UntagValue{source, dest, isSigned, cache, opSize, ...}, ...}, code) = let val (loadedSource, sourceCode) = mapSrcReg source val (destVal, destCode) = mapDestReg dest (* As with MemoryLocation caching, we can try caching it if this is the first pass but otherwise we can only retain the caching if we have never marked it to be pushed. *) val newCache = case cache of NONE => if firstPass then findCachedTagged(source, isSigned, opSize) else NONE | SOME (PReg c) => if Vector.sub(pushVec, c) then NONE (* We had marked this as to be pushed - we can't use a cache here. 
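A register that is due to be pushed will end up on the stack rather than in a register, so it cannot act as a register cache.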
*) else findCachedTagged(source, isSigned, opSize) in destCode @ UntagValue{source=loadedSource, dest=destVal, isSigned=isSigned, cache=newCache, opSize=opSize} :: sourceCode @ code end | pushRegisters({instr=LoadEffectiveAddress{base, offset, index, dest, opSize}, ...}, code) = let val (baseVal, baseCode) = case base of SOME bReg => let val (newBReg, regCode) = mapSrcReg(bReg) in (SOME newBReg, regCode) end | NONE => (NONE, []) val (indexVal, indexCode) = mapIndex index val (destVal, destCode) = mapDestReg dest in destCode @ LoadEffectiveAddress{base=baseVal, offset=offset, index=indexVal, dest=destVal, opSize=opSize} :: indexCode @ baseCode @ code end | pushRegisters({instr=ShiftOperation{shift, resultReg, operand, shiftAmount, ccRef, opSize}, ...}, code) = let val (opVal, opCode) = mapSrcReg operand val (shiftVal, shiftCode) = mapSource(shiftAmount, opSizeToMoveKind opSize) val (destVal, destCode) = mapDestReg resultReg in destCode @ ShiftOperation{shift=shift, resultReg=destVal, operand=opVal, shiftAmount=shiftVal, ccRef=ccRef, opSize=opSize} :: shiftCode @ opCode @ code end | pushRegisters({instr=Multiplication{resultReg, operand1, operand2, ccRef, opSize}, ...}, code) = let val (op1Val, op1Code) = mapSrcReg operand1 val (op2Val, op2Code) = mapSource(operand2, opSizeToMoveKind opSize) val (destVal, destCode) = mapDestReg resultReg in destCode @ Multiplication{resultReg=destVal, operand1=op1Val, operand2=op2Val, ccRef=ccRef, opSize=opSize} :: op2Code @ op1Code @ code end | pushRegisters({instr=Division{isSigned, dividend, divisor, quotient, remainder, opSize}, ...}, code) = let val (dividendVal, dividendCode) = mapSrcReg dividend val (divisorVal, divisorCode) = mapSource(divisor, opSizeToMoveKind opSize) val (quotVal, quotCode) = mapDestReg quotient val (remVal, remCode) = mapDestReg remainder in remCode @ quotCode @ Division{isSigned=isSigned, dividend=dividendVal, divisor=divisorVal, quotient=quotVal, remainder=remVal, opSize=opSize} :: divisorCode @ dividendCode @ code end - | pushRegisters({instr=AtomicExchangeAndAdd{base, source}, ...}, code) = + | pushRegisters({instr=AtomicExchangeAndAdd{base, source, resultReg}, ...}, code) = let val (baseVal, baseCode) = mapSrcReg(base) val (sourceVal, sourceCode) = mapSrcReg source - (* The "source" is also a result and must be in a register. It's an untagged reg - so it shouldn't have been marked as to be pushed. *) - val _ = case sourceCode of [] => () | _ => raise InternalError "pushRegisters - AtomicExchangeAndAdd" + val (destVal, destCode) = mapDestReg resultReg in - AtomicExchangeAndAdd{base=baseVal, source=sourceVal} :: baseCode @ code + destCode @ + AtomicExchangeAndAdd{base=baseVal, source=sourceVal, resultReg=destVal} :: sourceCode @ baseCode @ code end | pushRegisters({instr=BoxValue{boxKind, source, dest as PReg dReg, ...}, ...}, code) = let val (sourceVal, sourceCode) = mapSrcReg source val (destVal, destCode) = mapDestReg dest (* We can cache the boxed value except if this is an X87 box. We can't cache X87 values because there's effectively only one register and this box instruction uses FSTP (store and POP). 
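Once the FSTP has run the value is no longer in an X87 register so there is nothing left to cache. For the other box kinds the unboxed value is still in a register and we record it in the memory cache as the untagged contents of offset 0 of the new box.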
*) val cacheCode = if Vector.sub(pushVec, dReg) orelse boxKind = BoxX87Double orelse boxKind = BoxX87Float then [] else let val newCacheReg = newPReg RegPropCacheUntagged val moveKind = case boxKind of BoxLargeWord => moveNativeWord | BoxX87Double => MoveDouble | BoxX87Float => MoveFloat | BoxSSE2Double => MoveDouble | BoxSSE2Float => MoveFloat val indexKind = case targetArch of ObjectId32Bit => ObjectIndex | _ => NoMemIndex (* The value we're putting in the cache is untagged. *) val _ = setMemoryCache(dest, 0, indexKind, newCacheReg, false, moveKind) in [CopyToCache{source=sourceVal, dest=newCacheReg, kind=moveKind}] end in cacheCode @ destCode @ BoxValue{boxKind=boxKind, source=sourceVal, dest=destVal, saveRegs=[]} :: sourceCode @ code end | pushRegisters({instr=CompareByteVectors{vec1Addr, vec2Addr, length, ccRef}, ...}, code) = let val (vec1Val, vec1Code) = mapSrcReg vec1Addr val (vec2Val, vec2Code) = mapSrcReg vec2Addr val (lengthVal, lengthCode) = mapSrcReg length in CompareByteVectors{vec1Addr=vec1Val, vec2Addr=vec2Val, length=lengthVal, ccRef=ccRef} :: lengthCode @ vec2Code @ vec1Code @ code end | pushRegisters({instr=BlockMove{srcAddr, destAddr, length, isByteMove}, ...}, code) = let val (srcVal, srcCode) = mapSrcReg srcAddr val (destVal, destCode) = mapSrcReg destAddr val (lengthVal, lengthCode) = mapSrcReg length (* For safety clear the memory cache here. That may not be necessary. *) val () = clearMemoryCache() in BlockMove{srcAddr=srcVal, destAddr=destVal, length=lengthVal, isByteMove=isByteMove} :: lengthCode @ destCode @ srcCode @ code end | pushRegisters({instr=X87Compare{arg1, arg2, isDouble, ccRef}, ...}, code) = let val (arg1Val, arg1Code) = mapSrcReg arg1 val (arg2Val, arg2Code) = mapSource(arg2, if isDouble then Move64Bit else Move32Bit) in X87Compare{arg1=arg1Val, arg2=arg2Val, isDouble=isDouble, ccRef=ccRef} :: arg2Code @ arg1Code @ code end | pushRegisters({instr=SSE2Compare{arg1, arg2, isDouble, ccRef}, ...}, code) = let val (arg1Val, arg1Code) = mapSrcReg arg1 val (arg2Val, arg2Code) = mapSource(arg2, if isDouble then Move64Bit else Move32Bit) in SSE2Compare{arg1=arg1Val, arg2=arg2Val, ccRef=ccRef, isDouble=isDouble} :: arg2Code @ arg1Code @ code end | pushRegisters({instr=X87FPGetCondition{dest, ccRef}, ...}, code) = let val (destVal, destCode) = mapDestReg dest in destCode @ X87FPGetCondition{dest=destVal, ccRef=ccRef} :: code end | pushRegisters({instr=X87FPArith{opc, resultReg, arg1, arg2, isDouble}, ...}, code) = let val (arg1Val, arg1Code) = mapSrcReg arg1 val (arg2Val, arg2Code) = mapSource(arg2, if isDouble then Move64Bit else Move32Bit) val (destVal, destCode) = mapDestReg resultReg in destCode @ X87FPArith{opc=opc, resultReg=destVal, arg1=arg1Val, arg2=arg2Val, isDouble=isDouble} :: arg2Code @ arg1Code @ code end | pushRegisters({instr=X87FPUnaryOps{fpOp, dest, source}, ...}, code) = let val (sourceVal, sourceCode) = mapSrcReg source val (destVal, destCode) = mapDestReg dest in destCode @ X87FPUnaryOps{fpOp=fpOp, dest=destVal, source=sourceVal} :: sourceCode @ code end | pushRegisters({instr=X87Float{dest, source}, ...}, code) = let val (sourceVal, sourceCode) = mapSource(source, movePolyWord) val (destVal, destCode) = mapDestReg dest in destCode @ X87Float{dest=destVal, source=sourceVal} :: sourceCode @ code end | pushRegisters({instr=SSE2Float{dest, source}, ...}, code) = let val (sourceVal, sourceCode) = mapSource(source, movePolyWord) val (destVal, destCode) = mapDestReg dest in destCode @ SSE2Float{dest=destVal, source=sourceVal} :: sourceCode @ 
code end | pushRegisters({instr=SSE2FPUnary{opc, resultReg, source}, ...}, code) = let val (argVal, argCode) = mapSource(source, case opc of SSE2UDoubleToFloat => Move64Bit | SSE2UFloatToDouble => Move32Bit) val (destVal, destCode) = mapDestReg resultReg in destCode @ SSE2FPUnary{opc=opc, resultReg=destVal, source=argVal} :: argCode @ code end | pushRegisters({instr=SSE2FPBinary{opc, resultReg, arg1, arg2}, ...}, code) = let val argMove = case opc of SSE2BAddDouble => Move64Bit | SSE2BSubDouble => Move64Bit | SSE2BMulDouble => Move64Bit | SSE2BDivDouble => Move64Bit | SSE2BXor => Move64Bit (* Actually 128 bit but always in a reg. *) | SSE2BAnd => Move64Bit | SSE2BAddSingle => Move32Bit | SSE2BSubSingle => Move32Bit | SSE2BMulSingle => Move32Bit | SSE2BDivSingle => Move32Bit val (arg1Val, arg1Code) = mapSrcReg arg1 val (arg2Val, arg2Code) = mapSource(arg2, argMove) val (destVal, destCode) = mapDestReg resultReg in destCode @ SSE2FPBinary{opc=opc, resultReg=destVal, arg1=arg1Val, arg2=arg2Val} :: arg2Code @ arg1Code @ code end | pushRegisters({instr=TagFloat{source, dest}, ...}, code) = let val (sourceVal, sourceCode) = mapSrcReg source val (destVal, destCode) = mapDestReg dest val _ = setFloatCache(dest, sourceVal) in destCode @ TagFloat{source=sourceVal, dest=destVal} :: sourceCode @ code end | pushRegisters({instr=UntagFloat{source as RegisterArgument srcReg, dest, cache, ...}, ...}, code) = let val (sourceVal, sourceCode) = mapSource(source, Move32Bit) val (destVal, destCode) = mapDestReg dest (* As with MemoryLocation caching, we can try caching it if this is the first pass but otherwise we can only retain the caching if we have never marked it to be pushed. *) val newCache = case cache of NONE => if firstPass then findCachedFloat srcReg else NONE | SOME (PReg c) => if Vector.sub(pushVec, c) then NONE (* We had marked this as to be pushed - we can't use a cache here. *) else findCachedFloat srcReg in destCode @ UntagFloat{source=sourceVal, dest=destVal, cache=newCache} :: sourceCode @ code end | pushRegisters({instr=UntagFloat{source, dest, ...}, ...}, code) = (* This may also be a memory location in which case we don't cache. 
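The clause above only matches a RegisterArgument source; anything else falls through to here and the cache field is simply set to NONE.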
*) let val (sourceVal, sourceCode) = mapSource(source, Move32Bit) val (destVal, destCode) = mapDestReg dest in destCode @ UntagFloat{source=sourceVal, dest=destVal, cache=NONE} :: sourceCode @ code end | pushRegisters({instr=GetSSE2ControlReg{dest}, ...}, code) = let val (destVal, destCode) = mapDestReg dest in destCode @ GetSSE2ControlReg{dest=destVal} :: code end | pushRegisters({instr=SetSSE2ControlReg{source}, ...}, code) = let val (sourceVal, sourceCode) = mapSrcReg source in SetSSE2ControlReg{source=sourceVal} :: sourceCode @ code end | pushRegisters({instr=GetX87ControlReg{dest}, ...}, code) = let val (destVal, destCode) = mapDestReg dest in destCode @ GetX87ControlReg{dest=destVal} :: code end | pushRegisters({instr=SetX87ControlReg{source}, ...}, code) = let val (sourceVal, sourceCode) = mapSrcReg source in SetX87ControlReg{source=sourceVal} :: sourceCode @ code end | pushRegisters({instr=X87RealToInt{source, dest}, ...}, code) = let val (sourceVal, sourceCode) = mapSrcReg source val (destVal, destCode) = mapDestReg dest in destCode @ X87RealToInt{source=sourceVal, dest=destVal} :: sourceCode @ code end | pushRegisters({instr=SSE2RealToInt{source, dest, isDouble, isTruncate}, ...}, code) = let val (srcVal, sourceCode) = mapSource(source, if isDouble then Move64Bit else Move32Bit) val (destVal, destCode) = mapDestReg dest in destCode @ SSE2RealToInt{source=srcVal, dest=destVal, isDouble=isDouble, isTruncate=isTruncate} :: sourceCode @ code end | pushRegisters({instr=SignExtend32To64{source, dest}, ...}, code) = let val (sourceVal, sourceCode) = mapSource(source, Move32Bit) val (destVal, destCode) = mapDestReg dest in destCode @ SignExtend32To64{source=sourceVal, dest=destVal} :: sourceCode @ code end | pushRegisters({instr=TouchArgument{source}, ...}, code) = let val (sourceVal, sourceCode) = mapSrcReg source in TouchArgument{source=sourceVal} :: sourceCode @ code end + | pushRegisters({instr=PauseCPU, ...}, code) = PauseCPU :: code + (* Find the common cache state. *) val () = setCommonCacheState(List.map (#cache o asub blockOutput) (asub blockRefs blockNo)) local fun doPush(instr as {kill, ...}, code) = let val newCode = pushRegisters(instr, code) (* Can we pop the stack? *) val stackReset = case setToList (minus(kill, loopRegs)) of [] => [] | killList => let (* See if any of the kill items are at the top of the stack. If they are we can pop them and perhaps items we've previously marked for deletion but not been able to pop. *) val oldStack = !stackCount fun checkAndAdd(r, output) = case Array.sub(pregMap, r) of ToStack(stackLoc, StackLoc{size, ...}) => if stackLoc < 0 then r :: output (* We can have arguments and return address. *) else if !stackCount = stackLoc+size then ( stack := tl (!stack); stackCount := stackLoc; output ) else r :: output | _ => r :: output val toAdd = List.foldl checkAndAdd [] killList fun reprocess list = let val prevStack = !stackCount val outlist = List.foldl checkAndAdd [] list in if !stackCount = prevStack then list else reprocess outlist end val () = if !stackCount = oldStack then deletedItems := toAdd @ !deletedItems else deletedItems := reprocess(toAdd @ !deletedItems) val _ = oldStack >= !stackCount orelse raise InternalError "negative stack offset" in if !stackCount = oldStack then [] else [ResetStackPtr{numWords=oldStack - !stackCount, preserveCC=true (* In case*)}] end in stackReset @ newCode end in val codeResult = List.foldl doPush [] block val outputCount = ! 
stackCount val results = {code=codeResult, cache=getCache(), stackCount= outputCount} val stateResult = { stackCount= outputCount, stack= !stack } val () = Array.update(blockOutput, blockNo, results) end val addSet = case flow of ExitCode => [] | IndexedBr cases => cases | Unconditional dest => [dest] | Conditional {trueJump, falseJump, ...} => [falseJump, trueJump] | SetHandler { handler, continue } => [handler, continue] | UnconditionalHandle _ => [] | ConditionalHandle { continue, ...} => [continue] val addItems = List.map(fn m => (m, stateResult)) addSet in processBlocks(addItems @ stillToDo) end in val () = processBlocks([(0, {stack=[], stackCount=0})]) end (* Put together the result code and blocks. *) local fun createBlock blockNo = (* Skip unreferenced blocks apart from block 0. *) if blockNo <> 0 andalso null (asub blockRefs blockNo) then BasicBlock{block=[], flow=ExitCode} else let val ExtendedBasicBlock{ flow, ...} = vsub code blockNo val {code=codeResult, stackCount=outputCount, ...} = asub blockOutput blockNo (* Process the successor. If we need a stack adjustment this will require an adjustment block. TODO: We could put a pre-adjustment if we only have one branch to this block. *) fun matchStacks targetBlock = let (* Process the destination. If it hasn't been processed. *) val {expectedInput, ...} = valOf (asub inputStackSizes targetBlock) in if expectedInput = outputCount then targetBlock else let val _ = outputCount > expectedInput orelse raise InternalError "adjustStack" val adjustCode = [ResetStackPtr{numWords=outputCount-expectedInput, preserveCC=true (* For the moment *)}] val newBlock = BasicBlock{block=adjustCode, flow=Unconditional targetBlock} val newBlockNo = !blockCounter before blockCounter := !blockCounter+1 val () = extraBlocks := newBlock :: !extraBlocks in newBlockNo end end val (finalCode, newFlow) = case flow of ExitCode => (codeResult, ExitCode) | Unconditional m => let (* Process the block. Since we're making an unconditional jump we can include any stack adjustment needed to match the destination in here. In particular this includes loops. *) val {expectedInput, reqCC} = valOf (asub inputStackSizes m) val _ = outputCount >= expectedInput orelse raise InternalError "negative reset" val resultCode = if expectedInput = outputCount then codeResult else ResetStackPtr{numWords=outputCount-expectedInput, preserveCC=reqCC} :: codeResult in (resultCode, Unconditional m) end (* For any of these, if we need to adjust the stack we have to add an adjustment block. *) | Conditional {trueJump, falseJump, ccRef, condition} => (codeResult, Conditional{trueJump=matchStacks trueJump, falseJump=matchStacks falseJump, ccRef=ccRef, condition=condition}) | SetHandler{ handler, continue } => (codeResult, SetHandler{ handler=matchStacks handler, continue=matchStacks continue}) | IndexedBr cases => (codeResult, IndexedBr(map matchStacks cases)) | u as UnconditionalHandle _ => (codeResult, u) | c as ConditionalHandle{ continue, ... } => let (* As for unconditional branch *) val {expectedInput, reqCC} = valOf (asub inputStackSizes continue) val _ = outputCount >= expectedInput orelse raise InternalError "negative reset" val resultCode = if expectedInput = outputCount then codeResult else ResetStackPtr{numWords=outputCount-expectedInput, preserveCC=reqCC} :: codeResult in (resultCode, c) end in BasicBlock{block=List.rev finalCode, flow=newFlow} end in val resultBlocks = Vector.tabulate(numberOfBlocks, createBlock) end (* Add any extra blocks to the result. 
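These are the stack-adjustment blocks created by matchStacks. They are appended after the original blocks so the existing block numbers remain valid; the new blocks use the numbers allocated from blockCounter.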
*) val finalResult = case !extraBlocks of [] => resultBlocks | blocks => Vector.concat[resultBlocks, Vector.fromList(List.rev blocks)] val pregProperties = Vector.fromList(List.rev(! pregPropList)) in {code=finalResult, pregProps=pregProperties, maxStack= !maxStack} end structure Sharing = struct type x86ICode = x86ICode and preg = preg and intSet = intSet and extendedBasicBlock = extendedBasicBlock and basicBlock = basicBlock and regProperty = regProperty end end; diff --git a/mlsource/MLCompiler/INITIALISE_.ML b/mlsource/MLCompiler/INITIALISE_.ML index 1e2c60c2..e0d8c63f 100644 --- a/mlsource/MLCompiler/INITIALISE_.ML +++ b/mlsource/MLCompiler/INITIALISE_.ML @@ -1,2074 +1,2074 @@ (* Copyright (c) 2000 Cambridge University Technical Services Limited - Updated David C.J. Matthews 2008-9, 2012, 2013, 2015-20 + Updated David C.J. Matthews 2008-9, 2012, 2013, 2015-21 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *) (* Title: Initialise ML Global Declarations. Author: Dave Matthews,Cambridge University Computer Laboratory Copyright Cambridge University 1985 *) functor INITIALISE_ ( structure LEX: LEXSIG structure TYPETREE : TYPETREESIG structure STRUCTVALS : STRUCTVALSIG structure VALUEOPS : VALUEOPSSIG structure CODETREE : CODETREESIG structure EXPORTTREE: EXPORTTREESIG structure DATATYPEREP: DATATYPEREPSIG structure TYPEIDCODE: TYPEIDCODESIG structure MAKE: MAKESIG structure ADDRESS : AddressSig structure DEBUG: DEBUG structure DEBUGGER : DEBUGGER structure PRETTY : PRETTYSIG structure PRINTTABLE: PRINTTABLESIG structure MISC : sig val unescapeString : string -> string exception Conversion of string; (* string to int conversion failure *) end structure VERSION: sig val compilerVersion: string val versionNumber: int end structure UNIVERSALTABLE: sig type universal = Universal.universal type univTable type 'a tag = 'a Universal.tag val univLookup: univTable * 'a tag * string -> 'a option val fold: (string * universal * 'a -> 'a) -> 'a -> univTable -> 'a end sharing STRUCTVALS.Sharing = VALUEOPS.Sharing = TYPETREE.Sharing = EXPORTTREE.Sharing = PRETTY.Sharing = CODETREE.Sharing = MAKE.Sharing = ADDRESS = DATATYPEREP.Sharing = TYPEIDCODE.Sharing = DEBUGGER.Sharing = LEX.Sharing = PRINTTABLE.Sharing sharing STRUCTVALS.Sharing = VALUEOPS.Sharing = TYPETREE.Sharing = EXPORTTREE.Sharing = PRETTY.Sharing = CODETREE.Sharing = MAKE.Sharing = ADDRESS = DATATYPEREP.Sharing = TYPEIDCODE.Sharing = DEBUGGER.Sharing = LEX.Sharing = PRINTTABLE.Sharing = UNIVERSALTABLE ) : sig type gEnv val initGlobalEnv : {globalTable : gEnv, intIsArbitraryPrecision: bool } -> unit end = struct open STRUCTVALS; open TYPETREE open VALUEOPS; open CODETREE; open ADDRESS; open MAKE; open MISC; open EXPORTTREE open DATATYPEREP val intInfType = mkTypeConstruction ("int", intInfConstr, [], []) and realType = mkTypeConstruction ("real", realConstr, [], []) and charType = mkTypeConstruction ("char", charConstr, [], []) and wordType = mkTypeConstruction 
("word", wordConstr, [], []) val declInBasis = [DeclaredAt inBasis] fun applyList _ [] = () | applyList f (h :: t) = (f h : unit; applyList f t); fun initGlobalEnv{globalTable : gEnv, intIsArbitraryPrecision: bool } = let val Env globalEnv = MAKE.gEnvAsEnv globalTable val enterGlobalValue = #enterVal globalEnv; val enterGlobalType = #enterType globalEnv; (* Some routines to help make the types. *) local (* careful - STRUCTVALS.intType differs from TYPETREE.intType *) open TYPETREE; in (* Make some type variables *) fun makeEqTV () = mkTypeVar (generalisable, true, false, false) fun makeTV () = mkTypeVar (generalisable, false, false, false) fun makePrintTV() = mkTypeVar (generalisable, false, false, true) fun makeTypeVariable() = makeTv {value=emptyType, level=generalisable, equality=false, nonunifiable=false, printable=false} (* Make some functions *) infixr 5 ->> fun a ->> b = mkFunctionType (a, b); infix 7 **; fun a ** b = mkProductType [a, b]; (* Type identifiers for the types of the declarations. *) val Int = if intIsArbitraryPrecision then intInfType else fixedIntType val String = stringType; val Bool = boolType; val Unit = unitType; val Char = charType; val Word = wordType; val Real = realType val Exn = exnType val mkTypeConstruction = mkTypeConstruction; val () = setPreferredInt(if intIsArbitraryPrecision then intInfConstr else fixedIntConstr) end; fun makePolymorphic(tvs, c) = let open TYPEIDCODE val tvs = List.filter(fn TypeVar tv => not justForEqualityTypes orelse tvEquality tv | _ => false) tvs in if null tvs then c else mkInlproc(c, List.length tvs, "", [], 0) end (* Function to make a type identifier with a pretty printer that just prints "?". None of the types are equality types so the equality function is empty. *) local fun monotypePrinter _ = PRETTY.PrettyString "?" in fun defaultEqAndPrintCode () = let open TypeValue val code = createTypeValue{ eqCode = CodeZero, printCode = mkConst (toMachineWord (ref monotypePrinter)), boxedCode = boxedNever, sizeCode = singleWord } in Global (genCode(code, [], 0) ()) end end fun makeTypeAbbreviation(name, fullName, typeVars, typeResult, locations) = makeTypeConstructor( name, typeVars, makeTypeFunction(basisDescription fullName, (typeVars, typeResult)), locations) (* Make an opaque type and add it to an environment. *) fun makeAndDeclareOpaqueType(typeName, fullName, env) = let val typeconstr = makeTypeConstructor(typeName, [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription fullName), declInBasis); in #enterType env (typeName, TypeConstrSet(typeconstr, [])); mkTypeConstruction (typeName, typeconstr, [], declInBasis) end; (* List of something *) fun List (base : types) : types = mkTypeConstruction ("list", tsConstr listConstr, [base], declInBasis); (* ref something *) fun Ref (base : types) : types = mkTypeConstruction ("ref", refConstr, [base], declInBasis); fun Option (base : types) : types = mkTypeConstruction ("option", tsConstr optionConstr, [base], declInBasis); (* Type-dependent functions. *) fun mkSpecialFun (name:string, typeof:types, opn: typeDependent) : values = makeOverloaded (name, typeof, opn); (* Overloaded functions. *) fun mkOverloaded (name:string) (typeof: types) : values = mkSpecialFun(name, typeof, TypeDep) (* Make a structure. Returns the table as an environment so that entries can be added to the structure. 
*) fun makeStructure(parentEnv, name) = let val str as Struct{signat=Signatures{tab, ...}, ...} = makeEmptyGlobal name val () = #enterStruct parentEnv (name, str) val Env env = makeEnv tab in env end val () = enterGlobalType ("unit", TypeConstrSet(unitConstr, [])); local val falseCons = mkGconstr ("false", Bool, createNullaryConstructor(EnumForm{tag=0w0, maxTag=0w1}, [], "false"), true, 2, declInBasis) val trueCons = mkGconstr ("true", Bool, createNullaryConstructor(EnumForm{tag=0w1, maxTag=0w1}, [], "true"), true, 2, declInBasis) val boolEnv = makeStructure(globalEnv, "Bool") (* Bool structure *) val notFn = mkGvar("not", Bool ->> Bool, mkUnaryFn BuiltIns.NotBoolean, declInBasis) in val () = #enterType boolEnv ("bool", TypeConstrSet(boolConstr, [trueCons, falseCons])) val () = #enterVal boolEnv ("true", trueCons) val () = #enterVal boolEnv ("false", falseCons) val () = #enterVal boolEnv ("not", notFn) end; val () = enterGlobalType ("int", TypeConstrSet(if intIsArbitraryPrecision then intInfConstr else fixedIntConstr, [])) val () = enterGlobalType ("char", TypeConstrSet(charConstr, [])) val () = enterGlobalType ("string", TypeConstrSet(stringConstr, [])) val () = enterGlobalType ("real", TypeConstrSet(realConstr, [])) val () = (* Enter :: and nil. *) List.app(fn(tv as Value{name, ...}) => enterGlobalValue(name, tv)) (tsConstructors listConstr) val () = enterGlobalType ("list", listConstr); val () = (* Enter NONE and SOME. *) List.app(fn(tv as Value{name, ...}) => enterGlobalValue(name, tv)) (tsConstructors optionConstr) val () = enterGlobalType ("option", optionConstr); local val refCons = let val a = mkTypeVar(generalisable, false, false, false) in mkGconstr ("ref", a ->> Ref a, createUnaryConstructor(RefForm, [a], "ref"), false, 1, declInBasis) end in val () = enterGlobalType ("ref", TypeConstrSet(refConstr, [refCons])); val () = enterGlobalValue ("ref", refCons); end local open BuiltIns fun monoTypePrinter _ = PRETTY.PrettyString "?" val idCode = let open TypeValue val equalLongWordFn = mkInlproc( mkBinary(LargeWordComparison TestEqual, mkLoadArgument 0, mkLoadArgument 1), 2, "EqualLargeWord()", [], 0) val code = createTypeValue{ eqCode=equalLongWordFn, printCode=mkConst (toMachineWord (ref monoTypePrinter)), boxedCode = boxedNever, sizeCode = singleWord } in Global (genCode(code, [], 0) ()) end in val largeWordType = makeTypeConstructor("word", [], makeFreeId(0, idCode, true, basisDescription "LargeWord.word"), declInBasis) val LargeWord = mkTypeConstruction ("LargeWord.word", largeWordType, [], declInBasis) end val () = enterGlobalType ("exn", TypeConstrSet(exnConstr, [])); val () = enterGlobalType ("word", TypeConstrSet(wordConstr, [])); val runCallEnv = makeStructure(globalEnv, "RunCall") fun enterRunCall (name : string, entry : codetree, typ : types) : unit = let val value = mkGvar (name, typ, entry, declInBasis); in #enterVal runCallEnv (name, value) end local (* unsafeCast. Can be used to convert any value to any type. *) val a = makeTV () val b = makeTV () val unsafeCastEntry = mkInlproc (mkLoadArgument 0 (* just the parameter *), 1, "unsafeCast(1)", [], 0) in val () = enterRunCall ("unsafeCast", makePolymorphic([a, b], unsafeCastEntry), a ->> b) end local val a = makeTV() and b = makeTV() open BuiltIns in (* isShort - test if a value is tagged rather than being an address. 
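Small values such as fixed-precision integers, words, characters and nullary constructors are represented as tagged immediate values while everything else is a pointer to a heap cell, so for example isShort 42 returns true whereas isShort (ref 42) returns false.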
*) val () = enterRunCall ("isShort", makePolymorphic([a], mkUnaryFn IsTaggedValue), a ->> Bool) (* pointer equality *) val () = enterRunCall ("pointerEq", makePolymorphic([a], mkBinaryFn PointerEq), a ** a ->> Bool) (* load a word. The index is in words and is always zero or positive. *) val () = enterRunCall ("loadWord", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLWord{isImmutable=false})), a ** Word ->> b) (* Load a word from an immutable. *) val () = enterRunCall ("loadWordFromImmutable", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLWord{isImmutable=true})), a ** Word ->> b) (* load a byte. The index is in bytes and is always zero or positive. Probably the result should be a Word8.word value or a char. *) val () = enterRunCall ("loadByte", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLByte{isImmutable=false})), a ** Word ->> b) (* Load a byte from an immutable. *) val () = enterRunCall ("loadByteFromImmutable", makePolymorphic([a, b], mkLoadOperationFn(LoadStoreMLByte{isImmutable=true})), a ** Word ->> b) (* Get the length of a heap cell. *) val () = enterRunCall ("memoryCellLength", makePolymorphic([a], mkUnaryFn MemoryCellLength), a ->> Word) (* Return the flags. Perhaps this could return a Word8.word value instead of a word. *) val () = enterRunCall ("memoryCellFlags", makePolymorphic([a], mkUnaryFn MemoryCellFlags), a ->> Word) (* Return the number of bytes per word. This is a constant since we have separate pre-built compilers for 32-bit and 64-bit. N.B. The byte order is not a constant since we only have a single pre-built compiler for little-endian and big-endian interpreted code. *) val () = enterRunCall ("bytesPerWord", mkConst(toMachineWord wordSize), Word) (* Store a word *) val () = enterRunCall ("storeWord", makePolymorphic([a, b], mkStoreOperationFn(LoadStoreMLWord{isImmutable=false})), mkProductType[a, Word, b] ->> Unit) (* Store a byte *) val () = enterRunCall ("storeByte", makePolymorphic([a, b], mkStoreOperationFn(LoadStoreMLByte{isImmutable=false})), mkProductType[a, Word, b] ->> Unit) (* Lock a mutable cell. *) val () = enterRunCall ("clearMutableBit", makePolymorphic([a], mkUnaryFn ClearMutableFlag), a ->> Unit) (* Allocate a byte cell. The second argument is the flags byte. It might be better if this were a Word8.word value. *) val () = enterRunCall ("allocateByteMemory", makePolymorphic([a], mkBinaryFn AllocateByteMemory), Word ** Word ->> a) (* Allocate a word cell. *) val () = enterRunCall ("allocateWordMemory", makePolymorphic([a, b], mkAllocateWordMemoryFn), mkProductType[Word, Word, a] ->> b) (* Byte vector operations. *) val () = enterRunCall ("byteVectorEqual", makePolymorphic([a], mkBlockOperationFn BlockOpEqualByte), mkProductType[a, a, Word, Word, Word] ->> Bool) val () = enterRunCall ("byteVectorCompare", makePolymorphic([a], mkBlockOperationFn BlockOpCompareByte), mkProductType[a, a, Word, Word, Word] ->> Int) (* Block moves. *) val () = enterRunCall ("moveBytes", makePolymorphic([a], mkBlockOperationFn (BlockOpMove{isByteMove=true})), mkProductType[a, a, Word, Word, Word] ->> Unit) val () = enterRunCall ("moveWords", makePolymorphic([a], mkBlockOperationFn (BlockOpMove{isByteMove=false})), mkProductType[a, a, Word, Word, Word] ->> Unit) (* Untagged loads and stores. 
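These transfer raw machine words rather than tagged ML values, so the loaded or stored word is only meaningful as a Word and the operations are only safe on cells whose contents the garbage collector does not scan.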
*) val () = enterRunCall ("loadUntagged", mkLoadOperationFn LoadStoreUntaggedUnsigned, String ** Word ->> Word) val () = enterRunCall ("storeUntagged", mkStoreOperationFn LoadStoreUntaggedUnsigned, mkProductType[String, Word, Word] ->> Unit) val () = enterRunCall ("touch", makePolymorphic([a], mkUnaryFn TouchAddress), a ->> Unit) end local val debugOpts = [] (* Place to add debugging if necessary. *) (* [tagInject Pretty.compilerOutputTag (Pretty.prettyPrint(print, 70)), tagInject assemblyCodeTag true] *) fun makeCall rtsCall n entryName = rtsCall (entryName, n, debugOpts) val makeFastCall = makeCall CODETREE.Foreign.rtsCallFast (* We need to wrap this so that the arguments are passed in registers. *) fun makeRunCallTupled (argTypes, resultType, callN) : codetree = let val width = List.length argTypes val name = "rtsCall" ^ Int.toString width; local val f = mkLoadClosure 0 (* first item from enclosing scope *) val tuple = mkLoadArgument 0 (* the inner parameter *) val args = case argTypes of [singleType] => [(tuple, singleType)] | argTypes => let val argVals = List.tabulate(width, fn n => mkInd (n, tuple)) in ListPair.zipEq(argVals, argTypes) end in val innerBody = mkCall (f, args, resultType) end local (* The closure contains the address of the RTS call. *) val f = mkEval(mkConst callN, [mkLoadArgument 0]) val innerLambda = mkInlproc (innerBody, 1, name ^ "(1)", [mkLoadLocal 0], 0) in val outerBody = mkEnv([mkDec (0, f)], innerLambda) end val outerLambda = mkInlproc (outerBody, 1, name, [], 1) in outerLambda end (* "Full" calls pass the thread Id as the first parameter. *) fun makeRunCallTupledFull (argTypes, resultType) = let val width = List.length argTypes val callN = toMachineWord(makeFastCall(width + 1)) val name = "rtsCall" ^ Int.toString width; local val f = mkLoadClosure 0 (* first item from enclosing scope *) val tuple = mkLoadArgument 0 (* the inner parameter *) val args = case argTypes of [singleType] => [(tuple, singleType)] | argTypes => let val argVals = List.tabulate(width, fn n => mkInd (n, tuple)) in ListPair.zipEq(argVals, argTypes) end in val innerBody = mkEnv( [ mkDec(0, mkCall (f, (getCurrentThreadId, GeneralType) :: args, resultType)), mkNullDec checkRTSException ], mkLoadLocal 0) end local (* The closure contains the address of the RTS call. *) val f = mkEval(mkConst callN, [mkLoadArgument 0]) (* This creates the actual call. 
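The generated function passes the current thread identifier as an extra first argument to the RTS entry and then checks for a pending RTS exception before returning the result.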
*) val innerLambda = mkInlproc (innerBody, 1, name ^ "(1)", [mkLoadLocal 0], 1) in val outerBody = mkEnv([mkDec (0, f)], innerLambda) end val outerLambda = mkInlproc (outerBody, 1, name, [], 1) in outerLambda end local val a = makeTV () and b = makeTV () fun makeInlCode(makeCall, name) = let val call1 = toMachineWord(makeCall 1) val body = mkEval(mkConst call1, [mkLoadArgument 0]) val proc = mkInlproc (body, 1, name, [], 0) in makePolymorphic([a, b], proc) end in val rtsCallFast1Entry = makeInlCode(makeFastCall, "rtsCallFast1") end local val a = makeTV () and b = makeTV () and c = makeTV () and d = makeTV () and e = makeTV () and f = makeTV () fun makeRtsCall(n, makeCall) = makeRunCallTupled(List.tabulate(n, fn _ => GeneralType), GeneralType, toMachineWord(makeCall n)) fun makeFullRtsCall n = makeRunCallTupledFull(List.tabulate(n, fn _ => GeneralType), GeneralType) in val rtsCallFull0Entry = makePolymorphic([a], makeFullRtsCall 0) and rtsCallFast0Entry = makePolymorphic([a], makeRtsCall(0, makeFastCall)) val rtsCall0Type = String ->> Unit ->> a val rtsCall1Type = String ->> a ->> b val rtsCallFull1Entry = makePolymorphic([a, b], makeFullRtsCall 1) val rtsCallFull2Entry = makePolymorphic([a, b, c], makeFullRtsCall 2) and rtsCallFast2Entry = makePolymorphic([a, b, c], makeRtsCall(2, makeFastCall)) val rtsCall2Type = String ->> TYPETREE.mkProductType [a,b] ->> c val rtsCallFull3Entry = makePolymorphic([a, b, c, d], makeFullRtsCall 3) val rtsCallFast3Entry = makePolymorphic([a, b, c, d], makeRtsCall(3, makeFastCall)) val rtsCall3Type = String ->> TYPETREE.mkProductType [a,b,c] ->> d val rtsCallFull4Entry = makePolymorphic([a, b, c, d, e], makeFullRtsCall 4) val rtsCallFast4Entry = makePolymorphic([a, b, c, d, e], makeRtsCall(4, makeFastCall)) val rtsCall4Type = String ->> TYPETREE.mkProductType [a,b,c,d] ->> e val rtsCallFull5Entry = makePolymorphic([a, b, c, d, e, f], makeFullRtsCall 5) val rtsCall5Type = String ->> TYPETREE.mkProductType [a,b,c,d,e] ->> f end in val () = enterRunCall ("rtsCallFull0", rtsCallFull0Entry, rtsCall0Type) val () = enterRunCall ("rtsCallFast0", rtsCallFast0Entry, rtsCall0Type) val () = enterRunCall ("rtsCallFull1", rtsCallFull1Entry, rtsCall1Type) val () = enterRunCall ("rtsCallFast1", rtsCallFast1Entry, rtsCall1Type) val () = enterRunCall ("rtsCallFull2", rtsCallFull2Entry, rtsCall2Type) val () = enterRunCall ("rtsCallFast2", rtsCallFast2Entry, rtsCall2Type) val () = enterRunCall ("rtsCallFull3", rtsCallFull3Entry, rtsCall3Type) val () = enterRunCall ("rtsCallFast3", rtsCallFast3Entry, rtsCall3Type) val () = enterRunCall ("rtsCallFast4", rtsCallFast4Entry, rtsCall4Type) val () = enterRunCall ("rtsCallFull4", rtsCallFull4Entry, rtsCall4Type) val () = enterRunCall ("rtsCallFull5", rtsCallFull5Entry, rtsCall5Type) val makeRunCallTupled = makeRunCallTupled (* Needed for reals. *) end local (* Create nullary exception. *) fun makeException0(name, id) = let val exc = Value{ name = name, typeOf = TYPETREE.exnType, access = Global(mkConst(toMachineWord id)), class = Exception, locations = declInBasis, references = NONE, instanceTypes=NONE } in #enterVal runCallEnv (name, exc) end (* Create exception with parameter. *) and makeException1(name, id, exType) = let val exc = Value{ name = name, typeOf = exType ->> TYPETREE.exnType, access = Global(mkConst(toMachineWord id)), class = Exception, locations = declInBasis, references = NONE, instanceTypes=NONE } in #enterVal runCallEnv (name, exc) end (* Exception numbers. Most of these are hard-coded in the RTS. 
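They must agree with the numbers the RTS uses when it raises these exceptions; the values from 100 upwards are for exceptions raised by the compiler or library rather than the run-time system.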
*) val EXC_interrupt = 1 val EXC_syserr = 2 val EXC_size = 4 val EXC_overflow = 5 val EXC_divide = 7 val EXC_conversion = 8 val EXC_XWindows = 10 val EXC_subscript = 11 val EXC_thread = 12 val EXC_Bind = 100 (* In Match compiler. *) val EXC_Match = 101 val EXC_Fail = 103 in val () = List.app makeException0 [ ("Interrupt", EXC_interrupt), ("Size", EXC_size), ("Bind", EXC_Bind), ("Div", EXC_divide), ("Match", EXC_Match), ("Overflow", EXC_overflow), ("Subscript", EXC_subscript) ] val () = List.app makeException1 [ ("Fail", EXC_Fail, String), ("Conversion", EXC_conversion, String), ("XWindows", EXC_XWindows, String), ("Thread", EXC_thread, String), ("SysErr", EXC_syserr, String ** Option LargeWord) ] end (* Standard Basis structures for basic types. These contain the definitions of the basic types and operations on them. The structures are extended in the basis library and overloaded functions are extracted from them. *) local val largeIntEnv = makeStructure(globalEnv, "LargeInt") (* The comparison operations take two arbitrary precision ints and a general "compare" function that returns a fixed precision int. *) val compareType = mkProductType[intInfType, intInfType, intInfType ** intInfType ->> fixedIntType] ->> Bool val arithType = mkProductType[intInfType, intInfType, intInfType ** intInfType ->> intInfType] ->> intInfType fun enterArbitrary(name, oper, typ) = let val value = mkGvar (name, typ, mkArbitraryFn oper, declInBasis) in #enterVal largeIntEnv (name, value) end in val () = #enterType largeIntEnv ("int", TypeConstrSet(intInfConstr, [])) (* These functions are used internally. *) val () = enterArbitrary("less", ArbCompare BuiltIns.TestLess, compareType) val () = enterArbitrary("greater", ArbCompare BuiltIns.TestGreater, compareType) val () = enterArbitrary("lessEq", ArbCompare BuiltIns.TestLessEqual, compareType) val () = enterArbitrary("greaterEq", ArbCompare BuiltIns.TestGreaterEqual, compareType) val () = enterArbitrary("add", ArbArith BuiltIns.ArithAdd, arithType) val () = enterArbitrary("subtract", ArbArith BuiltIns.ArithSub, arithType) val () = enterArbitrary("multiply", ArbArith BuiltIns.ArithMult, arithType) end local val fixedIntEnv = makeStructure(globalEnv, "FixedInt") open BuiltIns fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal fixedIntEnv (name, value) end val compareType = fixedIntType ** fixedIntType ->> Bool and binaryType = fixedIntType ** fixedIntType ->> fixedIntType fun enterComparison(name, test) = enterBinary(name, WordComparison{test=test, isSigned=true}, compareType) and enterBinaryOp(name, oper) = enterBinary(name, FixedPrecisionArith oper, binaryType) in val () = #enterType fixedIntEnv ("int", TypeConstrSet(fixedIntConstr, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("quot", ArithQuot) val () = enterBinaryOp("rem", ArithRem) end local open BuiltIns val largeWordEnv = makeStructure(globalEnv, "LargeWord") fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal largeWordEnv (name, value) end val compareType = LargeWord ** LargeWord ->> Bool and binaryType = LargeWord ** LargeWord ->> LargeWord and shiftType = LargeWord ** Word ->> LargeWord (* The shift amount is a 
Word. *) fun enterComparison(name, test) = enterBinary(name, LargeWordComparison test, compareType) and enterBinaryOp(name, oper) = enterBinary(name, LargeWordArith oper, binaryType) and enterBinaryLogical(name, oper) = enterBinary(name, LargeWordLogical oper, binaryType) and enterBinaryShift(name, oper) = enterBinary(name, LargeWordShift oper, shiftType) in val () = #enterType largeWordEnv ("word", TypeConstrSet(largeWordType, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("div", ArithDiv) val () = enterBinaryOp("mod", ArithMod) val () = enterBinaryLogical("orb", LogicalOr) val () = enterBinaryLogical("andb", LogicalAnd) val () = enterBinaryLogical("xorb", LogicalXor) val () = enterBinaryShift("<<", ShiftLeft) val () = enterBinaryShift(">>", ShiftRightLogical) val () = enterBinaryShift("~>>", ShiftRightArithmetic) val LargeWord = LargeWord end local val wordStructEnv = makeStructure(globalEnv, "Word") open BuiltIns fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal wordStructEnv (name, value) end val compareType = Word ** Word ->> Bool and binaryType = Word ** Word ->> Word fun enterComparison(name, test) = enterBinary(name, WordComparison{test=test, isSigned=false}, compareType) and enterBinaryOp(name, oper) = enterBinary(name, WordArith oper, binaryType) and enterBinaryLogical(name, oper) = enterBinary(name, WordLogical oper, binaryType) and enterBinaryShift(name, oper) = enterBinary(name, WordShift oper, binaryType) val toLargeWordFn = mkGvar ("toLargeWord", Word ->> LargeWord, mkUnaryFn UnsignedToLongWord, declInBasis) and toLargeWordXFn = mkGvar ("toLargeWordX", Word ->> LargeWord, mkUnaryFn SignedToLongWord, declInBasis) and fromLargeWordFn = mkGvar ("fromLargeWord", LargeWord ->> Word, mkUnaryFn LongWordToTagged, declInBasis) in val () = #enterType wordStructEnv ("word", TypeConstrSet(wordConstr, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("div", ArithDiv) val () = enterBinaryOp("mod", ArithMod) val () = enterBinaryLogical("orb", LogicalOr) val () = enterBinaryLogical("andb", LogicalAnd) val () = enterBinaryLogical("xorb", LogicalXor) val () = enterBinaryShift("<<", ShiftLeft) val () = enterBinaryShift(">>", ShiftRightLogical) val () = enterBinaryShift("~>>", ShiftRightArithmetic) val () = #enterVal wordStructEnv ("toLargeWord", toLargeWordFn) val () = #enterVal wordStructEnv ("toLargeWordX", toLargeWordXFn) val () = #enterVal wordStructEnv ("fromLargeWord", fromLargeWordFn) end local val charEnv = makeStructure(globalEnv, "Char") open BuiltIns (* Comparison functions are the same as Word. 
*) fun enterComparison(name, test) = let val typ = Char ** Char ->> Bool val entry = mkBinaryFn(WordComparison{test=test, isSigned=false}) val value = mkGvar (name, typ, entry, declInBasis) in #enterVal charEnv (name, value) end in val () = #enterType charEnv ("char", TypeConstrSet(charConstr, [])) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) end local val stringEnv = makeStructure(globalEnv, "String") in val () = #enterType stringEnv ("string", TypeConstrSet(stringConstr, [])) end local val realEnv = makeStructure(globalEnv, "Real") (* These are only used in Real so are included here rather than in RunCall. rtsCallFastRealtoReal is used for functions such as sqrt. rtsCallFastGeneraltoReal is used for Real.fromLargeInt. *) val debugOpts = [] (* Place to add debugging if necessary. *) fun makeFastRealRealCall entryName = CODETREE.Foreign.rtsCallFastRealtoReal (entryName, debugOpts) and makeFastRealRealRealCall entryName = CODETREE.Foreign.rtsCallFastRealRealtoReal (entryName, debugOpts) and makeFastIntInfRealCall entryName = CODETREE.Foreign.rtsCallFastGeneraltoReal (entryName, debugOpts) and makeFastRealGeneralRealCall entryName = CODETREE.Foreign.rtsCallFastRealGeneraltoReal (entryName, debugOpts) val rtsCallFastR_REntry = makeRunCallTupled([DoubleFloatType], DoubleFloatType, toMachineWord makeFastRealRealCall) (* This needs to be tupled. *) val rtsCallFastRR_REntry = makeRunCallTupled([DoubleFloatType, DoubleFloatType], DoubleFloatType, toMachineWord makeFastRealRealRealCall) and rtsCallFastRI_REntry = makeRunCallTupled([DoubleFloatType, GeneralType], DoubleFloatType, toMachineWord makeFastRealGeneralRealCall) val rtsCallFastI_REntry = makeRunCallTupled([GeneralType], DoubleFloatType, toMachineWord makeFastIntInfRealCall) val rtsCallFastF_F = mkGvar ("rtsCallFastR_R", String ->> Real ->> Real, rtsCallFastR_REntry, declInBasis) val rtsCallFastFF_F = mkGvar ("rtsCallFastRR_R", String ->> Real ** Real ->> Real, rtsCallFastRR_REntry, declInBasis) val rtsCallFastFG_F = mkGvar ("rtsCallFastRI_R", String ->> Real ** Int ->> Real, rtsCallFastRI_REntry, declInBasis) val rtsCallFastG_F = mkGvar ("rtsCallFastI_R", String ->> intInfType ->> Real, rtsCallFastI_REntry, declInBasis) fun enterUnary(name, oper, typ) = let val value = mkGvar (name, typ, mkUnaryFn oper, declInBasis) in #enterVal realEnv (name, value) end fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal realEnv (name, value) end val compareType = Real ** Real ->> Bool and binaryType = Real ** Real ->> Real and unaryType = Real ->> Real and realToFixType = Real ->> fixedIntType open BuiltIns IEEEReal fun enterComparison(name, test) = enterBinary(name, RealComparison(test, PrecDouble), compareType) and enterBinaryOp(name, oper) = enterBinary(name, RealArith(oper, PrecDouble), binaryType) in val () = #enterType realEnv ("real", TypeConstrSet(realConstr, [])) val () = #enterVal realEnv ("rtsCallFastR_R", rtsCallFastF_F) val () = #enterVal realEnv ("rtsCallFastRR_R", rtsCallFastFF_F) val () = #enterVal realEnv ("rtsCallFastRI_R", rtsCallFastFG_F) val () = #enterVal realEnv ("rtsCallFastI_R", rtsCallFastG_F) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterComparison("==", TestEqual) (* real is not an eqtype. 
*) (* Included unordered mainly because it's easy to implement isNan. *) val () = enterComparison("unordered", TestUnordered) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("/", ArithDiv) val () = enterUnary("~", RealNeg PrecDouble, unaryType) val () = enterUnary("abs", RealAbs PrecDouble, unaryType) val () = enterUnary("fromFixedInt", RealFixedInt PrecDouble, fixedIntType ->> Real) val () = enterUnary("truncFix", RealToInt(PrecDouble, TO_ZERO), realToFixType) val () = enterUnary("roundFix", RealToInt(PrecDouble, TO_NEAREST), realToFixType) val () = enterUnary("ceilFix", RealToInt(PrecDouble, TO_POSINF), realToFixType) val () = enterUnary("floorFix", RealToInt(PrecDouble, TO_NEGINF), realToFixType) end local val real32Env = makeStructure(globalEnv, "Real32") val floatType = mkTypeConstruction ("real", floatConstr, [], []) val Float = floatType val debugOpts = [] (* Place to add debugging if necessary. *) fun makeFastFloatFloatCall entryName = CODETREE.Foreign.rtsCallFastFloattoFloat (entryName, debugOpts) and makeFastFloatFloatFloatCall entryName = CODETREE.Foreign.rtsCallFastFloatFloattoFloat (entryName, debugOpts) and makeFastIntInfFloatCall entryName = CODETREE.Foreign.rtsCallFastGeneraltoFloat (entryName, debugOpts) and makeFastFloatGeneralFloatCall entryName = CODETREE.Foreign.rtsCallFastFloatGeneraltoFloat (entryName, debugOpts) val rtsCallFastR_REntry = makeRunCallTupled([SingleFloatType], SingleFloatType, toMachineWord makeFastFloatFloatCall) (* This needs to be tupled. *) val rtsCallFastRR_REntry = makeRunCallTupled([SingleFloatType, SingleFloatType], SingleFloatType, toMachineWord makeFastFloatFloatFloatCall) and rtsCallFastRI_REntry = makeRunCallTupled([SingleFloatType, GeneralType], SingleFloatType, toMachineWord makeFastFloatGeneralFloatCall) val rtsCallFastI_REntry = makeRunCallTupled([GeneralType], SingleFloatType, toMachineWord makeFastIntInfFloatCall) val rtsCallFastF_F = mkGvar ("rtsCallFastF_F", String ->> Float ->> Float, rtsCallFastR_REntry, declInBasis) val rtsCallFastFF_F = mkGvar ("rtsCallFastFF_F", String ->> Float ** Float ->> Float, rtsCallFastRR_REntry, declInBasis) val rtsCallFastFG_F = mkGvar ("rtsCallFastFI_F", String ->> Float ** Int ->> Float, rtsCallFastRI_REntry, declInBasis) val rtsCallFastG_F = mkGvar ("rtsCallFastI_F", String ->> intInfType ->> Float, rtsCallFastI_REntry, declInBasis) fun enterUnary(name, oper, typ) = let val value = mkGvar (name, typ, mkUnaryFn oper, declInBasis) in #enterVal real32Env (name, value) end fun enterBinary(name, oper, typ) = let val value = mkGvar (name, typ, mkBinaryFn oper, declInBasis) in #enterVal real32Env (name, value) end val compareType = Float ** Float ->> Bool and binaryType = Float ** Float ->> Float and unaryType = Float ->> Float and floatToFixType = Float ->> fixedIntType open BuiltIns IEEEReal fun enterComparison(name, test) = enterBinary(name, RealComparison(test, PrecSingle), compareType) and enterBinaryOp(name, oper) = enterBinary(name, RealArith(oper, PrecSingle), binaryType) in val () = #enterType real32Env ("real", TypeConstrSet(floatConstr, [])) val () = enterUnary("toLarge", BuiltIns.FloatToDouble, floatType ->> Real) (* Conversion with the current rounding mode. *) and () = enterUnary("fromReal", BuiltIns.DoubleToFloat NONE, Real ->> floatType) (* There are various versions of this function for each of the rounding modes. 
*) and () = enterUnary("fromRealRound", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_NEAREST), Real ->> floatType) and () = enterUnary("fromRealTrunc", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_ZERO), Real ->> floatType) and () = enterUnary("fromRealCeil", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_POSINF), Real ->> floatType) and () = enterUnary("fromRealFloor", BuiltIns.DoubleToFloat (SOME IEEEReal.TO_NEGINF), Real ->> floatType) val () = #enterVal real32Env ("rtsCallFastR_R", rtsCallFastF_F) val () = #enterVal real32Env ("rtsCallFastRR_R", rtsCallFastFF_F) val () = #enterVal real32Env ("rtsCallFastRI_R", rtsCallFastFG_F) val () = #enterVal real32Env ("rtsCallFastI_R", rtsCallFastG_F) val () = enterComparison("<", TestLess) val () = enterComparison("<=", TestLessEqual) val () = enterComparison(">", TestGreater) val () = enterComparison(">=", TestGreaterEqual) val () = enterComparison("==", TestEqual) (* Real32.real is not an eqtype. *) val () = enterComparison("unordered", TestUnordered) val () = enterBinaryOp("+", ArithAdd) val () = enterBinaryOp("-", ArithSub) val () = enterBinaryOp("*", ArithMult) val () = enterBinaryOp("/", ArithDiv) val () = enterUnary("~", RealNeg PrecSingle, unaryType) val () = enterUnary("abs", RealAbs PrecSingle, unaryType) val () = enterUnary("truncFix", RealToInt(PrecSingle, TO_ZERO), floatToFixType) val () = enterUnary("roundFix", RealToInt(PrecSingle, TO_NEAREST), floatToFixType) val () = enterUnary("ceilFix", RealToInt(PrecSingle, TO_POSINF), floatToFixType) val () = enterUnary("floorFix", RealToInt(PrecSingle, TO_NEGINF), floatToFixType) end val bootstrapEnv = makeStructure(globalEnv, "Bootstrap") fun enterBootstrap (name : string, entry : codetree, typ : types) : unit = let val value = mkGvar (name, typ, entry, declInBasis) in #enterVal bootstrapEnv (name, value) end local val threadEnv = makeStructure(globalEnv, "Thread") open TypeValue fun monoTypePrinter _ = PRETTY.PrettyString "?" val code = createTypeValue{ eqCode=equalPointerOrWordFn, printCode=mkConst (toMachineWord (ref monoTypePrinter)), boxedCode=boxedAlways, sizeCode=singleWord } (* Thread.thread type. This is an equality type with pointer equality. 
        *)
        val threadConstr =
            makeTypeConstructor ("thread", [],
                makeFreeId(0, Global (genCode(code, [], 0) ()), true, basisDescription "thread"),
                [DeclaredAt inBasis])
        val threadType = mkTypeConstruction ("thread", threadConstr, [], []);
        val selfFunction = mkGvar ("self", Unit ->> threadType, getCurrentThreadIdFn, declInBasis)
-       val atIncrFunction = mkGvar("atomicIncr", Ref Word ->> Word, mkUnaryFn BuiltIns.AtomicIncrement, declInBasis)
-       val atDecrFunction = mkGvar("atomicDecr", Ref Word ->> Word, mkUnaryFn BuiltIns.AtomicDecrement, declInBasis)
+       val atExAddFunction = mkGvar("atomicExchAdd", Ref Word ** Word ->> Word, mkBinaryFn BuiltIns.AtomicExchangeAdd, declInBasis)
        val atResetFunction = mkGvar("atomicReset", Ref Word ->> Unit, mkUnaryFn BuiltIns.AtomicReset, declInBasis)
+       val cpuPauseFunction = mkGvar("cpuPause", Unit ->> Unit, cpuPauseFn, declInBasis)
    in
        val () = #enterType threadEnv ("thread", TypeConstrSet(threadConstr, []))
        val () = #enterVal threadEnv ("self", selfFunction)
-       val () = #enterVal threadEnv ("atomicIncr", atIncrFunction)
-       val () = #enterVal threadEnv ("atomicDecr", atDecrFunction)
+       val () = #enterVal threadEnv ("atomicExchAdd", atExAddFunction)
        val () = #enterVal threadEnv ("atomicReset", atResetFunction)
+       val () = #enterVal threadEnv ("cpuPause", cpuPauseFunction)
    end

    local
        val fmemEnv = makeStructure(globalEnv, "ForeignMemory")
        val a = makeTV()
        (* We don't have Word8.word or Word32.word at this point so the easiest
           way to deal with this is to make them polymorphic. *)
        val get8Function = mkGvar("get8", LargeWord ** Word ->> a, makePolymorphic([a], mkLoadOperationFn LoadStoreC8), declInBasis)
        val get16Function = mkGvar("get16", LargeWord ** Word ->> Word, mkLoadOperationFn LoadStoreC16, declInBasis)
        val get32Function = mkGvar("get32", LargeWord ** Word ->> a, makePolymorphic([a], mkLoadOperationFn LoadStoreC32), declInBasis)
        val get64Function = mkGvar("get64", LargeWord ** Word ->> LargeWord, mkLoadOperationFn LoadStoreC64, declInBasis)
        val getFloatFunction = mkGvar("getFloat", LargeWord ** Word ->> Real, mkLoadOperationFn LoadStoreCFloat, declInBasis)
        val getDoubleFunction = mkGvar("getDouble", LargeWord ** Word ->> Real, mkLoadOperationFn LoadStoreCDouble, declInBasis)
        val set8Function = mkGvar("set8", mkProductType[LargeWord, Word, a] ->> Unit, makePolymorphic([a], mkStoreOperationFn LoadStoreC8), declInBasis)
        val set16Function = mkGvar("set16", mkProductType[LargeWord, Word, Word] ->> Unit, mkStoreOperationFn LoadStoreC16, declInBasis)
        val set32Function = mkGvar("set32", mkProductType[LargeWord, Word, a] ->> Unit, makePolymorphic([a], mkStoreOperationFn LoadStoreC32), declInBasis)
        val set64Function = mkGvar("set64", mkProductType[LargeWord, Word, LargeWord] ->> Unit, mkStoreOperationFn LoadStoreC64, declInBasis)
        val setFloatFunction = mkGvar("setFloat", mkProductType[LargeWord, Word, Real] ->> Unit, mkStoreOperationFn LoadStoreCFloat, declInBasis)
        val setDoubleFunction = mkGvar("setDouble", mkProductType[LargeWord, Word, Real] ->> Unit, mkStoreOperationFn LoadStoreCDouble, declInBasis)
        val allocCStackFn = mkGvar("allocCStack", Word ->> LargeWord, mkUnaryFn BuiltIns.AllocCStack, declInBasis)
        val freeCStackFn = mkGvar("freeCStack", LargeWord ** Word ->> Unit, mkBinaryFn BuiltIns.FreeCStack, declInBasis)
    in
        val () = #enterVal fmemEnv ("get8", get8Function)
        val () = #enterVal fmemEnv ("get16", get16Function)
        val () = #enterVal fmemEnv ("get32", get32Function)
        val () = #enterVal fmemEnv ("get64", get64Function)
        val () = #enterVal fmemEnv ("getFloat", getFloatFunction)
val () = #enterVal fmemEnv ("getDouble", getDoubleFunction) val () = #enterVal fmemEnv ("set8", set8Function) val () = #enterVal fmemEnv ("set16", set16Function) val () = #enterVal fmemEnv ("set32", set32Function) val () = #enterVal fmemEnv ("set64", set64Function) val () = #enterVal fmemEnv ("setFloat", setFloatFunction) val () = #enterVal fmemEnv ("setDouble", setDoubleFunction) val () = #enterVal fmemEnv ("allocCStack", allocCStackFn) (* Free is a binary operation that takes both the allocated address and the size. The size is used by the compiled code where this is implemented using the C-stack. The allocated address is intended for possible use by the interpreter where so that it can be implemented as malloc/free. *) val () = #enterVal fmemEnv ("freeCStack", freeCStackFn) end local val foreignEnv = makeStructure(globalEnv, "Foreign") local val EXC_foreign = 23 val foreignException = Value{ name = "Foreign", typeOf = String ->> TYPETREE.exnType, access = Global(mkConst(toMachineWord EXC_foreign)), class = Exception, locations = declInBasis, references = NONE, instanceTypes=NONE } in val () = #enterVal foreignEnv ("Foreign", foreignException) end val arg0 = mkLoadArgument 0 val arg1 = mkLoadArgument 1 local val callForeignCall = mkEval(mkConst (toMachineWord CODETREE.Foreign.foreignCall), [arg0]) val innerBody = mkEval(mkLoadClosure 0, [mkInd(0, arg0), mkInd(1, arg0), mkInd(2, arg0)]) val outerBody = mkEnv([mkDec(0, callForeignCall)], mkInlproc(innerBody, 1, "foreignCall(1)(1)", [mkLoadLocal 0], 0)) in val foreignCallEntry = mkInlproc(outerBody, 1, "foreignCall(1)", [], 1) end local (* Build a callback. First apply the compiler to the abi/argtype/restype values. Then apply the result to a function to generate the final C callback code. The C callback code calls the function with two arguments. Here we have to pass it a function that expects a tuple and unwrap it. *) val innerMost = mkInlproc(mkEval(mkLoadClosure 0, [mkTuple[arg0, arg1]]), 2, "buildCallBack(1)(1)2", [mkLoadArgument 0], 0) val resultFn = mkInlproc(mkEval(mkLoadClosure 0, [innerMost]), 1, "buildCallBack(1)(1)", [mkLoadLocal 0], 0) val firstBuild = mkEval(mkConst (toMachineWord CODETREE.Foreign.buildCallBack), [arg0]) val outerBody = mkEnv([mkDec(0, firstBuild)], resultFn) in val buildCallBackEntry = mkInlproc(outerBody, 1, "buildCallBack(1)", [], 1) end (* Abi - an eqtype. An enumerated type or short int. *) local open TypeValue fun monotypePrinter _ = PRETTY.PrettyString "?" val code = createTypeValue{ eqCode = equalTaggedWordFn, printCode = mkConst (toMachineWord (ref monotypePrinter)), boxedCode = boxedNever, sizeCode = singleWord } val abiEqAndPrint = Global (genCode(code, [], 0) ()) in val abiConstr = makeTypeConstructor("abi", [], makeFreeId(0, abiEqAndPrint, true, basisDescription "Foreign.LowLevel.abi"), declInBasis) end val () = #enterType foreignEnv ("abi", TypeConstrSet(abiConstr, [])) val abiType = mkTypeConstruction ("abi", abiConstr, [], declInBasis) (* It would be possible to put the definition of cType in here but it's complicated. It's easier to use an opaque type and put in a cast later. 
*) val ctypeConstr = makeTypeConstructor("ctype", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "Foreign.LowLevel.ctype"), declInBasis) val () = #enterType foreignEnv ("ctype", TypeConstrSet(ctypeConstr, [])) val ffiType = mkTypeConstruction ("ctype", ctypeConstr, [], declInBasis) val foreignCallType = mkProductType[abiType, List ffiType, ffiType] ->> mkProductType[LargeWord, LargeWord, LargeWord] ->> Unit val buildCallBackType = mkProductType[abiType, List ffiType, ffiType] ->> (mkProductType[LargeWord, LargeWord] ->> Unit) ->> LargeWord fun enterForeign (name, entry, typ) = #enterVal foreignEnv (name, mkGvar (name, typ, entry, declInBasis)) in val () = enterForeign("foreignCall", foreignCallEntry, foreignCallType) val () = enterForeign("buildCallBack", buildCallBackEntry, buildCallBackType) (* Apply the abiList function here. The ABIs depend on the platform in the interpreted version. *) val () = enterForeign("abiList", mkConst(toMachineWord(CODETREE.Foreign.abiList())), List (String ** abiType)) end local fun addVal (name : string, value : 'a, typ : types) : unit = enterBootstrap (name, mkConst (toMachineWord value), typ) (* These are only used during the bootstrap phase. Replacements are installed once the appropriate modules of the basis library are compiled. *) fun intOfString s = let val radix = if String.size s >= 3 andalso String.substring(s, 0, 2) = "0x" orelse String.size s >= 4 andalso String.substring(s, 0, 3) = "~0x" then StringCvt.HEX else StringCvt.DEC in case StringCvt.scanString (Int.scan radix) s of NONE => raise Conversion "Invalid integer constant" | SOME res => res end fun wordOfString s = let val radix = if String.size s > 2 andalso String.sub(s, 2) = #"x" then StringCvt.HEX else StringCvt.DEC in case StringCvt.scanString (Word.scan radix) s of NONE => raise Conversion "Invalid word constant" | SOME res => res end fun unescapeChar (s: string) : char = let fun rdr i = if i = size s then NONE else SOME(String.sub(s, i), i+1) in case Char.scan rdr 0 of NONE => (* Bad conversion *) raise Conversion "Invalid string constant" | SOME(res, _) => res end open PRINTTABLE val convstringCode = genCode(mkConst(toMachineWord unescapeString), [], 0) () val convintCode = genCode(mkConst(toMachineWord intOfString), [], 0) () val convwordCode = genCode(mkConst(toMachineWord wordOfString), [], 0) () val convcharCode = genCode(mkConst(toMachineWord unescapeChar), [], 0) () in (* We need this for compatibility with the 5.8.2 bootstrap. *) val () = addVal ("convString", unescapeString: string -> string, String ->> String) (* Flag to indicate which version of Int to compile *) val () = addVal ("intIsArbitraryPrecision", intIsArbitraryPrecision, Bool) (* Install the overloads now. *) val () = addOverload("convString", stringConstr, convstringCode) val () = addOverload("convInt", fixedIntConstr, convintCode) val () = addOverload("convInt", intInfConstr, convintCode) val () = addOverload("convWord", wordConstr, convwordCode) val () = addOverload("convChar", charConstr, convcharCode) end (* The only reason we have vector here is to get equality right. We need vector to be an equality type and to have a specific equality function. *) local fun polyTypePrinter _ _ = PRETTY.PrettyString "?" (* The equality function takes the base equality type as an argument. The inner function takes two arguments which are the two vectors to compare, checks the lengths and if they're equal applies the base equality to each field. 
*) val eqCode = mkInlproc( mkProc( mkEnv([ (* Length of the items. *) mkDec(0, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 0)), mkDec(1, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 1)), mkMutualDecs[(2, (* Loop function. *) mkProc( mkIf( (* Finished? *) mkEqualTaggedWord(mkLoadClosure 0, mkLoadArgument 0), CodeTrue, (* Yes, all equal. *) mkIf( mkEval( TypeValue.extractEquality(mkLoadClosure 2), (* Base equality fn *) [ mkLoadOperation(LoadStoreMLWord{isImmutable=true}, mkLoadClosure 3, mkLoadArgument 0), mkLoadOperation(LoadStoreMLWord{isImmutable=true}, mkLoadClosure 4, mkLoadArgument 0) ]), mkEval(mkLoadClosure 1, (* Recursive call with index+1. *) [ mkBinary(BuiltIns.WordArith BuiltIns.ArithAdd, mkLoadArgument 0, mkConst(toMachineWord 1)) ]), CodeFalse (* Not equal elements - result false *) ) ), 1, "vector-loop", [mkLoadLocal 0 (* Length *), mkLoadLocal 2 (* Loop function *), mkLoadClosure 0 (* Base equality function *), mkLoadArgument 0 (* Vector 0 *), mkLoadArgument 1 (* Vector 1 *)], 0))] ], mkIf( (* Test the lengths. *) mkEqualTaggedWord(mkLoadLocal 0, mkLoadLocal 1), (* Equal - test the contents. *) mkEval(mkLoadLocal 2, [CodeZero]), CodeFalse (* Not same length- result false *) ) ), 2, "vector-eq", [mkLoadArgument 0], 3), 1, "vector-eq()", [], 0) val idCode = (* Polytype *) let open TypeValue val code = createTypeValue{ eqCode=eqCode, printCode=mkConst (toMachineWord (ref polyTypePrinter)), boxedCode=mkInlproc(boxedAlways, 1, "boxed-vector", [], 0), sizeCode=mkInlproc(singleWord, 1, "size-vector", [], 0)} in Global (genCode(code, [], 0) ()) end in val vectorType = makeTypeConstructor("vector", [makeTypeVariable()], makeFreeId(1, idCode, true, basisDescription "vector"), declInBasis) val () = enterGlobalType ("vector", TypeConstrSet(vectorType, [])) end (* We also need a type with byte-wise equality. *) local fun monoTypePrinter _ = PRETTY.PrettyString "?" (* This is a monotype equality function that takes two byte vectors and compares them byte-by-byte for equality. Because they are vectors of bytes it's unsafe to load the whole words which could look like addresses if the bottom bit happens to be zero. *) val eqCode = mkProc( mkEnv([ (* Length of the items. *) mkDec(0, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 0)), mkDec(1, mkUnary(BuiltIns.MemoryCellLength, mkLoadArgument 1)) ], mkIf( (* Test the lengths. *) mkEqualTaggedWord(mkLoadLocal 0, mkLoadLocal 1), (* Equal - test the contents. *) mkEnv([ (* ByteVecEqual takes a byte length so we have to multiply by the number of bytes per word. *) mkDec(2, mkBinary(BuiltIns.WordArith BuiltIns.ArithMult, mkConst(toMachineWord RunCall.bytesPerWord), mkLoadLocal 0)) ], mkBlockOperation{kind=BlockOpEqualByte, leftBase=mkLoadArgument 0, rightBase=mkLoadArgument 1, leftIndex=CodeZero, rightIndex=CodeZero, length=mkLoadLocal 2}), CodeFalse (* Not same length- result false *) ) ), 2, "byteVector-eq", [], 3) val idCode = (* Polytype *) let open TypeValue val code = createTypeValue{ eqCode=eqCode, printCode=mkConst (toMachineWord (ref monoTypePrinter)), boxedCode=boxedAlways, sizeCode=singleWord} in Global (genCode(code, [], 0) ()) end in val byteVectorType = makeTypeConstructor("byteVector", [], makeFreeId(0, idCode, true, basisDescription "byteVector"), declInBasis) val () = #enterType bootstrapEnv ("byteVector", TypeConstrSet(byteVectorType, [])) end (* We also need array and Array2.array to be passed through here so that they have the special property of being eqtypes even if their argument is not. 
"array" is defined to be in the global environment. *) val () = enterGlobalType ("array", TypeConstrSet(arrayConstr, [])) val () = #enterType bootstrapEnv ("array", TypeConstrSet(array2Constr, [])) val () = #enterType bootstrapEnv ("byteArray", TypeConstrSet(byteArrayConstr, [])) (* "=', '<>', PolyML.print etc are type-specific function which appear to be polymorphic. The compiler recognises these and treats them specially. For (in)equality that means generating type-specific versions of the equality operations; for print etc that means printing in a type-specific way. They can become true polymorphic functions and lose their type-specificity. For (in)equality that means defaulting to structure equality which is normal and expected behaviour. For print etc that means losing the ability to print and just printing "?" so it's important to avoid that happening. "open" treats type-specific functions specially and retains the type-specificity. That's important to allow the prelude code to expand the PolyML structure. *) local val eqType = let val a = makeEqTV () in a ** a ->> Bool end val eqVal = mkSpecialFun("=", eqType, Equal) in val () = enterGlobalValue ("=", eqVal) end local val neqType = let val a = makeEqTV () in a ** a ->> Bool end val neqVal = mkSpecialFun("<>", neqType, NotEqual) in val () = enterGlobalValue ("<>", neqVal) end val polyMLEnv = makeStructure(globalEnv, "PolyML") val enterPolyMLVal = #enterVal polyMLEnv local (* This version of the environment must match that used in the NameSpace structure. *) open TYPETREE (* Create a new structure for them. *) val nameSpaceEnv = makeStructure(polyMLEnv, "NameSpace") (* Substructures. *) val valuesEnv = makeStructure(nameSpaceEnv, "Values") and typesEnv = makeStructure(nameSpaceEnv, "TypeConstrs") and fixesEnv = makeStructure(nameSpaceEnv, "Infixes") and structsEnv = makeStructure(nameSpaceEnv, "Structures") and sigsEnv = makeStructure(nameSpaceEnv, "Signatures") and functsEnv = makeStructure(nameSpaceEnv, "Functors") (* Types for the basic values. These are opaque. *) val valueVal = makeAndDeclareOpaqueType("value", "PolyML.NameSpace.Values.value", valuesEnv) (* Representation of the type of a value. *) val Types = makeAndDeclareOpaqueType("typeExpression", "PolyML.NameSpace.Values.typeExpression", valuesEnv) val typeVal = makeAndDeclareOpaqueType("typeConstr", "PolyML.NameSpace.TypeConstrs.typeConstr", typesEnv) val fixityVal = makeAndDeclareOpaqueType("fixity", "PolyML.NameSpace.Infixes.fixity", fixesEnv) val signatureVal = makeAndDeclareOpaqueType("signatureVal", "PolyML.NameSpace.Signatures.signatureVal", sigsEnv) val structureVal = makeAndDeclareOpaqueType("structureVal", "PolyML.NameSpace.Structures.structureVal", structsEnv) val functorVal = makeAndDeclareOpaqueType("functorVal", "PolyML.NameSpace.Functors.functorVal", functsEnv) (* nameSpace type. Labelled record. *) fun createFields(name, vType): { name: string, typeof: types} list = let val enterFun = String ** vType ->> Unit val lookupFun = String ->> Option vType val allFun = Unit ->> List (String ** vType) in [mkLabelEntry("enter" ^ name, enterFun), mkLabelEntry("lookup" ^ name, lookupFun), mkLabelEntry("all" ^ name, allFun)] end (* We have to use the same names as we use in the env type because we're passing "env" values through the bootstrap. 
*) val valTypes = [("Val", valueVal), ("Type", typeVal), ("Fix", fixityVal), ("Struct", structureVal), ("Sig", signatureVal), ("Funct", functorVal)] val fields = List.foldl (fn (p,l) => createFields p @ l) [] valTypes val recordType = makeTypeAbbreviation("nameSpace", "PolyML.NameSpace.nameSpace", [], mkLabelled(sortLabels fields, true), declInBasis); val () = #enterType nameSpaceEnv ("nameSpace", TypeConstrSet(recordType, [])); (* The result type of the compiler includes valueVal etc. *) val resultFields = List.map TYPETREE.mkLabelEntry [("values", List(String ** valueVal)), ("fixes", List(String ** fixityVal)), ("types", List(String ** typeVal)), ("structures", List(String ** structureVal)), ("signatures", List(String ** signatureVal)), ("functors", List(String ** functorVal))] in val nameSpaceType = mkTypeConstruction ("nameSpace", recordType, [], declInBasis) val execResult = mkLabelled(sortLabels resultFields, true) type execResult = { fixes: (string * fixStatus) list, values: (string * values) list, structures: (string * structVals) list, signatures: (string * signatures) list, functors: (string * functors) list, types: (string * typeConstrSet) list } val valueVal = valueVal val typeVal = typeVal val fixityVal = fixityVal val signatureVal = signatureVal val structureVal = structureVal val functorVal = functorVal val Types = Types val valuesEnv = valuesEnv and typesEnv = typesEnv and fixesEnv = fixesEnv and structsEnv = structsEnv and sigsEnv = sigsEnv and functsEnv = functsEnv end local val typeconstr = locationConstr val () = #enterType polyMLEnv ("location", typeconstr); in val Location = mkTypeConstruction ("location", tsConstr typeconstr, [], declInBasis) end (* Interface to the debugger. *) local open TYPETREE val debuggerEnv = makeStructure(polyMLEnv, "DebuggerInterface") (* Make these opaque at this level. *) val locationPropList = makeAndDeclareOpaqueType("locationPropList", "PolyML.DebuggerInterface.locationPropList", debuggerEnv) val typeId = makeAndDeclareOpaqueType("typeId", "PolyML.DebuggerInterface.typeId", debuggerEnv) val machineWordType = makeAndDeclareOpaqueType("machineWord", "PolyML.DebuggerInterface.machineWord", debuggerEnv) (* For long term security keep these as different from global types and sigs. Values in the static environment need to be copied before they are global. *) val localType = makeAndDeclareOpaqueType("localType", "PolyML.DebuggerInterface.localType", debuggerEnv) val localTypeConstr = makeAndDeclareOpaqueType("localTypeConstr", "PolyML.DebuggerInterface.localTypeConstr", debuggerEnv) val localSig = makeAndDeclareOpaqueType("localSig", "PolyML.DebuggerInterface.localSig", debuggerEnv) open DEBUGGER (* Entries in the static list. This type is only used within the implementation of DebuggerInterface in the basis library and does not appear in the final signature. *) val environEntryConstr = makeTypeConstructor("environEntry", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "PolyML.DebuggerInterface.environEntry"), declInBasis) val environEntryType = mkTypeConstruction ("environEntry", environEntryConstr, [], declInBasis) val constrs = (* Order is significant. 
*) [ ("EnvEndFunction", mkProductType[String, Location, localType]), ("EnvException", mkProductType[String, localType, locationPropList]), ("EnvStartFunction", mkProductType[String, Location, localType]), ("EnvStructure", mkProductType[String, localSig, locationPropList]), ("EnvTConstr", String ** localTypeConstr), ("EnvTypeid", typeId ** typeId), ("EnvVConstr", mkProductType[String, localType, Bool, Int, locationPropList]), ("EnvValue", mkProductType[String, localType, locationPropList]) ] (* This representation must match the representation defined in DEBUGGER_.sml. *) val numConstrs = List.length constrs val {constrs=constrReps, ...} = chooseConstrRepr(constrs, []) val constructors = ListPair.map (fn ((s,t), code) => mkGconstr(s, t ->> environEntryType, code, false, numConstrs, declInBasis)) (constrs, constrReps) val () = List.app (fn c => #enterVal debuggerEnv(valName c, c)) constructors (* Put these constructors onto the type. *) val () = #enterType debuggerEnv ("environEntry", TypeConstrSet(environEntryConstr, constructors)) (* Debug state type. *) val debugStateConstr = makeTypeAbbreviation("debugState", "PolyML.DebuggerInterface.debugState", [], mkProductType[List environEntryType, List machineWordType, Location], declInBasis) val () = #enterType debuggerEnv ("debugState", TypeConstrSet(debugStateConstr, [])) val debugStateType = mkTypeConstruction ("debugState", debugStateConstr, [], declInBasis) in val () = applyList (fn (name, v, t) => #enterVal debuggerEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("makeValue", toMachineWord(makeValue: debugState -> string * types * locationProp list * machineWord -> values), debugStateType ->> mkProductType[String, localType, locationPropList, machineWordType] ->> valueVal), ("makeException", toMachineWord(makeException: debugState -> string * types * locationProp list * machineWord -> values), debugStateType ->> mkProductType[String, localType, locationPropList, machineWordType] ->> valueVal), ("makeConstructor", toMachineWord(makeConstructor: debugState -> string * types * bool * int * locationProp list * machineWord -> values), debugStateType ->> mkProductType[String, localType, Bool, Int, locationPropList, machineWordType] ->> valueVal), ("makeAnonymousValue", toMachineWord(makeAnonymousValue: debugState -> types * machineWord -> values), debugStateType ->> mkProductType[localType, machineWordType] ->> valueVal), ("makeStructure", toMachineWord(makeStructure: debugState -> string * signatures * locationProp list * machineWord -> structVals), debugStateType ->> mkProductType[String, localSig, locationPropList, machineWordType] ->> structureVal), ("makeTypeConstr", toMachineWord(makeTypeConstr: debugState -> typeConstrSet -> typeConstrSet), debugStateType ->> localTypeConstr ->> typeVal), ("unitValue", toMachineWord(mkGvar("", unitType, CodeZero, []): values), valueVal), (* Used as a default *) ("setOnEntry", toMachineWord(setOnEntry: (string * PolyML.location -> unit) option -> unit), Option (String ** Location ->> Unit) ->> Unit), ("setOnExit", toMachineWord(setOnExit: (string * PolyML.location -> unit) option -> unit), Option (String ** Location ->> Unit) ->> Unit), ("setOnExitException", toMachineWord(setOnExitException: (string * PolyML.location -> exn -> unit) option -> unit), Option (String ** Location ->> Exn ->> Unit) ->> Unit), ("setOnBreakPoint", toMachineWord(setOnBreakPoint: (PolyML.location * bool ref -> unit) option -> unit), Option (Location ** Ref Bool ->> Unit) ->> Unit) ] end local val typeconstr = contextConstr 
in val () = #enterType polyMLEnv ("context", typeconstr); val () = List.app(fn(tv as Value{name, ...}) => #enterVal polyMLEnv(name, tv)) (tsConstructors typeconstr) end local val typeconstr = prettyConstr in val () = #enterType polyMLEnv ("pretty", typeconstr); val () = List.app(fn(tv as Value{name, ...}) => #enterVal polyMLEnv(name, tv)) (tsConstructors typeconstr) val PrettyType = mkTypeConstruction ("pretty", tsConstr typeconstr, [], declInBasis) end local val printType = let val a = makePrintTV () in a ->> a end; val printVal = mkSpecialFun("print", printType, Print); in val () = enterPolyMLVal ("print", printVal); end; local val makeStringType = let val a = makePrintTV () in a ->> String end; val makeStringVal = mkSpecialFun("makestring", makeStringType, MakeString); in val () = enterPolyMLVal ("makestring", makeStringVal); end; local val prettyType = let val a = makePrintTV () in a ** fixedIntType ->> PrettyType end; val prettyVal = mkSpecialFun("prettyRepresentation", prettyType, GetPretty); in val () = enterPolyMLVal ("prettyRepresentation", prettyVal); end; local (* addPrettyPrinter is the new function to install a pretty printer. *) val a = makeTV () val b = makeTV () val addPrettyType = (TYPETREE.fixedIntType ->> b ->> a ->> PrettyType) ->> Unit; val addPrettyVal = mkSpecialFun("addPrettyPrinter", addPrettyType, AddPretty); in val () = enterPolyMLVal ("addPrettyPrinter", addPrettyVal); end; (* This goes in RunCall since it's only for the basis library. *) local val addOverloadType = let val a = makeTV () and b = makeTV () in (a ->> b) ->> String ->> Unit end; val addOverloadVal = mkSpecialFun("addOverload", addOverloadType, AddOverload); in val () = #enterVal runCallEnv ("addOverload", addOverloadVal); end local (* Add a function to switch the default integer type. *) fun setType isArbitrary = setPreferredInt(if isArbitrary then intInfConstr else fixedIntConstr) in val () = #enterVal runCallEnv ("setDefaultIntTypeArbitrary", mkGvar ("setDefaultIntTypeArbitrary", Bool ->> Unit, mkConst (toMachineWord setType), declInBasis)) end local val sourceLocVal = mkSpecialFun("sourceLocation", Unit ->> Location, GetLocation); in val () = enterPolyMLVal ("sourceLocation", sourceLocVal); end; local (* This is used as one of the arguments to the compiler function. *) open TYPETREE val uniStructEnv = makeStructure(bootstrapEnv, "Universal") fun enterUniversal (name : string, entry : codetree, typ : types) : unit = let val value = mkGvar (name, typ, entry, declInBasis); in #enterVal uniStructEnv (name, value) end; local fun polyTypePrinter _ _ = PRETTY.PrettyString "?" 
open TypeValue val idCode = let val code = createTypeValue{ eqCode=CodeZero, (* Not an equality type *) printCode=mkConst (toMachineWord (ref polyTypePrinter)), boxedCode=mkInlproc(boxedEither(* Assume worst case *), 1, "boxed-tag", [], 0), sizeCode=mkInlproc(singleWord, 1, "size-tag", [], 0)} in Global (genCode(code, [], 0) ()) end in (* type 'a tag *) val tagConstr = makeTypeConstructor("tag", [makeTypeVariable()], makeFreeId(1, idCode, false, basisDescription "tag"), declInBasis); val () = #enterType uniStructEnv ("tag", TypeConstrSet(tagConstr, [])) end (* type universal *) val univConstr = makeTypeConstructor("universal", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "universal"), declInBasis); val () = #enterType uniStructEnv ("universal", TypeConstrSet(univConstr, [])); fun Tag base = mkTypeConstruction ("tag", tagConstr, [base], declInBasis) val Universal = mkTypeConstruction ("universal", univConstr, [], declInBasis) val a = makeTV() (* val tagInject : 'a tag -> 'a -> universal *) val injectType = Tag a ->> a ->> Universal val () = enterUniversal ("tagInject", makePolymorphic([a], mkConst (toMachineWord (Universal.tagInject: 'a Universal.tag -> 'a -> Universal.universal))), injectType) (* We don't actually need tagIs and tagProject since this is only used for the compiler. Universal is redefined in the basis library. *) val projectType = Tag a ->> Universal ->> a val () = enterUniversal ("tagProject", makePolymorphic([a], mkConst (toMachineWord(Universal.tagProject: 'a Universal.tag -> Universal.universal -> 'a))), projectType) val testType = Tag a ->> Universal ->> Bool val () = enterUniversal ("tagIs", makePolymorphic([a], mkConst (toMachineWord(Universal.tagIs: 'a Universal.tag -> Universal.universal -> bool))), testType) in val Tag = Tag and Universal = Universal end local open TYPETREE (* Parsetree properties datatype. *) val propConstr = makeTypeConstructor("ptProperties", [], makeFreeId(0, defaultEqAndPrintCode(), false, basisDescription "PolyML.ptProperties"), declInBasis); val PtProperties = mkTypeConstruction ("ptProperties", propConstr, [], declInBasis) (* Parsetree type. *) val parseTreeConstr = makeTypeAbbreviation("parseTree", "PolyML.parseTree", [], Location ** List PtProperties, declInBasis); val ParseTree = mkTypeConstruction ("parseTree", parseTreeConstr, [], declInBasis) val () = #enterType polyMLEnv ("parseTree", TypeConstrSet(parseTreeConstr, [])); val constrs = (* Order is significant. *) [ ("PTbreakPoint", Ref Bool), ("PTcompletions", List String), ("PTdeclaredAt", Location), ("PTdefId", fixedIntType), ("PTfirstChild", Unit ->> ParseTree), ("PTnextSibling", Unit ->> ParseTree), ("PTopenedAt", Location), ("PTparent", Unit ->> ParseTree), ("PTpreviousSibling", Unit ->> ParseTree), ("PTprint", fixedIntType ->> PrettyType), ("PTreferences", Bool ** List Location), ("PTrefId", fixedIntType), ("PTstructureAt", Location), ("PTtype", Types) ]; (* This representation must match the representation defined in ExportTree.sml. *) val numConstrs = List.length constrs val {constrs=constrReps, ...} = chooseConstrRepr(constrs, []) val constructors = ListPair.map (fn ((s,t), code) => mkGconstr(s, t ->> PtProperties, code, false, numConstrs, declInBasis)) (constrs, constrReps) val () = List.app (fn c => #enterVal polyMLEnv(valName c, c)) constructors (* Put these constructors onto the type. 
*) val () = #enterType polyMLEnv ("ptProperties", TypeConstrSet(propConstr, constructors)); in val ParseTree = ParseTree and PtProperties = PtProperties end local open TYPETREE val compilerType : types = mkProductType[nameSpaceType, Unit ->> Option Char, List Universal] ->> mkProductType[Option ParseTree, Option (Unit ->> execResult)] type compilerType = nameSpace * (unit -> char option) * Universal.universal list -> exportTree option * (unit->execResult) option in val () = enterBootstrap ("use", mkConst (toMachineWord ((useIntoEnv globalTable []): string -> unit)), String ->> Unit) val () = enterBootstrap ("useWithParms", mkConst (toMachineWord ((useIntoEnv globalTable): Universal.universal list -> string -> unit)), List Universal ->> String ->> Unit) val () = enterPolyMLVal("compiler", mkGvar ("compiler", compilerType, mkConst (toMachineWord (compiler: compilerType)), declInBasis)); val () = enterBootstrap("globalSpace", mkConst (toMachineWord(gEnvAsNameSpace globalTable: nameSpace)), nameSpaceType) (* Add a print function so we can print a message at the start of a bootstrap phase. *) val () = enterBootstrap("print", mkConst (toMachineWord TextIO.print), String ->> Unit) end; local val ty = TYPETREE.mkOverloadSet[] val addType = ty ** ty ->> ty; val negType = ty ->> ty; val cmpType = ty ** ty ->> Bool; in val () = enterGlobalValue ("+", mkOverloaded "+" addType); val () = enterGlobalValue ("-", mkOverloaded "-" addType); val () = enterGlobalValue ("*", mkOverloaded "*" addType); val () = enterGlobalValue ("~", mkOverloaded "~" negType); val () = enterGlobalValue ("abs", mkOverloaded "abs" negType); val () = enterGlobalValue (">=", mkOverloaded ">=" cmpType); val () = enterGlobalValue ("<=", mkOverloaded "<=" cmpType); val () = enterGlobalValue (">", mkOverloaded ">" cmpType); val () = enterGlobalValue ("<", mkOverloaded "<" cmpType); (* The following overloads are added in ML97 *) val () = enterGlobalValue ("div", mkOverloaded "div" addType); val () = enterGlobalValue ("mod", mkOverloaded "mod" addType); val () = enterGlobalValue ("/", mkOverloaded "/" addType); end; local open DEBUG; local open TYPETREE val fields = [ mkLabelEntry("location", Location), mkLabelEntry("hard", Bool), mkLabelEntry("message", PrettyType), mkLabelEntry("context", Option PrettyType) ] in val errorMessageProcType = mkLabelled(sortLabels fields, true) ->> Unit type errorMessageProcType = { location: location, hard: bool, message: pretty, context: pretty option } -> unit end local open TYPETREE val optNav = Option(Unit->>ParseTree) val fields = [ mkLabelEntry("parent", optNav), mkLabelEntry("next", optNav), mkLabelEntry("previous", optNav) ] in val navigationType = mkLabelled(sortLabels fields, true) type navigationType = { parent: (unit->exportTree) option, next: (unit->exportTree) option, previous: (unit->exportTree) option } end type 'a tag = 'a Universal.tag in val () = applyList (fn (name, v, t) => enterBootstrap(name, mkConst v, t)) [ ("compilerVersion", toMachineWord (VERSION.compilerVersion: string), String), ("compilerVersionNumber", toMachineWord (VERSION.versionNumber: int), Int), ("lineNumberTag", toMachineWord (lineNumberTag : (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("offsetTag", toMachineWord (offsetTag: (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("fileNameTag", toMachineWord (fileNameTag: string tag), Tag String), ("bindingCounterTag", toMachineWord (bindingCounterTag: (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("maxInlineSizeTag", toMachineWord 
(maxInlineSizeTag: FixedInt.int tag), Tag fixedIntType), ("assemblyCodeTag", toMachineWord (assemblyCodeTag: bool tag), Tag Bool), ("parsetreeTag", toMachineWord (parsetreeTag: bool tag), Tag Bool), ("codetreeTag", toMachineWord (codetreeTag: bool tag), Tag Bool), ("icodeTag", toMachineWord (icodeTag: bool tag), Tag Bool), ("lowlevelOptimiseTag", toMachineWord (lowlevelOptimiseTag: bool tag), Tag Bool), ("codetreeAfterOptTag", toMachineWord (codetreeAfterOptTag: bool tag), Tag Bool), ("inlineFunctorsTag", toMachineWord (inlineFunctorsTag: bool tag), Tag Bool), ("debugTag", toMachineWord (debugTag: bool tag), Tag Bool), ("printDepthFunTag", toMachineWord (DEBUG.printDepthFunTag: (unit->FixedInt.int) tag), Tag (Unit->>fixedIntType)), ("errorDepthTag", toMachineWord (DEBUG.errorDepthTag: FixedInt.int tag), Tag fixedIntType), ("lineLengthTag", toMachineWord (DEBUG.lineLengthTag: FixedInt.int tag), Tag fixedIntType), ("profileAllocationTag", toMachineWord (DEBUG.profileAllocationTag: FixedInt.int tag), Tag fixedIntType), ("printOutputTag", toMachineWord (PRETTY.printOutputTag: (pretty->unit) tag), Tag (PrettyType->>Unit)) , ("compilerOutputTag", toMachineWord (PRETTY.compilerOutputTag: (pretty->unit) tag), Tag (PrettyType->>Unit)), ("errorMessageProcTag", toMachineWord (LEX.errorMessageProcTag: errorMessageProcType tag), Tag errorMessageProcType), ("rootTreeTag", toMachineWord (EXPORTTREE.rootTreeTag: navigation tag), Tag navigationType), ("reportUnreferencedIdsTag", toMachineWord (reportUnreferencedIdsTag: bool tag), Tag Bool), ("reportExhaustiveHandlersTag", toMachineWord (reportExhaustiveHandlersTag: bool tag), Tag Bool), ("narrowOverloadFlexRecordTag", toMachineWord (narrowOverloadFlexRecordTag: bool tag), Tag Bool), ("createPrintFunctionsTag", toMachineWord (createPrintFunctionsTag: bool tag), Tag Bool), ("reportDiscardedValuesTag", toMachineWord (reportDiscardedValuesTag: FixedInt.int tag), Tag fixedIntType) ] end; (* PolyML.CodeTree structure. This exports the CodeTree structure into the ML space. *) local open CODETREE val codetreeEnv = makeStructure(polyMLEnv, "CodeTree") fun createType typeName = makeAndDeclareOpaqueType(typeName, "PolyML.CodeTree." ^ typeName, codetreeEnv) val CodeTree = createType "codetree" and MachineWord = createType "machineWord" and CodeBinding = createType "codeBinding" (* For the moment export these only for the general argument and result types. 
*) fun simpleFn (code, nArgs, name, closure, nLocals) = mkFunction{body=code, argTypes=List.tabulate(nArgs, fn _ => GeneralType), resultType=GeneralType, name=name, closure=closure, numLocals=nLocals} and simpleInlineFn (code, nArgs, name, closure, nLocals) = mkInlineFunction{body=code, argTypes=List.tabulate(nArgs, fn _ => GeneralType), resultType=GeneralType, name=name, closure=closure, numLocals=nLocals} and simpleCall(func, args) = mkCall(func, List.map (fn c => (c, GeneralType)) args, GeneralType) in val CodeTree = CodeTree val () = applyList (fn (name, v, t) => #enterVal codetreeEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("pretty", toMachineWord (CODETREE.pretty: codetree -> pretty), CodeTree ->> PrettyType), ("mkConstant", toMachineWord(mkConst: machineWord -> codetree), MachineWord ->> CodeTree), ("genCode", toMachineWord (genCode: codetree * Universal.universal list * int -> (unit->codetree)), mkProductType[CodeTree, List Universal, Int] ->> (Unit ->> CodeTree)), ("evalue", toMachineWord (evalue: codetree -> machineWord option), CodeTree ->> Option MachineWord), ("mkFunction", toMachineWord (simpleFn: codetree * int * string * codetree list * int -> codetree), mkProductType[CodeTree, Int, String, List CodeTree, Int] ->> CodeTree), ("mkInlineFunction", toMachineWord (simpleInlineFn: codetree * int * string * codetree list * int -> codetree), mkProductType[CodeTree, Int, String, List CodeTree, Int] ->> CodeTree), ("mkCall", toMachineWord (simpleCall: codetree * codetree list -> codetree), CodeTree ** List CodeTree ->> CodeTree), ("mkLoadLocal", toMachineWord (mkLoadLocal: int -> codetree), Int ->> CodeTree), ("mkLoadArgument", toMachineWord (mkLoadArgument: int -> codetree), Int ->> CodeTree), ("mkLoadClosure", toMachineWord (mkLoadClosure: int -> codetree), Int ->> CodeTree), ("mkDec", toMachineWord (mkDec: int * codetree -> codeBinding), Int ** CodeTree ->> CodeBinding), ("mkInd", toMachineWord (mkInd: int * codetree -> codetree), Int ** CodeTree ->> CodeTree), ("mkIf", toMachineWord (mkIf: codetree * codetree * codetree -> codetree), mkProductType[CodeTree, CodeTree, CodeTree] ->> CodeTree), ("mkWhile", toMachineWord (mkWhile: codetree * codetree -> codetree), CodeTree ** CodeTree ->> CodeTree), ("mkLoop", toMachineWord (mkLoop: codetree list -> codetree), List CodeTree ->> CodeTree), ("mkBeginLoop", toMachineWord (mkBeginLoop: codetree * (int * codetree) list -> codetree), CodeTree ** List(Int ** CodeTree) ->> CodeTree), ("mkEnv", toMachineWord (mkEnv: codeBinding list * codetree -> codetree), List CodeBinding ** CodeTree ->> CodeTree), ("mkMutualDecs", toMachineWord (mkMutualDecs: (int * codetree) list -> codeBinding), List(Int ** CodeTree) ->> CodeBinding), ("mkTuple", toMachineWord (mkTuple: codetree list -> codetree), List CodeTree ->> CodeTree), ("mkRaise", toMachineWord (mkRaise: codetree -> codetree), CodeTree ->> CodeTree), ("mkHandle", toMachineWord (mkHandle: codetree * codetree * int -> codetree), mkProductType[CodeTree, CodeTree, Int] ->> CodeTree), ("mkNullDec", toMachineWord (mkNullDec: codetree -> codeBinding), CodeTree ->> CodeBinding) ] end local (* Finish off the NameSpace structure now we have types such as pretty. *) open TYPETREE (* The exported versions expect full name spaces as arguments. Because we convert the exported versions to machineWord and give them types as data structures the compiler can't actually check that the type we give matched the internal type. 
*) fun makeTypeEnv NONE = { lookupType = fn _ => NONE, lookupStruct = fn _ => NONE } | makeTypeEnv(SOME(nameSpace: nameSpace)): printTypeEnv = { lookupType = fn s => case #lookupType nameSpace s of NONE => NONE | SOME t => SOME(t, NONE), lookupStruct = fn s => case #lookupStruct nameSpace s of NONE => NONE | SOME t => SOME(t, NONE) } local (* Values substructure. This also has operations related to type expressions. *) fun codeForValue (Value{access = Global code, class = ValBound, ...}) = code | codeForValue _ = raise Fail "Not a global value" and exportedDisplayTypeExp(ty, depth, nameSpace: nameSpace option) = TYPETREE.display(ty, depth, makeTypeEnv nameSpace) and exportedDisplayValues(valu, depth, nameSpace: nameSpace option) = displayValues(valu, depth, makeTypeEnv nameSpace) and propsForValue (Value {locations, typeOf, ...}) = PTtype typeOf :: mapLocationProps locations fun isConstructor (Value{class = Exception, ...}) = true | isConstructor (Value{class = Constructor _, ...}) = true | isConstructor _ = false fun isException (Value{class = Exception, ...}) = true | isException _ = false in val () = applyList (fn (name, v, t) => #enterVal valuesEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord (valName: values -> string), valueVal ->> String), ("print", toMachineWord (printValues: values * FixedInt.int -> pretty), mkProductType[valueVal, fixedIntType] ->> PrettyType), ("printWithType", toMachineWord (exportedDisplayValues: values * FixedInt.int * nameSpace option -> pretty), mkProductType[valueVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("printType", toMachineWord(exportedDisplayTypeExp: types * FixedInt.int * nameSpace option -> pretty), mkProductType[Types, fixedIntType, Option nameSpaceType] ->> PrettyType), ("typeof", toMachineWord (valTypeOf: values -> types), valueVal ->> Types), ("code", toMachineWord (codeForValue: values -> codetree), valueVal ->> CodeTree), ("properties", toMachineWord (propsForValue: values ->ptProperties list), valueVal ->> List PtProperties), ("isConstructor", toMachineWord(isConstructor: values -> bool), valueVal ->> Bool), ("isException", toMachineWord(isException: values -> bool), valueVal ->> Bool) ] end local (* TypeConstrs substructure. 
*) fun exportedDisplayTypeConstr(tyCons, depth, nameSpace: nameSpace option) = TYPETREE.displayTypeConstrs(tyCons, depth, makeTypeEnv nameSpace) and propsForTypeConstr (TypeConstrSet(TypeConstrs {locations, ...}, _)) = mapLocationProps locations and nameForType (TypeConstrSet(TypeConstrs{name, ...}, _)) = name in val () = applyList (fn (name, v, t) => #enterVal typesEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord(nameForType: typeConstrSet -> string), typeVal ->> String), ("print", toMachineWord (exportedDisplayTypeConstr: typeConstrSet * FixedInt.int * nameSpace option -> pretty), mkProductType[typeVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("properties", toMachineWord (propsForTypeConstr: typeConstrSet ->ptProperties list), typeVal ->> List PtProperties) ] end local (* Structures substructure *) fun exportedDisplayStructs(str, depth, nameSpace: nameSpace option) = displayStructures(str, depth, makeTypeEnv nameSpace) and codeForStruct (Struct{access = Global code, ...}) = code | codeForStruct _ = raise Fail "Not a global structure" and propsForStruct (Struct {locations, ...}) = mapLocationProps locations and nameForStruct (Struct{name, ...}) = name fun nameSpaceForStruct(baseStruct as Struct{signat=Signatures { tab, ...}, ...}): nameSpace = let open UNIVERSALTABLE fun lookupVal s = case univLookup (tab, valueVar, s) of NONE => NONE | SOME v => SOME(makeSelectedValue(v, baseStruct)) and lookupType s = case univLookup (tab, typeConstrVar, s) of NONE => NONE | SOME t => SOME(makeSelectedType(t, baseStruct)) and lookupStruct s = case univLookup (tab, structVar, s) of NONE => NONE | SOME s => SOME(makeSelectedStructure(s, baseStruct)) local fun extractItems t tab = UNIVERSALTABLE.fold (fn (s, u, l) => if Universal.tagIs t u then (s, Universal.tagProject t u) :: l else l ) [] tab in fun allValues() = map(fn (s, v) => (s, makeSelectedValue(v, baseStruct))) (extractItems valueVar tab) and allTypes() = map(fn (s, t) => (s, makeSelectedType(t, baseStruct))) (extractItems typeConstrVar tab) and allStructs() = map(fn (s, v) => (s, makeSelectedStructure(v, baseStruct))) (extractItems structVar tab) end fun enterFunction _ = raise Fail "updating a structure is not possible." (* Raise an exception for any attempt to enter a new value. Return empty for the classes that can't exist in a structure. 
*) in { lookupVal = lookupVal, lookupType = lookupType, lookupStruct = lookupStruct, lookupFix = fn _ => NONE, lookupSig = fn _ => NONE, lookupFunct = fn _ => NONE, enterVal = enterFunction, enterType = enterFunction, enterFix = enterFunction, enterStruct = enterFunction, enterSig = enterFunction, enterFunct = enterFunction, allVal = allValues, allType = allTypes, allStruct = allStructs, allFix = fn () => [], allSig = fn () => [], allFunct = fn () => [] } end in val () = applyList (fn (name, v, t) => #enterVal structsEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord(nameForStruct: structVals -> string), structureVal ->> String), ("print", toMachineWord (exportedDisplayStructs: structVals * FixedInt.int * nameSpace option -> pretty), mkProductType[structureVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("code", toMachineWord (codeForStruct: structVals -> codetree), structureVal ->> CodeTree), ("properties", toMachineWord (propsForStruct: structVals ->ptProperties list), structureVal ->> List PtProperties), ("contents", toMachineWord(nameSpaceForStruct: structVals -> nameSpace), structureVal ->> nameSpaceType) ] end local (* Signatures substructure *) fun exportedDisplaySigs(sign, depth, nameSpace: nameSpace option) = displaySignatures(sign, depth, makeTypeEnv nameSpace) and propsForSig (Signatures {locations, ...}) = mapLocationProps locations and nameForSig (Signatures{name, ...}) = name in val () = applyList (fn (name, v, t) => #enterVal sigsEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord(nameForSig: signatures -> string), signatureVal ->> String), ("print", toMachineWord (exportedDisplaySigs: signatures * FixedInt.int * nameSpace option -> pretty), mkProductType[signatureVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("properties", toMachineWord (propsForSig: signatures ->ptProperties list), signatureVal ->> List PtProperties) ] end local (* Functors substructure *) fun exportedDisplayFunctors(funct, depth, nameSpace: nameSpace option) = displayFunctors(funct, depth, makeTypeEnv nameSpace) and codeForFunct (Functor{access = Global code, ...}) = code | codeForFunct _ = raise Fail "Not a global functor" and propsForFunctor (Functor {locations, ...}) = mapLocationProps locations and nameForFunctor (Functor{name, ...}) = name in val () = applyList (fn (name, v, t) => #enterVal functsEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord(nameForFunctor: functors -> string), functorVal ->> String), ("print", toMachineWord (exportedDisplayFunctors: functors * FixedInt.int * nameSpace option -> pretty), mkProductType[functorVal, fixedIntType, Option nameSpaceType] ->> PrettyType), ("code", toMachineWord (codeForFunct: functors -> codetree), functorVal ->> CodeTree), ("properties", toMachineWord (propsForFunctor: functors ->ptProperties list), functorVal ->> List PtProperties) ] end local (* Infixes substructure *) fun nameForFix(FixStatus(s, _)) = s in val () = applyList (fn (name, v, t) => #enterVal fixesEnv (name, mkGvar (name, t, mkConst v, declInBasis))) [ ("name", toMachineWord(nameForFix: fixStatus -> string), fixityVal ->> String), ("print", toMachineWord (displayFixStatus: fixStatus -> pretty), fixityVal ->> PrettyType) ] end in end in () end (* initGlobalEnv *); end;
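(* The Thread structure registered above now provides atomicExchAdd, atomicReset and
   cpuPause to the basis-library bootstrap in place of atomicIncr/atomicDecr.  As a
   rough illustration of how code compiled against this environment could use them,
   here is a minimal sketch of an atomic event counter.  It assumes, going only by the
   types entered above, that atomicExchAdd(r, w) atomically adds w to !r and returns
   the previous contents, that atomicReset r sets !r back to 0w0, and that cpuPause ()
   is purely a spin-wait hint to the processor.  The helper names are hypothetical and
   this is not the basis library's own use of these primitives. *)
local
    val pending: word ref = ref 0w0
in
    (* Record one event; the result is the count as it was before this call. *)
    fun signalEvent (): word = Thread.atomicExchAdd(pending, 0w1)
    (* Forget any recorded events. *)
    fun clearEvents (): unit = Thread.atomicReset pending
    (* Poll until at least n events have been recorded, pausing between polls. *)
    fun awaitEvents (n: word): unit =
        while !pending < n do Thread.cpuPause ()
end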
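(* PolyML.CodeTree, filled in above, gives ML-level access to the compiler's code-tree
   construction and code-generation entry points.  The sketch below follows the types
   registered for mkFunction, mkLoadArgument, genCode and evalue, but it is an
   illustrative example rather than code taken from the compiler or basis sources. *)
local
    open PolyML.CodeTree
in
    (* Body is the first argument; one argument, no closure entries, no locals. *)
    val identityTree = mkFunction(mkLoadArgument 0, 1, "identity", [], 0)
    (* genCode compiles the tree and returns a thunk; forcing it yields the compiled
       value back as a constant code-tree, which evalue can then inspect. *)
    val compiledIdentity = genCode(identityTree, [], 0) ()
    val identityWord = evalue compiledIdentity   (* SOME w when it is a constant *)
end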