diff --git a/libpolyml/elfexport.cpp b/libpolyml/elfexport.cpp
index f5c5a025..e16c7971 100644
--- a/libpolyml/elfexport.cpp
+++ b/libpolyml/elfexport.cpp
@@ -1,787 +1,819 @@
/*
    Title:      Write out a database as an ELF object file
    Author:     David Matthews.

    Copyright (c) 2006-7, 2011, 2016-18, 2020 David C. J. Matthews

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License version 2.1 as published by the Free Software Foundation.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include "config.h"

#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STDIO_H
#include <stdio.h>
#endif
#ifdef HAVE_STDDEF_H
#include <stddef.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#ifdef HAVE_TIME_H
#include <time.h>
#endif
#ifdef HAVE_ASSERT_H
#include <assert.h>
#define ASSERT(x) assert(x)
#else
#define ASSERT(x)
#endif

#ifdef HAVE_ELF_H
#include <elf.h>
#elif defined(HAVE_ELF_ABI_H)
#include <elf_abi.h>
#endif

#ifdef HAVE_MACHINE_RELOC_H
#include <machine/reloc.h>
#ifndef EM_X86_64
#define EM_X86_64 EM_AMD64
#endif
#if defined(HOSTARCHITECTURE_X86_64)
#ifndef R_386_PC32
#define R_386_PC32 R_X86_64_PC32
#endif
#ifndef R_386_32
#define R_386_32 R_X86_64_32
#endif
#ifndef R_X86_64_64
#define R_X86_64_64 R_X86_64_64
#endif
#endif /* HOSTARCHITECTURE_X86_64 */
#endif

// Solaris seems to put processor-specific constants in separate files
#ifdef HAVE_SYS_ELF_SPARC_H
#include <sys/elf_SPARC.h>
#endif
#ifdef HAVE_SYS_ELF_386_H
#include <sys/elf_386.h>
#endif
#ifdef HAVE_SYS_ELF_AMD64_H
#include <sys/elf_amd64.h>
#endif

// Android has the ARM relocation symbol here
#ifdef HAVE_ASM_ELF_H
#include <asm/elf.h>
#endif

#ifdef HAVE_STRING_H
#include <string.h>
#endif
#ifdef HAVE_SYS_UTSNAME_H
#include <sys/utsname.h>
#endif

#include "globals.h"
#include "diagnostics.h"
#include "sys.h"
#include "machine_dep.h"
#include "gc.h"
#include "mpoly.h"
#include "scanaddrs.h"
#include "elfexport.h"
#include "run_time.h"
#include "version.h"
#include "polystring.h"
#include "timing.h"

#define sym_last_local_sym sym_data_section

#if defined(HOSTARCHITECTURE_X86)
# define HOST_E_MACHINE EM_386
# define HOST_DIRECT_DATA_RELOC R_386_32
# define HOST_DIRECT_FPTR_RELOC R_386_32
# define USE_RELA 0
#elif defined(HOSTARCHITECTURE_PPC)
# define HOST_E_MACHINE EM_PPC
# define HOST_DIRECT_DATA_RELOC R_PPC_ADDR32
# define HOST_DIRECT_FPTR_RELOC R_PPC_ADDR32
# define USE_RELA 1
#elif defined(HOSTARCHITECTURE_PPC64)
# define HOST_E_MACHINE EM_PPC64
# define HOST_DIRECT_DATA_RELOC R_PPC64_ADDR64
# define HOST_DIRECT_FPTR_RELOC R_PPC64_ADDR64
# define USE_RELA 1
#elif defined(HOSTARCHITECTURE_S390)
# define HOST_E_MACHINE EM_S390
# define HOST_DIRECT_DATA_RELOC R_390_32
# define HOST_DIRECT_FPTR_RELOC R_390_32
# define USE_RELA 1
#elif defined(HOSTARCHITECTURE_S390X)
# define HOST_E_MACHINE EM_S390
# define HOST_DIRECT_DATA_RELOC R_390_64
# define HOST_DIRECT_FPTR_RELOC R_390_64
# define USE_RELA 1
#elif defined(HOSTARCHITECTURE_SH)
# define HOST_E_MACHINE EM_SH
# define HOST_DIRECT_DATA_RELOC R_SH_DIR32
# define HOST_DIRECT_FPTR_RELOC R_SH_DIR32
# define USE_RELA 1
#elif defined(HOSTARCHITECTURE_SPARC)
# define HOST_E_MACHINE EM_SPARC
# define HOST_DIRECT_DATA_RELOC R_SPARC_32
# define HOST_DIRECT_FPTR_RELOC
R_SPARC_32 # define USE_RELA 1 /* Sparc/Solaris, at least 2.8, requires ELF32_Rela relocations. For some reason, though, it adds the value in the location being relocated (as with ELF32_Rel relocations) as well as the addend. To be safe, whenever we use an ELF32_Rela relocation we always zero the location to be relocated. */ #elif defined(HOSTARCHITECTURE_SPARC64) # define HOST_E_MACHINE EM_SPARCV9 # define HOST_DIRECT_DATA_RELOC R_SPARC_64 # define HOST_DIRECT_FPTR_RELOC R_SPARC_64 /* Use the most relaxed memory model. At link time, the most restrictive one is chosen, so it does no harm to be as permissive as possible here. */ # define HOST_E_FLAGS EF_SPARCV9_RMO # define USE_RELA 1 #elif defined(HOSTARCHITECTURE_X86_64) /* It seems Solaris/X86-64 only supports ELF64_Rela relocations. It appears that Linux will support either so we now use Rela on X86-64. */ # define HOST_E_MACHINE EM_X86_64 # define HOST_DIRECT_DATA_RELOC R_X86_64_64 # define HOST_DIRECT_FPTR_RELOC R_X86_64_64 # define USE_RELA 1 #elif defined(HOSTARCHITECTURE_X32) # define HOST_E_MACHINE EM_X86_64 # define HOST_DIRECT_DATA_RELOC R_X86_64_32 # define HOST_DIRECT_FPTR_RELOC R_X86_64_32 # define USE_RELA 1 #elif defined(HOSTARCHITECTURE_ARM) # ifndef EF_ARM_EABI_VER4 # define EF_ARM_EABI_VER4 0x04000000 # endif // When linking ARM binaries the linker checks the ABI version. We // need to set the version to the same as the libraries. // GCC currently uses version 4. # define HOST_E_MACHINE EM_ARM # define HOST_DIRECT_DATA_RELOC R_ARM_ABS32 # define HOST_DIRECT_FPTR_RELOC R_ARM_ABS32 # define USE_RELA 0 # define HOST_E_FLAGS EF_ARM_EABI_VER4 #elif defined(HOSTARCHITECTURE_HPPA) # if defined(__hpux) # define HOST_OSABI ELFOSABI_HPUX # elif defined(__NetBSD__) # define HOST_OSABI ELFOSABI_NETBSD # elif defined(__linux__) # define HOST_OSABI ELFOSABI_GNU # endif # define HOST_E_MACHINE EM_PARISC # define HOST_DIRECT_DATA_RELOC R_PARISC_DIR32 # define HOST_DIRECT_FPTR_RELOC R_PARISC_PLABEL32 # define HOST_E_FLAGS EFA_PARISC_1_0 # define USE_RELA 1 #elif defined(HOSTARCHITECTURE_IA64) # define HOST_E_MACHINE EM_IA_64 # define HOST_DIRECT_DATA_RELOC R_IA64_DIR64LSB # define HOST_DIRECT_FPTR_RELOC R_IA64_FPTR64LSB # define HOST_E_FLAGS EF_IA_64_ABI64 # define USE_RELA 1 #elif defined(HOSTARCHITECTURE_AARCH64) # define HOST_E_MACHINE EM_AARCH64 # define HOST_DIRECT_DATA_RELOC R_AARCH64_ABS64 # define HOST_DIRECT_FPTR_RELOC R_AARCH64_ABS64 # define USE_RELA 1 #elif defined(HOSTARCHITECTURE_M68K) # define HOST_E_MACHINE EM_68K # define HOST_DIRECT_DATA_RELOC R_68K_32 # define HOST_DIRECT_FPTR_RELOC R_68K_32 # define USE_RELA 1 #elif defined(HOSTARCHITECTURE_MIPS) # define HOST_E_MACHINE EM_MIPS # define HOST_DIRECT_DATA_RELOC R_MIPS_32 # define HOST_DIRECT_FPTR_RELOC R_MIPS_32 # ifdef __PIC__ # define HOST_E_FLAGS EF_MIPS_CPIC # endif # define USE_RELA 1 #elif defined(HOSTARCHITECTURE_MIPS64) # define HOST_E_MACHINE EM_MIPS # define HOST_DIRECT_DATA_RELOC R_MIPS_64 # define HOST_DIRECT_FPTR_RELOC R_MIPS_64 # ifdef __PIC__ # define HOST_E_FLAGS (EF_MIPS_ARCH_64 | EF_MIPS_CPIC) # else # define HOST_E_FLAGS EF_MIPS_ARCH_64 # endif # define USE_RELA 1 #elif defined(HOSTARCHITECTURE_ALPHA) # define HOST_E_MACHINE EM_ALPHA # define HOST_DIRECT_DATA_RELOC R_ALPHA_REFQUAD # define HOST_DIRECT_FPTR_RELOC R_ALPHA_REFQUAD # define USE_RELA 1 #elif defined(HOSTARCHITECTURE_RISCV32) || defined(HOSTARCHITECTURE_RISCV64) # define HOST_E_MACHINE EM_RISCV # if defined(HOSTARCHITECTURE_RISCV32) # define HOST_DIRECT_DATA_RELOC R_RISCV_32 # define 
HOST_DIRECT_FPTR_RELOC R_RISCV_32 # else # define HOST_DIRECT_DATA_RELOC R_RISCV_64 # define HOST_DIRECT_FPTR_RELOC R_RISCV_64 # endif # if defined(__riscv_float_abi_soft) # define HOST_E_FLAGS_FLOAT_ABI EF_RISCV_FLOAT_ABI_SOFT # elif defined(__riscv_float_abi_single) # define HOST_E_FLAGS_FLOAT_ABI EF_RISCV_FLOAT_ABI_SINGLE # elif defined(__riscv_float_abi_double) # define HOST_E_FLAGS_FLOAT_ABI EF_RISCV_FLOAT_ABI_DOUBLE # elif defined(__riscv_float_abi_quad) # define HOST_E_FLAGS_FLOAT_ABI EF_RISCV_FLOAT_ABI_QUAD # else # error "Unknown RISC-V float ABI" # endif # ifdef __riscv_32e # define HOST_E_FLAGS_RVE __riscv_32e # else # define HOST_E_FLAGS_RVE 0 # endif # define HOST_E_FLAGS (HOST_E_FLAGS_FLOAT_ABI | HOST_E_FLAGS_RVE) # define USE_RELA 1 #else # error "No support for exporting on this architecture" #endif // The first two symbols are special: // Zero is always special in ELF // 1 is used for the data section #define EXTRA_SYMBOLS 2 static unsigned AreaToSym(unsigned area) { return area+EXTRA_SYMBOLS; } // Section table entries enum { sect_initial = 0, sect_sectionnametable, sect_stringtable, // Data and relocation entries come in here. sect_data // Finally the symbol table }; // Add an external reference to the RTS void ELFExport::addExternalReference(void *relocAddr, const char *name, bool isFuncPtr) { externTable.makeEntry(name); // The symbol is added after the memory table entries and poly_exports writeRelocation(0, relocAddr, symbolNum++, isFuncPtr); } // Generate the address relative to the start of the segment. void ELFExport::setRelocationAddress(void *p, ElfXX_Addr *reloc) { unsigned area = findArea(p); POLYUNSIGNED offset = (char*)p - (char*)memTable[area].mtOriginalAddr; *reloc = offset; } /* Get the index corresponding to an address. */ PolyWord ELFExport::createRelocation(PolyWord p, void *relocAddr) { void *addr = p.AsAddress(); unsigned addrArea = findArea(addr); POLYUNSIGNED offset = (char*)addr - (char*)memTable[addrArea].mtOriginalAddr; return writeRelocation(offset, relocAddr, AreaToSym(addrArea), false); } PolyWord ELFExport::writeRelocation(POLYUNSIGNED offset, void *relocAddr, unsigned symbolNum, bool isFuncPtr) { #if USE_RELA ElfXX_Rela reloc; reloc.r_addend = offset; offset = 0; #else ElfXX_Rel reloc; #endif // Set the offset within the section we're scanning. setRelocationAddress(relocAddr, &reloc.r_offset); #ifdef HOSTARCHITECTURE_MIPS64 reloc.r_sym = symbolNum; reloc.r_ssym = 0; reloc.r_type = isFuncPtr ? HOST_DIRECT_FPTR_RELOC : HOST_DIRECT_DATA_RELOC; reloc.r_type2 = 0; reloc.r_type3 = 0; #else reloc.r_info = ELFXX_R_INFO(symbolNum, isFuncPtr ? HOST_DIRECT_FPTR_RELOC : HOST_DIRECT_DATA_RELOC); #endif fwrite(&reloc, sizeof(reloc), 1, exportFile); relocationCount++; return PolyWord::FromUnsigned(offset); } /* This is called for each constant within the code. Print a relocation entry for the word and return a value that means that the offset is saved in original word. */ void ELFExport::ScanConstant(PolyObject *base, byte *addr, ScanRelocationKind code) { #ifndef POLYML32IN64 PolyObject *p = GetConstantValue(addr, code); if (p == 0) return; void *a = p; unsigned aArea = findArea(a); // We don't need a relocation if this is relative to the current segment // since the relative address will already be right. if (code == PROCESS_RELOC_I386RELATIVE && aArea == findArea(addr)) return; // Set the value at the address to the offset relative to the symbol. 
POLYUNSIGNED offset = (char*)a - (char*)memTable[aArea].mtOriginalAddr; switch (code) { case PROCESS_RELOC_DIRECT: // 32 or 64 bit address of target { PolyWord r = createRelocation(p, addr); POLYUNSIGNED w = r.AsUnsigned(); for (unsigned i = 0; i < sizeof(PolyWord); i++) { addr[i] = (byte)(w & 0xff); w >>= 8; } } break; #if(defined(HOSTARCHITECTURE_X86) || defined(HOSTARCHITECTURE_X86_64) || \ defined(HOSTARCHITECTURE_X32)) #ifdef HOSTARCHITECTURE_X86 #define R_PC_RELATIVE R_386_PC32 #else #define R_PC_RELATIVE R_X86_64_PC32 #endif case PROCESS_RELOC_I386RELATIVE: // 32 bit relative address { #if USE_RELA ElfXX_Rela reloc; reloc.r_addend = offset; #else ElfXX_Rel reloc; #endif setRelocationAddress(addr, &reloc.r_offset); // We seem to need to subtract 4 bytes to get the correct offset in ELF offset -= 4; reloc.r_info = ELFXX_R_INFO(AreaToSym(aArea), R_PC_RELATIVE); #if USE_RELA // Clear the field. Even though it's not supposed to be used with Rela the // Linux linker at least seems to add the value in here sometimes. memset(addr, 0, 4); #else for (unsigned i = 0; i < 4; i++) { addr[i] = (byte)(offset & 0xff); offset >>= 8; } #endif fwrite(&reloc, sizeof(reloc), 1, exportFile); relocationCount++; } break; #endif default: ASSERT(0); // Wrong type of relocation for this architecture. } #endif } unsigned long ELFExport::makeStringTableEntry(const char *str, ExportStringTable *stab) { if (str == NULL || str[0] == 0) return 0; // First entry is the null string. else return stab->makeEntry(str); } void ELFExport::writeSymbol(const char *symbolName, long value, long size, int binding, int sttype, int section) { ElfXX_Sym symbol; memset(&symbol, 0, sizeof(symbol)); // Zero unused fields symbol.st_name = makeStringTableEntry(symbolName, &symStrings); symbol.st_value = value; symbol.st_size = size; symbol.st_info = ELFXX_ST_INFO(binding, sttype); symbol.st_other = 0; symbol.st_shndx = section; fwrite(&symbol, sizeof(symbol), 1, exportFile); } // Set the file alignment. void ELFExport::alignFile(int align) { char pad[32] = {0}; // Maximum alignment int offset = ftell(exportFile); if ((offset % align) == 0) return; fwrite(&pad, align - (offset % align), 1, exportFile); } void ELFExport::createStructsRelocation(unsigned sym, size_t offset, size_t addend) { #if USE_RELA ElfXX_Rela reloc; reloc.r_addend = addend; #else ElfXX_Rel reloc; #endif reloc.r_offset = offset; #ifdef HOSTARCHITECTURE_MIPS64 reloc.r_sym = sym; reloc.r_ssym = 0; reloc.r_type = HOST_DIRECT_DATA_RELOC; reloc.r_type2 = 0; reloc.r_type3 = 0; #else reloc.r_info = ELFXX_R_INFO(sym, HOST_DIRECT_DATA_RELOC); #endif fwrite(&reloc, sizeof(reloc), 1, exportFile); relocationCount++; } void ELFExport::exportStore(void) { PolyWord *p; ElfXX_Ehdr fhdr; ElfXX_Shdr *sections = 0; #ifdef __linux__ unsigned extraSections = 1; // Extra section for .note.GNU-stack #else unsigned extraSections = 0; #endif - unsigned numSections = 6 + 2*memTableEntries /*- 1*/ + extraSections; - // The symbol table comes at the end. - unsigned sect_symtab = sect_data + 2*memTableEntries + 2/* - 1*/; - - unsigned i; + unsigned numSections = 0; + for (unsigned j = 0; j < memTableEntries; j++) + { + if ((memTable[j].mtFlags & (MTF_BYTES|MTF_WRITEABLE)) == MTF_BYTES) + numSections += 1; + else numSections += 2; + } + // The symbol table comes at the end. + unsigned sect_symtab = sect_data + numSections + 2; + numSections += 6 + extraSections; // External symbols start after the memory table entries and "poly_exports". 
    symbolNum = EXTRA_SYMBOLS+memTableEntries+1;

    // Both the string tables have an initial null entry.
    symStrings.makeEntry("");
    sectionStrings.makeEntry("");

    // Write out initial values for the headers. These are overwritten at the end.
    // File header
    memset(&fhdr, 0, sizeof(fhdr));
    fhdr.e_ident[EI_MAG0] = 0x7f;
    fhdr.e_ident[EI_MAG1] = 'E';
    fhdr.e_ident[EI_MAG2] = 'L';
    fhdr.e_ident[EI_MAG3] = 'F';
    fhdr.e_ident[EI_CLASS] = ELFCLASSXX; // ELFCLASS32 or ELFCLASS64
    fhdr.e_ident[EI_VERSION] = EV_CURRENT;
#ifdef HOST_OSABI
    fhdr.e_ident[EI_OSABI] = HOST_OSABI;
#endif
    {
        union { unsigned long wrd; char chrs[sizeof(unsigned long)]; } endian;
        endian.wrd = 1;
        if (endian.chrs[0] == 0)
            fhdr.e_ident[EI_DATA] = ELFDATA2MSB; // Big endian
        else
            fhdr.e_ident[EI_DATA] = ELFDATA2LSB; // Little endian
    }
    fhdr.e_type = ET_REL;
    // The machine needs to match the machine we're compiling for
    // even if this is actually portable code.
    fhdr.e_machine = HOST_E_MACHINE;
#ifdef HOST_E_FLAGS
    fhdr.e_flags = HOST_E_FLAGS;
#endif
    fhdr.e_version = EV_CURRENT;
    fhdr.e_shoff = sizeof(fhdr); // Offset to section header - immediately follows
    fhdr.e_ehsize = sizeof(fhdr);
    fhdr.e_shentsize = sizeof(ElfXX_Shdr);
    fhdr.e_shnum = numSections;
    fhdr.e_shstrndx = sect_sectionnametable; // Section name table section index;
    fwrite(&fhdr, sizeof(fhdr), 1, exportFile); // Write it for the moment.

    sections = new ElfXX_Shdr[numSections];
    memset(sections, 0, sizeof(ElfXX_Shdr) * numSections); // Necessary?

    // Set up the section header but don't write it yet.

    // Section 0 - all zeros
    sections[sect_initial].sh_type = SHT_NULL;
    sections[sect_initial].sh_link = SHN_UNDEF;

    // Section name table.
    sections[sect_sectionnametable].sh_name = makeStringTableEntry(".shstrtab", &sectionStrings);
    sections[sect_sectionnametable].sh_type = SHT_STRTAB;
    sections[sect_sectionnametable].sh_addralign = sizeof(char);
    // sections[sect_sectionnametable].sh_offset is set later
    // sections[sect_sectionnametable].sh_size is set later

    // Symbol name table.
    sections[sect_stringtable].sh_name = makeStringTableEntry(".strtab", &sectionStrings);
    sections[sect_stringtable].sh_type = SHT_STRTAB;
    sections[sect_stringtable].sh_addralign = sizeof(char);
    // sections[sect_stringtable].sh_offset is set later
    // sections[sect_stringtable].sh_size is set later

    unsigned long dataName = makeStringTableEntry(".data", &sectionStrings);
    unsigned long dataRelName = makeStringTableEntry(USE_RELA ? ".rela.data" : ".rel.data", &sectionStrings);
#ifndef CODEISNOTEXECUTABLE
    unsigned long textName = makeStringTableEntry(".text", &sectionStrings);
    unsigned long textRelName = makeStringTableEntry(USE_RELA ? ".rela.text" : ".rel.text", &sectionStrings);
#endif
    // The Linux linker does not like relocations in the .rodata section and marks the executable
    // as containing text relocations. Putting the data in a .data.rel.ro section seems to work.
-    unsigned long rodataName = makeStringTableEntry(".data.rel.ro", &sectionStrings);
-    unsigned long rodataRelName = makeStringTableEntry(USE_RELA ? ".rela.data.ro" : ".rel.data.ro", &sectionStrings);
+    unsigned long relDataName = makeStringTableEntry(".data.rel.ro", &sectionStrings);
+    unsigned long relDataRelName = makeStringTableEntry(USE_RELA ? ".rela.data.rel.ro" : ".rel.data.rel.ro", &sectionStrings);
+    // Byte and other leaf data that do not require relocation can go in the .rodata section
+    unsigned long nRelDataName = makeStringTableEntry(".rodata", &sectionStrings);

    // Main data sections. Each one has a relocation section.
- for (i=0; i < memTableEntries; i++) + unsigned s = sect_data; + for (unsigned i=0; i < memTableEntries; i++) { - unsigned s = sect_data + i*2; sections[s].sh_addralign = 8; // 8-byte alignment sections[s].sh_type = SHT_PROGBITS; if (memTable[i].mtFlags & MTF_WRITEABLE) { // Mutable areas ASSERT(!(memTable[i].mtFlags & MTF_EXECUTABLE)); // Executable areas can't be writable. sections[s].sh_name = dataName; sections[s].sh_flags = SHF_WRITE | SHF_ALLOC; - sections[s+1].sh_name = dataRelName; // Name of relocation section + s++; + // Mutable byte areas can contain external references so need relocation + sections[s].sh_name = dataRelName; // Name of relocation section } #ifndef CODEISNOTEXECUTABLE // Not if we're building the interpreted version. else if (memTable[i].mtFlags & MTF_EXECUTABLE) { // Code areas are marked as executable. sections[s].sh_name = textName; sections[s].sh_flags = SHF_ALLOC | SHF_EXECINSTR; - sections[s+1].sh_name = textRelName; // Name of relocation section + s++; + sections[s].sh_name = textRelName; // Name of relocation section } #endif - else + else if (memTable[i].mtFlags & MTF_BYTES) { + // Data that does not require relocation. // Non-code immutable areas - sections[s].sh_name = rodataName; + sections[s].sh_name = nRelDataName; sections[s].sh_flags = SHF_ALLOC; - sections[s+1].sh_name = rodataRelName; // Name of relocation section + s++; + continue; // Skip the relocation section for this + } + else + { + // Non-code immutable areas + sections[s].sh_name = relDataName; + // The .data.rel.ro has to be writable in order to be relocated. + // It is set to read-only after relocation. + sections[s].sh_flags = SHF_WRITE | SHF_ALLOC; + s++; + sections[s].sh_name = relDataRelName; // Name of relocation section } // sections[s].sh_size is set later // sections[s].sh_offset is set later. // sections[s].sh_size is set later. // Relocation section - sections[s+1].sh_type = USE_RELA ? SHT_RELA : SHT_REL; // Contains relocation with/out explicit addends (ElfXX_Rel) - sections[s+1].sh_link = sect_symtab; // Index to symbol table - sections[s+1].sh_info = s; // Applies to the data section - sections[s+1].sh_addralign = sizeof(long); // Align to a word - sections[s+1].sh_entsize = USE_RELA ? sizeof(ElfXX_Rela) : sizeof(ElfXX_Rel); + sections[s].sh_type = USE_RELA ? SHT_RELA : SHT_REL; // Contains relocation with/out explicit addends (ElfXX_Rel) + sections[s].sh_link = sect_symtab; // Index to symbol table + sections[s].sh_info = s-1; // Applies to the data section + sections[s].sh_addralign = sizeof(long); // Align to a word + sections[s].sh_entsize = USE_RELA ? sizeof(ElfXX_Rela) : sizeof(ElfXX_Rel); + s++; // sections[s+1].sh_offset is set later. // sections[s+1].sh_size is set later. } // Table data - Poly tables that describe the memory layout. - unsigned sect_table_data = sect_data + 2*memTableEntries; + unsigned sect_table_data = s; sections[sect_table_data].sh_name = dataName; sections[sect_table_data].sh_type = SHT_PROGBITS; sections[sect_table_data].sh_flags = SHF_WRITE | SHF_ALLOC; sections[sect_table_data].sh_addralign = 8; // 8-byte alignment // Table relocation sections[sect_table_data+1].sh_name = dataRelName; sections[sect_table_data+1].sh_type = USE_RELA ? 
        SHT_RELA : SHT_REL; // Contains relocation with/out explicit addends (ElfXX_Rel)
    sections[sect_table_data+1].sh_link = sect_symtab; // Index to symbol table
    sections[sect_table_data+1].sh_info = sect_table_data; // Applies to table section
    sections[sect_table_data+1].sh_addralign = sizeof(long); // Align to a word
    sections[sect_table_data+1].sh_entsize = USE_RELA ? sizeof(ElfXX_Rela) : sizeof(ElfXX_Rel);

    // Symbol table.
    sections[sect_symtab].sh_name = makeStringTableEntry(".symtab", &sectionStrings);
    sections[sect_symtab].sh_type = SHT_SYMTAB;
    sections[sect_symtab].sh_link = sect_stringtable; // String table to use
    sections[sect_symtab].sh_addralign = sizeof(long); // Align to a word
    sections[sect_symtab].sh_entsize = sizeof(ElfXX_Sym);
    // sections[sect_symtab].sh_info is set later
    // sections[sect_symtab].sh_size is set later
    // sections[sect_symtab].sh_offset is set later

#ifdef __linux__
    // Add a .note.GNU-stack section to indicate this does not require executable stack
    sections[numSections-1].sh_name = makeStringTableEntry(".note.GNU-stack", &sectionStrings);
    sections[numSections - 1].sh_type = SHT_PROGBITS;
#endif

    // Write the relocations.
-
-    for (i = 0; i < memTableEntries; i++)
+    unsigned relocSection = sect_data;
+    for (unsigned i = 0; i < memTableEntries; i++)
    {
-        unsigned relocSection = sect_data + i*2 + 1;
+        relocSection++;
+        if ((memTable[i].mtFlags & (MTF_BYTES|MTF_WRITEABLE)) == MTF_BYTES)
+            continue;
        alignFile(sections[relocSection].sh_addralign);
        sections[relocSection].sh_offset = ftell(exportFile);
        relocationCount = 0;
        // Create the relocation table and turn all addresses into offsets.
        char *start = (char*)memTable[i].mtOriginalAddr;
        char *end = start + memTable[i].mtLength;
        for (p = (PolyWord*)start; p < (PolyWord*)end; )
        {
            p++;
            PolyObject *obj = (PolyObject*)p;
            POLYUNSIGNED length = obj->Length();
            // Update any constants before processing the object
            // We need that for relative jumps/calls in X86/64.
            if (length != 0 && obj->IsCodeObject())
                machineDependent->ScanConstantsWithinCode(obj, this);
            relocateObject(obj);
            p += length;
        }
        sections[relocSection].sh_size = relocationCount * (USE_RELA ? sizeof(ElfXX_Rela) : sizeof(ElfXX_Rel));
+        relocSection++;
    }

    // Relocations for "exports" and "memTable";
    alignFile(sections[sect_table_data+1].sh_addralign);
    sections[sect_table_data+1].sh_offset = ftell(exportFile);
    relocationCount = 0;
    // TODO: This won't be needed if we put these in a separate section.
    POLYUNSIGNED areaSpace = 0;
-    for (i = 0; i < memTableEntries; i++)
+    for (unsigned i = 0; i < memTableEntries; i++)
        areaSpace += memTable[i].mtLength;

    // Address of "memTable" within "exports". We can't use createRelocation because
    // the position of the relocation is not in either the mutable or the immutable area.
    size_t memTableOffset = sizeof(exportDescription); // It follows immediately after this.
    createStructsRelocation(AreaToSym(memTableEntries), offsetof(exportDescription, memTable), memTableOffset);

    // Address of "rootFunction" within "exports"
    unsigned rootAddrArea = findArea(rootFunction);
    size_t rootOffset = (char*)rootFunction - (char*)memTable[rootAddrArea].mtOriginalAddr;
    createStructsRelocation(AreaToSym(rootAddrArea), offsetof(exportDescription, rootFunction), rootOffset);

    // Addresses of the areas within memtable.
- for (i = 0; i < memTableEntries; i++) + for (unsigned i = 0; i < memTableEntries; i++) { createStructsRelocation(AreaToSym(i), sizeof(exportDescription) + i * sizeof(memoryTableEntry) + offsetof(memoryTableEntry, mtCurrentAddr), 0 /* No offset relative to base symbol*/); } sections[sect_table_data+1].sh_size = relocationCount * (USE_RELA ? sizeof(ElfXX_Rela) : sizeof(ElfXX_Rel)); // Now the symbol table. alignFile(sections[sect_symtab].sh_addralign); sections[sect_symtab].sh_offset = ftell(exportFile); writeSymbol("", 0, 0, 0, 0, 0); // Initial symbol // Write the local symbols first. writeSymbol("", 0, 0, STB_LOCAL, STT_SECTION, sect_data); // .data section // Create symbols for the address areas. AreaToSym assumes these come first. - for (i = 0; i < memTableEntries; i++) + s = sect_data; + for (unsigned i = 0; i < memTableEntries; i++) { - unsigned s = sect_data + i*2; char buff[50]; sprintf(buff, "area%1u", i); writeSymbol(buff, 0, 0, STB_LOCAL, STT_OBJECT, s); + if ((memTable[i].mtFlags & (MTF_BYTES|MTF_WRITEABLE)) == MTF_BYTES) + s += 1; + else s += 2; } // Global symbols - Exported symbol for table. writeSymbol("poly_exports", 0, sizeof(exportDescription)+sizeof(memoryTableEntry)*memTableEntries, STB_GLOBAL, STT_OBJECT, sect_table_data); // External references for (unsigned i = 0; i < externTable.stringSize; i += (unsigned)strlen(externTable.strings+i) + 1) writeSymbol(externTable.strings+i, 0, 0, STB_GLOBAL, STT_FUNC, SHN_UNDEF); sections[sect_symtab].sh_info = EXTRA_SYMBOLS+memTableEntries; // One more than last local sym sections[sect_symtab].sh_size = sizeof(ElfXX_Sym) * symbolNum; // Now the binary data. - for (i = 0; i < memTableEntries; i++) + unsigned dataSection = sect_data; + for (unsigned i = 0; i < memTableEntries; i++) { - unsigned dataSection = sect_data + i*2; sections[dataSection].sh_size = memTable[i].mtLength; alignFile(sections[dataSection].sh_addralign); sections[dataSection].sh_offset = ftell(exportFile); fwrite(memTable[i].mtOriginalAddr, 1, memTable[i].mtLength, exportFile); + if ((memTable[i].mtFlags & (MTF_BYTES|MTF_WRITEABLE)) == MTF_BYTES) + dataSection += 1; + else dataSection += 2; } exportDescription exports; memset(&exports, 0, sizeof(exports)); exports.structLength = sizeof(exportDescription); exports.memTableSize = sizeof(memoryTableEntry); exports.memTableEntries = memTableEntries; exports.memTable = USE_RELA ? 0 : (memoryTableEntry *)memTableOffset; // Set the value to be the offset relative to the base of the area. We have set a relocation // already which will add the base of the area. exports.rootFunction = USE_RELA ? 0 : (void*)rootOffset; exports.timeStamp = getBuildTime(); exports.architecture = machineDependent->MachineArchitecture(); exports.rtsVersion = POLY_version_number; #ifdef POLYML32IN64 exports.originalBaseAddr = globalHeapBase; #else exports.originalBaseAddr = 0; #endif // Set the address values to zero before we write. They will always // be relative to their base symbol. - for (i = 0; i < memTableEntries; i++) + for (unsigned i = 0; i < memTableEntries; i++) memTable[i].mtCurrentAddr = 0; // Now the binary data. 
alignFile(sections[sect_table_data].sh_addralign); sections[sect_table_data].sh_offset = ftell(exportFile); sections[sect_table_data].sh_size = sizeof(exportDescription) + memTableEntries*sizeof(memoryTableEntry); fwrite(&exports, sizeof(exports), 1, exportFile); fwrite(memTable, sizeof(memoryTableEntry), memTableEntries, exportFile); // The section name table sections[sect_sectionnametable].sh_offset = ftell(exportFile); fwrite(sectionStrings.strings, sectionStrings.stringSize, 1, exportFile); sections[sect_sectionnametable].sh_size = sectionStrings.stringSize; // The symbol name table sections[sect_stringtable].sh_offset = ftell(exportFile); fwrite(symStrings.strings, symStrings.stringSize, 1, exportFile); sections[sect_stringtable].sh_size = symStrings.stringSize; // Finally the section headers. alignFile(4); fhdr.e_shoff = ftell(exportFile); fwrite(sections, sizeof(ElfXX_Shdr) * numSections, 1, exportFile); // Rewind to rewrite the file header with the offset of the section headers. rewind(exportFile); fwrite(&fhdr, sizeof(fhdr), 1, exportFile); fclose(exportFile); exportFile = NULL; delete[]sections; } diff --git a/libpolyml/exporter.cpp b/libpolyml/exporter.cpp index d1278bab..96a1f056 100644 --- a/libpolyml/exporter.cpp +++ b/libpolyml/exporter.cpp @@ -1,929 +1,926 @@ /* Title: exporter.cpp - Export a function as an object or C file Copyright (c) 2006-7, 2015, 2016-20 David C.J. Matthews This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#elif defined(_WIN32)
#include "winconfig.h"
#else
#error "No configuration file"
#endif

#ifdef HAVE_ASSERT_H
#include <assert.h>
#define ASSERT(x) assert(x)
#else
#define ASSERT(x)
#endif

#ifdef HAVE_STRING_H
#include <string.h>
#endif
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif

#if (defined(_WIN32))
#include <tchar.h>
#else
#define _T(x) x
#define _tcslen strlen
#define _tcscmp strcmp
#define _tcscat strcat
#endif

#include "exporter.h"
#include "save_vec.h"
#include "polystring.h"
#include "run_time.h"
#include "osmem.h"
#include "scanaddrs.h"
#include "gc.h"
#include "machine_dep.h"
#include "diagnostics.h"
#include "memmgr.h"
#include "processes.h" // For IO_SPACING
#include "sys.h" // For EXC_Fail
#include "rtsentry.h"
#include "pexport.h"

#ifdef HAVE_PECOFF
#include "pecoffexport.h"
#elif defined(HAVE_ELF_H) || defined(HAVE_ELF_ABI_H)
#include "elfexport.h"
#elif defined(HAVE_MACH_O_RELOC_H)
#include "machoexport.h"
#endif

#if (defined(_WIN32))
#define NOMEMORY ERROR_NOT_ENOUGH_MEMORY
#define ERRORNUMBER _doserrno
#else
#define NOMEMORY ENOMEM
#define ERRORNUMBER errno
#endif

extern "C" {
    POLYEXTERNALSYMBOL POLYUNSIGNED PolyExport(FirstArgument threadId, PolyWord fileName, PolyWord root);
    POLYEXTERNALSYMBOL POLYUNSIGNED PolyExportPortable(FirstArgument threadId, PolyWord fileName, PolyWord root);
}

/*
  To export the function and everything reachable from it we need to copy
  all the objects into a new area. We leave tombstones in the original
  objects by overwriting the length word. That prevents us from copying an
  object twice and breaks loops. Once we've copied the objects we then have
  to go back over the memory and turn the tombstones back into length words.
*/

GraveYard::~GraveYard()
{
    free(graves);
}

// Used to calculate the space required for the ordinary mutables
// and the no-overwrite mutables. They are interspersed in local space.
class MutSizes : public ScanAddress
{
public:
    MutSizes() : mutSize(0), noOverSize(0) {}

    virtual PolyObject *ScanObjectAddress(PolyObject *base) { return base; } // Not actually used

    virtual void ScanAddressesInObject(PolyObject *base, POLYUNSIGNED lengthWord)
    {
        const POLYUNSIGNED words = OBJ_OBJECT_LENGTH(lengthWord) + 1; // Include length word
        if (OBJ_IS_NO_OVERWRITE(lengthWord))
            noOverSize += words;
        else mutSize += words;
    }

    POLYUNSIGNED mutSize, noOverSize;
};

CopyScan::CopyScan(unsigned h/*=0*/): hierarchy(h)
{
    defaultImmSize = defaultMutSize = defaultCodeSize = defaultNoOverSize = 0;
    tombs = 0;
    graveYard = 0;
}

void CopyScan::initialise(bool isExport/*=true*/)
{
    ASSERT(gMem.eSpaces.size() == 0);
    // Set the space sizes to a proportion of the space currently in use.
    // Computing these sizes is not obvious because CopyScan is used both
    // for export and for saved states. For saved states in particular we
    // want to use a smaller size because they are retained after we save
    // the state and if we have many child saved states it's important not
    // to waste memory.
if (hierarchy == 0) { graveYard = new GraveYard[gMem.pSpaces.size()]; if (graveYard == 0) { if (debugOptions & DEBUG_SAVING) Log("SAVE: Unable to allocate graveyard, size: %lu.\n", gMem.pSpaces.size()); throw MemoryException(); } } for (std::vector::iterator i = gMem.pSpaces.begin(); i < gMem.pSpaces.end(); i++) { PermanentMemSpace *space = *i; if (space->hierarchy >= hierarchy) { // Include this if we're exporting (hierarchy=0) or if we're saving a state // and will include this in the new state. size_t size = (space->top-space->bottom)/4; if (space->noOverwrite) defaultNoOverSize += size; else if (space->isMutable) defaultMutSize += size; else if (space->isCode) defaultCodeSize += size; else defaultImmSize += size; if (space->hierarchy == 0 && ! space->isMutable) { // We need a separate area for the tombstones because this is read-only graveYard[tombs].graves = (PolyWord*)calloc(space->spaceSize(), sizeof(PolyWord)); if (graveYard[tombs].graves == 0) { if (debugOptions & DEBUG_SAVING) Log("SAVE: Unable to allocate graveyard for permanent space, size: %lu.\n", space->spaceSize() * sizeof(PolyWord)); throw MemoryException(); } if (debugOptions & DEBUG_SAVING) Log("SAVE: Allocated graveyard for permanent space, %p size: %lu.\n", graveYard[tombs].graves, space->spaceSize() * sizeof(PolyWord)); graveYard[tombs].startAddr = space->bottom; graveYard[tombs].endAddr = space->top; tombs++; } } } for (std::vector::iterator i = gMem.lSpaces.begin(); i < gMem.lSpaces.end(); i++) { LocalMemSpace *space = *i; uintptr_t size = space->allocatedSpace(); // It looks as though the mutable size generally gets // overestimated while the immutable size is correct. if (space->isMutable) { MutSizes sizeMut; sizeMut.ScanAddressesInRegion(space->bottom, space->lowerAllocPtr); sizeMut.ScanAddressesInRegion(space->upperAllocPtr, space->top); defaultNoOverSize += sizeMut.noOverSize / 4; defaultMutSize += sizeMut.mutSize / 4; } else defaultImmSize += size/2; } for (std::vector::iterator i = gMem.cSpaces.begin(); i < gMem.cSpaces.end(); i++) { CodeSpace *space = *i; uintptr_t size = space->spaceSize(); defaultCodeSize += size/2; } if (isExport) { // Minimum 1M words. if (defaultMutSize < 1024*1024) defaultMutSize = 1024*1024; if (defaultImmSize < 1024*1024) defaultImmSize = 1024*1024; if (defaultCodeSize < 1024*1024) defaultCodeSize = 1024*1024; #ifdef MACOSX // Limit the segment size for Mac OS X. The linker has a limit of 2^24 relocations // in a segment so this is a crude way of ensuring the limit isn't exceeded. // It's unlikely to be exceeded by the code itself. // Actually, from trial-and-error, the limit seems to be around 6M. if (defaultMutSize > 6 * 1024 * 1024) defaultMutSize = 6 * 1024 * 1024; if (defaultImmSize > 6 * 1024 * 1024) defaultImmSize = 6 * 1024 * 1024; #endif if (defaultNoOverSize < 4096) defaultNoOverSize = 4096; // Except for the no-overwrite area } else { // Much smaller minimum sizes for saved states. if (defaultMutSize < 1024) defaultMutSize = 1024; if (defaultImmSize < 4096) defaultImmSize = 4096; if (defaultCodeSize < 4096) defaultCodeSize = 4096; if (defaultNoOverSize < 4096) defaultNoOverSize = 4096; // Set maximum sizes as well. We may have insufficient contiguous space for // very large areas. 
if (defaultMutSize > 1024 * 1024) defaultMutSize = 1024 * 1024; if (defaultImmSize > 1024 * 1024) defaultImmSize = 1024 * 1024; if (defaultCodeSize > 1024 * 1024) defaultCodeSize = 1024 * 1024; if (defaultNoOverSize > 1024 * 1024) defaultNoOverSize = 1024 * 1024; } if (debugOptions & DEBUG_SAVING) Log("SAVE: Copyscan default sizes: Immutable: %" POLYUFMT ", Mutable: %" POLYUFMT ", Code: %" POLYUFMT ", No-overwrite %" POLYUFMT ".\n", defaultImmSize, defaultMutSize, defaultCodeSize, defaultNoOverSize); } CopyScan::~CopyScan() { gMem.DeleteExportSpaces(); if (graveYard) delete[](graveYard); } // This function is called for each address in an object // once it has been copied to its new location. We copy first // then scan to update the addresses. POLYUNSIGNED CopyScan::ScanAddressAt(PolyWord *pt) { PolyWord val = *pt; // Ignore integers. if (IS_INT(val) || val == PolyWord::FromUnsigned(0)) return 0; PolyObject *obj = val.AsObjPtr(); POLYUNSIGNED l = ScanAddress(&obj); *pt = obj; return l; } // This function is called for each address in an object // once it has been copied to its new location. We copy first // then scan to update the addresses. POLYUNSIGNED CopyScan::ScanAddress(PolyObject **pt) { PolyObject *obj = *pt; MemSpace *space = gMem.SpaceForObjectAddress(obj); ASSERT(space != 0); // We may sometimes get addresses that have already been updated // to point to the new area. e.g. (only?) in the case of constants // that have been updated in ScanConstantsWithinCode. if (space->spaceType == ST_EXPORT) return 0; // If this is at a lower level than the hierarchy we are saving // then leave it untouched. if (space->spaceType == ST_PERMANENT) { PermanentMemSpace *pmSpace = (PermanentMemSpace*)space; if (pmSpace->hierarchy < hierarchy) return 0; } // Have we already scanned this? if (obj->ContainsForwardingPtr()) { // Update the address to the new value. #ifdef POLYML32IN64 PolyObject *newAddr; if (space->isCode) newAddr = (PolyObject*)(globalCodeBase + ((obj->LengthWord() & ~_OBJ_TOMBSTONE_BIT) << 1)); else newAddr = obj->GetForwardingPtr(); #else PolyObject *newAddr = obj->GetForwardingPtr(); #endif *pt = newAddr; return 0; // No need to scan it again. } else if (space->spaceType == ST_PERMANENT) { // See if we have this in the grave-yard. for (unsigned i = 0; i < tombs; i++) { GraveYard *g = &graveYard[i]; if ((PolyWord*)obj >= g->startAddr && (PolyWord*)obj < g->endAddr) { PolyWord *tombAddr = g->graves + ((PolyWord*)obj - g->startAddr); PolyObject *tombObject = (PolyObject*)tombAddr; if (tombObject->ContainsForwardingPtr()) { #ifdef POLYML32IN64 PolyObject *newAddr; if (space->isCode) newAddr = (PolyObject*)(globalCodeBase + ((tombObject->LengthWord() & ~_OBJ_TOMBSTONE_BIT) << 1)); else newAddr = tombObject->GetForwardingPtr(); #else PolyObject *newAddr = tombObject->GetForwardingPtr(); #endif *pt = newAddr; return 0; } break; // No need to look further } } } // No, we need to copy it. 
ASSERT(space->spaceType == ST_LOCAL || space->spaceType == ST_PERMANENT || space->spaceType == ST_CODE); POLYUNSIGNED lengthWord = obj->LengthWord(); POLYUNSIGNED words = OBJ_OBJECT_LENGTH(lengthWord); PolyObject *newObj = 0; - PolyObject* writeAble = 0; + PolyObject* writAble = 0; bool isMutableObj = obj->IsMutable(); bool isNoOverwrite = false; - bool isByteObj = false; + bool isByteObj = obj->IsByteObject(); bool isCodeObj = false; if (isMutableObj) - { isNoOverwrite = obj->IsNoOverwriteObject(); - isByteObj = obj->IsByteObject(); - } else isCodeObj = obj->IsCodeObject(); // Allocate a new address for the object. for (std::vector::iterator i = gMem.eSpaces.begin(); i < gMem.eSpaces.end(); i++) { PermanentMemSpace *space = *i; if (isMutableObj == space->isMutable && isNoOverwrite == space->noOverwrite && isByteObj == space->byteOnly && isCodeObj == space->isCode) { ASSERT(space->topPointer <= space->top && space->topPointer >= space->bottom); size_t spaceLeft = space->top - space->topPointer; if (spaceLeft > words) { newObj = (PolyObject*)(space->topPointer + 1); - writeAble = space->writeAble(newObj); + writAble = space->writeAble(newObj); space->topPointer += words + 1; #ifdef POLYML32IN64 // Maintain the odd-word alignment of topPointer if ((words & 1) == 0 && space->topPointer < space->top) { *space->writeAble(space->topPointer) = PolyWord::FromUnsigned(0); space->topPointer++; } #endif break; } } } if (newObj == 0) { // Didn't find room in the existing spaces. Create a new space. uintptr_t spaceWords; if (isMutableObj) { if (isNoOverwrite) spaceWords = defaultNoOverSize; else spaceWords = defaultMutSize; } else { if (isCodeObj) spaceWords = defaultCodeSize; else spaceWords = defaultImmSize; } if (spaceWords <= words) spaceWords = words + 1; // Make sure there's space for this object. PermanentMemSpace *space = gMem.NewExportSpace(spaceWords, isMutableObj, isNoOverwrite, isCodeObj); if (isByteObj) space->byteOnly = true; if (space == 0) { if (debugOptions & DEBUG_SAVING) Log("SAVE: Unable to allocate export space, size: %lu.\n", spaceWords); // Unable to allocate this. throw MemoryException(); } newObj = (PolyObject*)(space->topPointer + 1); - writeAble = space->writeAble(newObj); + writAble = space->writeAble(newObj); space->topPointer += words + 1; #ifdef POLYML32IN64 // Maintain the odd-word alignment of topPointer if ((words & 1) == 0 && space->topPointer < space->top) { *space->writeAble(space->topPointer) = PolyWord::FromUnsigned(0); space->topPointer++; } #endif ASSERT(space->topPointer <= space->top && space->topPointer >= space->bottom); } - writeAble->SetLengthWord(lengthWord); // copy length word + writAble->SetLengthWord(lengthWord); // copy length word if (hierarchy == 0 /* Exporting object module */ && isNoOverwrite && isMutableObj && !isByteObj) { // These are not exported. They are used for special values e.g. mutexes // that should be set to 0/nil/NONE at start-up. // Weak+No-overwrite byte objects are used for entry points and volatiles // in the foreign-function interface and have to be treated specially. // Note: this must not be done when exporting a saved state because the // copied version is used as the local data for the rest of the session. 
for (POLYUNSIGNED i = 0; i < words; i++) - writeAble->Set(i, TAGGED(0)); + writAble->Set(i, TAGGED(0)); } - else memcpy(writeAble, obj, words * sizeof(PolyWord)); + else memcpy(writAble, obj, words * sizeof(PolyWord)); if (space->spaceType == ST_PERMANENT && !space->isMutable && ((PermanentMemSpace*)space)->hierarchy == 0) { // The immutable permanent areas are read-only. unsigned m; for (m = 0; m < tombs; m++) { GraveYard *g = &graveYard[m]; if ((PolyWord*)obj >= g->startAddr && (PolyWord*)obj < g->endAddr) { PolyWord *tombAddr = g->graves + ((PolyWord*)obj - g->startAddr); PolyObject *tombObject = (PolyObject*)tombAddr; #ifdef POLYML32IN64 if (isCodeObj) { POLYUNSIGNED ll = (POLYUNSIGNED)(((PolyWord*)newObj - globalCodeBase) >> 1 | _OBJ_TOMBSTONE_BIT); tombObject->SetLengthWord(ll); } else tombObject->SetForwardingPtr(newObj); #else tombObject->SetForwardingPtr(newObj); #endif break; // No need to look further } } ASSERT(m < tombs); // Should be there. } else if (isCodeObj) #ifdef POLYML32IN64 // If this is a code address we can't use the usual forwarding pointer format. // Instead we have to compute the offset relative to the base of the code. { POLYUNSIGNED ll = (POLYUNSIGNED)(((PolyWord*)newObj-globalCodeBase) >> 1 | _OBJ_TOMBSTONE_BIT); gMem.SpaceForObjectAddress(obj)->writeAble(obj)->SetLengthWord(ll); } #else gMem.SpaceForObjectAddress(obj)->writeAble(obj)->SetForwardingPtr(newObj); #endif else obj->SetForwardingPtr(newObj); // Put forwarding pointer in old object. if (OBJ_IS_CODE_OBJECT(lengthWord)) { // We don't need to worry about flushing the instruction cache // since we're not going to execute this code here. // We do have to update any relative addresses within the code // to take account of its new position. We have to do that now // even though ScanAddressesInObject will do it again because this // is the only point where we have both the old and the new addresses. machineDependent->ScanConstantsWithinCode(newObj, obj, words, this); } *pt = newObj; // Update it to the newly copied object. return lengthWord; // This new object needs to be scanned. } // The address of code in the code area. We treat this as a normal heap cell. // We will probably need to copy this and to process addresses within it. POLYUNSIGNED CopyScan::ScanCodeAddressAt(PolyObject **pt) { POLYUNSIGNED lengthWord = ScanAddress(pt); if (lengthWord) ScanAddressesInObject(*pt, lengthWord); return 0; } PolyObject *CopyScan::ScanObjectAddress(PolyObject *base) { PolyWord val = base; // Scan this as an address. POLYUNSIGNED lengthWord = CopyScan::ScanAddressAt(&val); if (lengthWord) ScanAddressesInObject(val.AsObjPtr(), lengthWord); return val.AsObjPtr(); } #define MAX_EXTENSION 4 // The longest extension we may need to add is ".obj" // Convert the forwarding pointers in a region back into length words. // Generally if this object has a forwarding pointer that's // because we've moved it into the export region. We can, // though, get multiple levels of forwarding if there is an object // that has been shifted up by a garbage collection, leaving a forwarding // pointer and then that object has been moved to the export region. // We mustn't turn locally forwarded values back into ordinary objects // because they could contain addresses that are no longer valid. 
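/* Illustrative aside (not part of this patch): a minimal sketch of the
   forwarding-pointer scheme the comments above describe. An object that has
   been copied has its length word overwritten with a forwarding pointer, and
   an address may have to be chased through several levels of forwarding
   before the final copy is reached. The names below (Cell, forwarded,
   forwardTo) are hypothetical stand-ins, not the Poly/ML object format. */
struct Cell
{
    bool forwarded;   // true if the length word now holds a forwarding pointer
    Cell *forwardTo;  // where the object was copied to (valid only if forwarded)
    unsigned length;  // the original length word (valid only if !forwarded)
};

static Cell *resolveForwarding(Cell *obj)
{
    // Follow the chain until we reach a cell that still has a real length word,
    // much as GetObjLength below does for the real representation.
    while (obj->forwarded)
        obj = obj->forwardTo;
    return obj;
}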
static POLYUNSIGNED GetObjLength(PolyObject *obj) { if (obj->ContainsForwardingPtr()) { PolyObject *forwardedTo; #ifdef POLYML32IN64 { MemSpace *space = gMem.SpaceForObjectAddress(obj); if (space->isCode) forwardedTo = (PolyObject*)(globalCodeBase + ((obj->LengthWord() & ~_OBJ_TOMBSTONE_BIT) << 1)); else forwardedTo = obj->GetForwardingPtr(); } #else forwardedTo = obj->GetForwardingPtr(); #endif POLYUNSIGNED length = GetObjLength(forwardedTo); MemSpace *space = gMem.SpaceForObjectAddress(forwardedTo); if (space->spaceType == ST_EXPORT) gMem.SpaceForObjectAddress(obj)->writeAble(obj)->SetLengthWord(length); return length; } else { ASSERT(obj->ContainsNormalLengthWord()); return obj->LengthWord(); } } static void FixForwarding(PolyWord *pt, size_t space) { while (space) { pt++; PolyObject *obj = (PolyObject*)pt; #ifdef POLYML32IN64 if ((uintptr_t)obj & 4) { // Skip filler words needed to align to an even word space--; continue; // We've added 1 to pt so just loop. } #endif size_t length = OBJ_OBJECT_LENGTH(GetObjLength(obj)); pt += length; ASSERT(space > length); space -= length+1; } } class ExportRequest: public MainThreadRequest { public: ExportRequest(Handle root, Exporter *exp): MainThreadRequest(MTP_EXPORTING), exportRoot(root), exporter(exp) {} virtual void Perform() { exporter->RunExport(exportRoot->WordP()); } Handle exportRoot; Exporter *exporter; }; static void exporter(TaskData *taskData, Handle fileName, Handle root, const TCHAR *extension, Exporter *exports) { size_t extLen = _tcslen(extension); TempString fileNameBuff(Poly_string_to_T_alloc(fileName->Word(), extLen)); if (fileNameBuff == NULL) raise_syscall(taskData, "Insufficient memory", NOMEMORY); size_t length = _tcslen(fileNameBuff); // Does it already have the extension? If not add it on. if (length < extLen || _tcscmp(fileNameBuff + length - extLen, extension) != 0) _tcscat(fileNameBuff, extension); #if (defined(_WIN32) && defined(UNICODE)) exports->exportFile = _wfopen(fileNameBuff, L"wb"); #else exports->exportFile = fopen(fileNameBuff, "wb"); #endif if (exports->exportFile == NULL) raise_syscall(taskData, "Cannot open export file", ERRORNUMBER); // Request a full GC to reduce the size of fix-ups. FullGC(taskData); // Request the main thread to do the export. ExportRequest request(root, exports); processes->MakeRootRequest(taskData, &request); if (exports->errorMessage) raise_fail(taskData, exports->errorMessage); } // This is called by the initial thread to actually do the export. void Exporter::RunExport(PolyObject *rootFunction) { Exporter *exports = this; PolyObject *copiedRoot = 0; CopyScan copyScan(hierarchy); try { copyScan.initialise(); // Copy the root and everything reachable from it into the temporary area. copiedRoot = copyScan.ScanObjectAddress(rootFunction); } catch (MemoryException &) { // If we ran out of memory. copiedRoot = 0; } // Fix the forwarding pointers. for (std::vector::iterator i = gMem.lSpaces.begin(); i < gMem.lSpaces.end(); i++) { LocalMemSpace *space = *i; // Local areas only have objects from the allocation pointer to the top. FixForwarding(space->bottom, space->lowerAllocPtr - space->bottom); FixForwarding(space->upperAllocPtr, space->top - space->upperAllocPtr); } for (std::vector::iterator i = gMem.pSpaces.begin(); i < gMem.pSpaces.end(); i++) { MemSpace *space = *i; // Permanent areas are filled with objects from the bottom. 
FixForwarding(space->bottom, space->top - space->bottom); } for (std::vector::iterator i = gMem.cSpaces.begin(); i < gMem.cSpaces.end(); i++) { MemSpace *space = *i; // Code areas are filled with objects from the bottom. FixForwarding(space->bottom, space->top - space->bottom); } // Reraise the exception after cleaning up the forwarding pointers. if (copiedRoot == 0) { exports->errorMessage = "Insufficient Memory"; return; } // Copy the areas into the export object. size_t tableEntries = gMem.eSpaces.size(); unsigned memEntry = 0; if (hierarchy != 0) tableEntries += gMem.pSpaces.size(); exports->memTable = new memoryTableEntry[tableEntries]; // If we're constructing a module we need to include the global spaces. if (hierarchy != 0) { // Permanent spaces from the executable. for (std::vector::iterator i = gMem.pSpaces.begin(); i < gMem.pSpaces.end(); i++) { PermanentMemSpace *space = *i; if (space->hierarchy < hierarchy) { memoryTableEntry *entry = &exports->memTable[memEntry++]; entry->mtOriginalAddr = entry->mtCurrentAddr = space->bottom; entry->mtLength = (space->topPointer-space->bottom)*sizeof(PolyWord); entry->mtIndex = space->index; entry->mtFlags = 0; if (space->isMutable) entry->mtFlags |= MTF_WRITEABLE; if (space->isCode) entry->mtFlags |= MTF_EXECUTABLE; } } newAreas = memEntry; } for (std::vector::iterator i = gMem.eSpaces.begin(); i < gMem.eSpaces.end(); i++) { memoryTableEntry *entry = &exports->memTable[memEntry++]; PermanentMemSpace *space = *i; entry->mtOriginalAddr = entry->mtCurrentAddr = space->bottom; entry->mtLength = (space->topPointer-space->bottom)*sizeof(PolyWord); entry->mtIndex = hierarchy == 0 ? memEntry-1 : space->index; entry->mtFlags = 0; if (space->isMutable) { entry->mtFlags = MTF_WRITEABLE; if (space->noOverwrite) entry->mtFlags |= MTF_NO_OVERWRITE; } if (space->isCode) entry->mtFlags |= MTF_EXECUTABLE; if (space->byteOnly) entry->mtFlags |= MTF_BYTES; } ASSERT(memEntry == tableEntries); exports->memTableEntries = memEntry; exports->rootFunction = copiedRoot; try { // This can raise MemoryException at least in PExport::exportStore. exports->exportStore(); } catch (MemoryException &) { exports->errorMessage = "Insufficient Memory"; } } // Functions called via the RTS call. Handle exportNative(TaskData *taskData, Handle args) { #ifdef HAVE_PECOFF // Windows including Cygwin #if (defined(_WIN32)) const TCHAR *extension = _T(".obj"); // Windows #else const char *extension = ".o"; // Cygwin #endif PECOFFExport exports; exporter(taskData, taskData->saveVec.push(args->WordP()->Get(0)), taskData->saveVec.push(args->WordP()->Get(1)), extension, &exports); #elif defined(HAVE_ELF_H) || defined(HAVE_ELF_ABI_H) // Most Unix including Linux, FreeBSD and Solaris. 
const char *extension = ".o"; ELFExport exports; exporter(taskData, taskData->saveVec.push(args->WordP()->Get(0)), taskData->saveVec.push(args->WordP()->Get(1)), extension, &exports); #elif defined(HAVE_MACH_O_RELOC_H) // Mac OS-X const char *extension = ".o"; MachoExport exports; exporter(taskData, taskData->saveVec.push(args->WordP()->Get(0)), taskData->saveVec.push(args->WordP()->Get(1)), extension, &exports); #else raise_exception_string (taskData, EXC_Fail, "Native export not available for this platform"); #endif return taskData->saveVec.push(TAGGED(0)); } Handle exportPortable(TaskData *taskData, Handle args) { PExport exports; exporter(taskData, taskData->saveVec.push(args->WordP()->Get(0)), taskData->saveVec.push(args->WordP()->Get(1)), _T(".txt"), &exports); return taskData->saveVec.push(TAGGED(0)); } POLYUNSIGNED PolyExport(FirstArgument threadId, PolyWord fileName, PolyWord root) { TaskData *taskData = TaskData::FindTaskForId(threadId); ASSERT(taskData != 0); taskData->PreRTSCall(); Handle reset = taskData->saveVec.mark(); Handle pushedName = taskData->saveVec.push(fileName); Handle pushedRoot = taskData->saveVec.push(root); try { #ifdef HAVE_PECOFF // Windows including Cygwin #if (defined(_WIN32)) const TCHAR *extension = _T(".obj"); // Windows #else const char *extension = ".o"; // Cygwin #endif PECOFFExport exports; exporter(taskData, pushedName, pushedRoot, extension, &exports); #elif defined(HAVE_ELF_H) || defined(HAVE_ELF_ABI_H) // Most Unix including Linux, FreeBSD and Solaris. const char *extension = ".o"; ELFExport exports; exporter(taskData, pushedName, pushedRoot, extension, &exports); #elif defined(HAVE_MACH_O_RELOC_H) // Mac OS-X const char *extension = ".o"; MachoExport exports; exporter(taskData, pushedName, pushedRoot, extension, &exports); #else raise_exception_string (taskData, EXC_Fail, "Native export not available for this platform"); #endif } catch (...) { } // If an ML exception is raised taskData->saveVec.reset(reset); taskData->PostRTSCall(); return TAGGED(0).AsUnsigned(); // Returns unit } POLYUNSIGNED PolyExportPortable(FirstArgument threadId, PolyWord fileName, PolyWord root) { TaskData *taskData = TaskData::FindTaskForId(threadId); ASSERT(taskData != 0); taskData->PreRTSCall(); Handle reset = taskData->saveVec.mark(); Handle pushedName = taskData->saveVec.push(fileName); Handle pushedRoot = taskData->saveVec.push(root); try { PExport exports; exporter(taskData, pushedName, pushedRoot, _T(".txt"), &exports); } catch (...) { } // If an ML exception is raised taskData->saveVec.reset(reset); taskData->PostRTSCall(); return TAGGED(0).AsUnsigned(); // Returns unit } // Helper functions for exporting. We need to produce relocation information // and this code is common to every method. Exporter::Exporter(unsigned int h): exportFile(NULL), errorMessage(0), hierarchy(h), memTable(0), newAreas(0) { } Exporter::~Exporter() { delete[](memTable); if (exportFile) fclose(exportFile); } void Exporter::relocateValue(PolyWord *pt) { #ifndef POLYML32IN64 PolyWord q = *pt; if (IS_INT(q) || q == PolyWord::FromUnsigned(0)) {} else createRelocation(pt); #endif } void Exporter::createRelocation(PolyWord* pt) { *gMem.SpaceForAddress(pt)->writeAble(pt) = createRelocation(*pt, pt); } // Check through the areas to see where the address is. It must be // in one of them. 
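/* Illustrative aside (not part of this patch): the difference between the Rel
   and Rela relocation records selected by USE_RELA in elfexport.cpp above.
   With Rela the addend travels inside the record and the patched location is
   cleared (the exporter does this because some linkers add the location's
   contents as well); with Rel the addend is stored in the location itself.
   This sketch uses the standard <elf.h> 64-bit types; emitDirectReloc is a
   hypothetical helper and the record would still have to be written out to
   the appropriate .rel/.rela section. The findArea helper follows below. */
#include <elf.h>
#include <cstring>

static void emitDirectReloc(bool useRela, unsigned char *where,
                            Elf64_Addr offsetInSection, Elf64_Word symbol,
                            Elf64_Sxword addend)
{
    if (useRela)
    {
        Elf64_Rela r;
        r.r_offset = offsetInSection;
        r.r_info = ELF64_R_INFO(symbol, R_X86_64_64);
        r.r_addend = addend;            // addend carried in the record itself
        std::memset(where, 0, 8);       // clear the location being relocated
        (void)r;                        // ... write r to the relocation section ...
    }
    else
    {
        Elf64_Rel r;
        r.r_offset = offsetInSection;
        r.r_info = ELF64_R_INFO(symbol, R_X86_64_64);
        std::memcpy(where, &addend, 8); // addend stored in the location itself
        (void)r;                        // ... write r to the relocation section ...
    }
}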
unsigned Exporter::findArea(void *p) { for (unsigned i = 0; i < memTableEntries; i++) { if (p > memTable[i].mtOriginalAddr && p <= (char*)memTable[i].mtOriginalAddr + memTable[i].mtLength) return i; } { ASSERT(0); } return 0; } void Exporter::relocateObject(PolyObject *p) { if (p->IsByteObject()) { if (p->IsMutable() && p->IsWeakRefObject()) { // Weak mutable byte refs are used for external references and // also in the FFI for non-persistent values. bool isFuncPtr = true; const char *entryName = getEntryPointName(p, &isFuncPtr); if (entryName != 0) addExternalReference(p, entryName, isFuncPtr); // Clear the first word of the data. ASSERT(p->Length() >= sizeof(uintptr_t)/sizeof(PolyWord)); *(uintptr_t*)p = 0; } } else if (p->IsCodeObject()) { POLYUNSIGNED constCount; PolyWord *cp; ASSERT(! p->IsMutable() ); p->GetConstSegmentForCode(cp, constCount); /* Now the constants. */ for (POLYUNSIGNED i = 0; i < constCount; i++) relocateValue(&(cp[i])); } else // Closure and ordinary objects { POLYUNSIGNED length = p->Length(); for (POLYUNSIGNED i = 0; i < length; i++) relocateValue(p->Offset(i)); } } ExportStringTable::ExportStringTable(): strings(0), stringSize(0), stringAvailable(0) { } ExportStringTable::~ExportStringTable() { free(strings); } // Add a string to the string table, growing it if necessary. unsigned long ExportStringTable::makeEntry(const char *str) { unsigned len = (unsigned)strlen(str); unsigned long entry = stringSize; if (stringSize + len + 1 > stringAvailable) { stringAvailable = stringAvailable+stringAvailable/2; if (stringAvailable < stringSize + len + 1) stringAvailable = stringSize + len + 1 + 500; char* newStrings = (char*)realloc(strings, stringAvailable); if (newStrings == 0) { if (debugOptions & DEBUG_SAVING) Log("SAVE: Unable to realloc string table, size: %lu.\n", stringAvailable); throw MemoryException(); } else strings = newStrings; } strcpy(strings + stringSize, str); stringSize += len + 1; return entry; } struct _entrypts exporterEPT[] = { { "PolyExport", (polyRTSFunction)&PolyExport}, { "PolyExportPortable", (polyRTSFunction)&PolyExportPortable}, { NULL, NULL} // End of list. }; diff --git a/libpolyml/x86_dep.cpp b/libpolyml/x86_dep.cpp index 852b74e5..1310263b 100644 --- a/libpolyml/x86_dep.cpp +++ b/libpolyml/x86_dep.cpp @@ -1,1273 +1,1275 @@ /* Title: Machine dependent code for i386 and X64 under Windows and Unix Copyright (c) 2000-7 Cambridge University Technical Services Limited Further work copyright David C. J. Matthews 2011-20 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include "config.h" #elif defined(_WIN32) #include "winconfig.h" #else #error "No configuration file" #endif #ifdef HAVE_STDLIB_H #include #endif #include #ifdef HAVE_SIGNAL_H #include #endif #ifdef HAVE_ASSERT_H #include #define ASSERT(x) assert(x) #else #define ASSERT(x) #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_ERRNO_H #include #endif #if (defined(_WIN32)) #include #include #endif #include "globals.h" #include "run_time.h" #include "diagnostics.h" #include "processes.h" #include "profiling.h" #include "machine_dep.h" #include "scanaddrs.h" #include "memmgr.h" #include "rtsentry.h" #include "sys.h" // Temporary /********************************************************************** * * Register usage: * * %Reax: First argument to function. Result of function call. * %Rebx: Second argument to function. * %Recx: General register * %Redx: Closure pointer in call. * %Rebp: Points to memory used for extra registers * %Resi: General register. * %Redi: General register. * %Resp: Stack pointer. * The following apply only on the X64 * %R8: Third argument to function * %R9: Fourth argument to function * %R10: Fifth argument to function * %R11: General register * %R12: General register * %R13: General register * %R14: General register * %R15: Memory allocation pointer * **********************************************************************/ #ifdef HOSTARCHITECTURE_X86_64 struct fpSaveArea { double fpregister[7]; // Save area for xmm0-6 }; #else // Structure of floating point save area. // This is dictated by the hardware. typedef byte fpregister[10]; struct fpSaveArea { unsigned short cw; unsigned short _unused0; unsigned short sw; unsigned short _unused1; unsigned short tw; unsigned short _unused2; unsigned fip; unsigned short fcs0; unsigned short _unused3; unsigned foo; unsigned short fcs1; unsigned short _unused4; fpregister registers[8]; }; #endif /* the amount of ML stack space to reserve for registers, C exception handling etc. The compiler requires us to reserve 2 stack-frames worth (2 * 20 words). We actually reserve slightly more than this. */ #if (!defined(_WIN32) && !defined(HAVE_SIGALTSTACK)) // If we can't handle signals on a separate stack make sure there's space // on the Poly stack. #define OVERFLOW_STACK_SIZE (50+1024) #else #define OVERFLOW_STACK_SIZE 50 #endif class X86TaskData; // This is passed as the argument vector to X86AsmSwitchToPoly. // The offsets are built into the assembly code and the code-generator. // localMpointer and stackPtr are updated before control returns to C. typedef struct _AssemblyArgs { public: PolyWord *localMpointer; // Allocation ptr + 1 word stackItem *handlerRegister; // Current exception handler PolyWord *localMbottom; // Base of memory + 1 word stackItem *stackLimit; // Lower limit of stack stackItem exceptionPacket; // Set if there is an exception byte unusedRequestCode; // No longer used. byte unusedFlag; // No longer used byte returnReason; // Reason for returning from ML. byte unusedRestore; // No longer used. uintptr_t saveCStack; // Saved C stack frame. PolyWord threadId; // My thread id. Saves having to call into RTS for it. stackItem *stackPtr; // Current stack pointer byte *noLongerUsed; // Now removed byte *heapOverFlowCall; // These are filled in with the functions. 
byte *stackOverFlowCall; byte *stackOverFlowCallEx; + byte *trapHandlerEntry; // Saved registers, where applicable. stackItem p_rax; stackItem p_rbx; stackItem p_rcx; stackItem p_rdx; stackItem p_rsi; stackItem p_rdi; #ifdef HOSTARCHITECTURE_X86_64 stackItem p_r8; stackItem p_r9; stackItem p_r10; stackItem p_r11; stackItem p_r12; stackItem p_r13; stackItem p_r14; #endif struct fpSaveArea p_fp; } AssemblyArgs; // These next few are temporarily added for the interpreter // This duplicates some code in reals.cpp but is now updated. #define DOUBLESIZE (sizeof(double)/sizeof(POLYUNSIGNED)) union realdb { double dble; POLYUNSIGNED puns[DOUBLESIZE]; }; #define LGWORDSIZE (sizeof(uintptr_t) / sizeof(PolyWord)) class X86TaskData: public TaskData { public: X86TaskData(); unsigned allocReg; // The register to take the allocated space. POLYUNSIGNED allocWords; // The words to allocate. AssemblyArgs assemblyInterface; int saveRegisterMask; // Registers that need to be updated by a GC. virtual void GarbageCollect(ScanAddress *process); void ScanStackAddress(ScanAddress *process, stackItem &val, StackSpace *stack); virtual void EnterPolyCode(); // Start running ML virtual void InterruptCode(); virtual bool AddTimeProfileCount(SIGNALCONTEXT *context); virtual void InitStackFrame(TaskData *parentTask, Handle proc, Handle arg); virtual void SetException(poly_exn *exc); // Release a mutex in exactly the same way as compiler code virtual Handle AtomicDecrement(Handle mutexp); virtual void AtomicReset(Handle mutexp); // Return the minimum space occupied by the stack. Used when setting a limit. // N.B. This is PolyWords not native words. virtual uintptr_t currentStackSpace(void) const { return (this->stack->top - (PolyWord*)assemblyInterface.stackPtr) + OVERFLOW_STACK_SIZE*sizeof(uintptr_t)/sizeof(PolyWord); } // Increment the profile count for an allocation. Also now used for mutex contention. virtual void addProfileCount(POLYUNSIGNED words) { addSynchronousCount(assemblyInterface.stackPtr[0].codeAddr, words); } // PreRTSCall: After calling from ML to the RTS we need to save the current heap pointer virtual void PreRTSCall(void) { TaskData::PreRTSCall(); SaveMemRegisters(); } // PostRTSCall: Before returning we need to restore the heap pointer. // If there has been a GC in the RTS call we need to create a new heap area. 
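// Taken together the two hooks mean that every RTS entry point brackets its work, // roughly (see PolyExport above for the full pattern including saveVec mark/reset): // taskData->PreRTSCall(); // copies localMpointer back into allocPointer // ... do the RTS work, which may garbage-collect ... // taskData->PostRTSCall(); // re-establishes localMpointer and localMbottom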
virtual void PostRTSCall(void) { SetMemRegisters(); TaskData::PostRTSCall(); } virtual void CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length); void HeapOverflowTrap(byte *pcPtr); void SetMemRegisters(); void SaveMemRegisters(); void SetRegisterMask(); void HandleTrap(); PLock interruptLock; stackItem *&regSP() { return assemblyInterface.stackPtr; } stackItem &regAX() { return assemblyInterface.p_rax; } stackItem &regBX() { return assemblyInterface.p_rbx; } stackItem &regCX() { return assemblyInterface.p_rcx; } stackItem &regDX() { return assemblyInterface.p_rdx; } stackItem &regSI() { return assemblyInterface.p_rsi; } stackItem &regDI() { return assemblyInterface.p_rdi; } #ifdef HOSTARCHITECTURE_X86_64 stackItem &reg8() { return assemblyInterface.p_r8; } stackItem &reg9() { return assemblyInterface.p_r9; } stackItem &reg10() { return assemblyInterface.p_r10; } stackItem &reg11() { return assemblyInterface.p_r11; } stackItem &reg12() { return assemblyInterface.p_r12; } stackItem &reg13() { return assemblyInterface.p_r13; } stackItem &reg14() { return assemblyInterface.p_r14; } #endif #if (defined(_WIN32)) DWORD savedErrno; #else int savedErrno; #endif }; class X86Dependent: public MachineDependent { public: X86Dependent() {} // Create a task data object. virtual TaskData *CreateTaskData(void) { return new X86TaskData(); } // Initial size of stack in PolyWords virtual unsigned InitialStackSize(void) { return (128+OVERFLOW_STACK_SIZE) * sizeof(uintptr_t) / sizeof(PolyWord); } virtual void ScanConstantsWithinCode(PolyObject *addr, PolyObject *oldAddr, POLYUNSIGNED length, ScanAddress *process); virtual Architectures MachineArchitecture(void) #ifndef HOSTARCHITECTURE_X86_64 { return MA_I386; } #elif defined(POLYML32IN64) { return MA_X86_64_32; } #else { return MA_X86_64; } #endif }; // Values for the returnReason byte enum RETURN_REASON { RETURN_HEAP_OVERFLOW = 1, RETURN_STACK_OVERFLOW = 2, RETURN_STACK_OVERFLOWEX = 3, }; extern "C" { // These are declared in the assembly code segment. void X86AsmSwitchToPoly(void *); extern int X86AsmCallExtraRETURN_HEAP_OVERFLOW(void); extern int X86AsmCallExtraRETURN_STACK_OVERFLOW(void); extern int X86AsmCallExtraRETURN_STACK_OVERFLOWEX(void); POLYUNSIGNED X86AsmAtomicDecrement(PolyObject*); void X86TrapHandler(PolyWord threadId); }; X86TaskData::X86TaskData(): allocReg(0), allocWords(0), saveRegisterMask(0) { assemblyInterface.heapOverFlowCall = (byte*)X86AsmCallExtraRETURN_HEAP_OVERFLOW; assemblyInterface.stackOverFlowCall = (byte*)X86AsmCallExtraRETURN_STACK_OVERFLOW; assemblyInterface.stackOverFlowCallEx = (byte*)X86AsmCallExtraRETURN_STACK_OVERFLOWEX; + assemblyInterface.trapHandlerEntry = (byte*)X86TrapHandler; savedErrno = 0; } void X86TaskData::GarbageCollect(ScanAddress *process) { TaskData::GarbageCollect(process); // Process the parent first assemblyInterface.threadId = threadObject; if (stack != 0) { ASSERT(assemblyInterface.stackPtr >= (stackItem*)stack->bottom && assemblyInterface.stackPtr <= (stackItem*)stack->top); // Now the values on the stack. for (stackItem *q = assemblyInterface.stackPtr; q < (stackItem*)stack->top; q++) ScanStackAddress(process, *q, stack); } // Register mask for (int i = 0; i < 16; i++) { if (saveRegisterMask & (1 << i)) ScanStackAddress(process, *get_reg(i), stack); } } // Process a value within the stack.
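// A stack item may be a tagged integer, a heap pointer, a pointer into this stack // or a return address into code. Only genuine heap pointers are updated; a return // address simply causes the enclosing code object to be scanned so that it is // retained, as the cases below make explicit.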
void X86TaskData::ScanStackAddress(ScanAddress *process, stackItem &stackItem, StackSpace *stack) { // We may have return addresses on the stack which could look like // tagged values. Check whether the value is in the code area before // checking whether it is untagged. #ifdef POLYML32IN64 // In 32-in-64 return addresses always have the top 32 bits non-zero. if (stackItem.argValue < ((uintptr_t)1 << 32)) { // It's either a tagged integer or an object pointer. if (stackItem.w().IsDataPtr()) { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } } else { // Could be a code address or a stack address. MemSpace *space = gMem.SpaceForAddress(stackItem.codeAddr - 1); if (space == 0 || space->spaceType != ST_CODE) return; PolyObject *obj = gMem.FindCodeObject(stackItem.codeAddr); ASSERT(obj != 0); // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } #else // The -1 here is because we may have a zero-sized cell in the last // word of a space. MemSpace *space = gMem.SpaceForAddress(stackItem.codeAddr-1); if (space == 0) return; // In particular we may have one of the assembly code addresses. if (space->spaceType == ST_CODE) { PolyObject *obj = gMem.FindCodeObject(stackItem.codeAddr); // If it is actually an integer it might be outside a valid code object. if (obj == 0) { ASSERT(stackItem.w().IsTagged()); // It must be an integer } else // Process the address of the start. Don't update anything. process->ScanObjectAddress(obj); } else if (space->spaceType == ST_LOCAL && stackItem.w().IsDataPtr()) // Local values must be word addresses. { PolyWord val = process->ScanObjectAddress(stackItem.w().AsObjPtr()); stackItem = val; } #endif } // Copy a stack void X86TaskData::CopyStackFrame(StackObject *old_stack, uintptr_t old_length, StackObject *new_stack, uintptr_t new_length) { /* Moves a stack, updating all references within the stack */ #ifdef POLYML32IN64 old_length = old_length / 2; new_length = new_length / 2; #endif stackItem *old_base = (stackItem *)old_stack; stackItem *new_base = (stackItem*)new_stack; stackItem *old_top = old_base + old_length; /* Calculate the offset of the new stack from the old. If the frame is being extended objects in the new frame will be further up the stack than in the old one. */ uintptr_t offset = new_base - old_base + new_length - old_length; stackItem *oldStackPtr = assemblyInterface.stackPtr; // Adjust the stack pointer and handler pointer since these point into the stack. assemblyInterface.stackPtr = assemblyInterface.stackPtr + offset; assemblyInterface.handlerRegister = assemblyInterface.handlerRegister + offset; // We need to adjust any values on the stack that are pointers within the stack. // Skip the unused part of the stack. 
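// Because the offset includes the change in length an item keeps its distance from // the top of the stack rather than from the base. Rough worked example: growing a // 1000-item stack to 2000 items gives offset = (new_base - old_base) + 1000, so a // pointer at old_base + 950 (50 items below the old top) becomes new_base + 1950, // still 50 items below the new top.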
size_t i = oldStackPtr - old_base; ASSERT (i <= old_length); i = old_length - i; stackItem *old = oldStackPtr; stackItem *newp = assemblyInterface.stackPtr; while (i--) { stackItem old_word = *old++; if (old_word.w().IsDataPtr() && old_word.stackAddr >= old_base && old_word.stackAddr <= old_top) old_word.stackAddr = old_word.stackAddr + offset; else if (old_word.w().IsDataPtr() && IsHeapAddress(old_word.stackAddr)) { stackItem *addr = (stackItem*)old_word.w().AsStackAddr(); if (addr >= old_base && addr <= old_top) { addr += offset; old_word = PolyWord::FromStackAddr((PolyWord*)addr); } } *newp++ = old_word; } ASSERT(old == ((stackItem*)old_stack)+old_length); ASSERT(newp == ((stackItem*)new_stack)+new_length); // And change any registers that pointed into the old stack for (int j = 0; j < 16; j++) { if (saveRegisterMask & (1 << j)) { stackItem *regAddr = get_reg(j); stackItem old_word = *regAddr; if (old_word.w().IsDataPtr() && old_word.stackAddr >= old_base && old_word.stackAddr <= old_top) old_word.stackAddr = old_word.stackAddr + offset; else if (old_word.w().IsDataPtr() && IsHeapAddress(old_word.stackAddr)) { stackItem *addr = (stackItem*)old_word.w().AsStackAddr(); if (addr >= old_base && addr <= old_top) { addr += offset; old_word = PolyWord::FromStackAddr((PolyWord*)addr); } } *regAddr = old_word; } } } void X86TaskData::EnterPolyCode() /* Called from "main" to enter the code. */ { SetMemRegisters(); // Enter the ML code. X86AsmSwitchToPoly(&this->assemblyInterface); // This should never return ASSERT(0); } // Called from the assembly code as a result of a trap i.e. a request for // a GC or to extend the stack. void X86TrapHandler(PolyWord threadId) { X86TaskData* taskData = (X86TaskData*)TaskData::FindTaskForId(threadId); taskData->HandleTrap(); } void X86TaskData::HandleTrap() { SaveMemRegisters(); // Update globals from the memory registers. switch (this->assemblyInterface.returnReason) { case RETURN_HEAP_OVERFLOW: // The heap has overflowed. SetRegisterMask(); this->HeapOverflowTrap(assemblyInterface.stackPtr[0].codeAddr); // Computes a value for allocWords only break; case RETURN_STACK_OVERFLOW: case RETURN_STACK_OVERFLOWEX: { SetRegisterMask(); uintptr_t min_size; // Size in PolyWords if (assemblyInterface.returnReason == RETURN_STACK_OVERFLOW) { min_size = (this->stack->top - (PolyWord*)assemblyInterface.stackPtr) + OVERFLOW_STACK_SIZE * sizeof(uintptr_t) / sizeof(PolyWord); } else { // Stack limit overflow. If the required stack space is larger than // the fixed overflow size the code will calculate the limit in %EDI. stackItem* stackP = regDI().stackAddr; min_size = (this->stack->top - (PolyWord*)stackP) + OVERFLOW_STACK_SIZE * sizeof(uintptr_t) / sizeof(PolyWord); } try { // The stack check has failed. This may either be because we really have // overflowed the stack or because the stack limit value has been adjusted // to result in a call here. CheckAndGrowStack(this, min_size); } catch (IOException&) { // We may get an exception while handling this if we run out of store } { PLocker l(&interruptLock); // Set the stack limit. This clears any interrupt and also sets the // correct value if we've grown the stack. this->assemblyInterface.stackLimit = (stackItem*)this->stack->bottom + OVERFLOW_STACK_SIZE; } // We're in a safe state to handle any interrupts. try { // Process any asynchronous events i.e. interrupts or kill processes->ProcessAsynchRequests(this); // Release and re-acquire use of the ML memory to allow another thread to GC. 
processes->ThreadReleaseMLMemory(this); processes->ThreadUseMLMemory(this); } catch (IOException&) { // If this resulted in an ML exception it will also raise a C++ exception. } catch (KillException&) { processes->ThreadExit(this); } break; } default: Crash("Unknown return reason code %u", this->assemblyInterface.returnReason); } SetMemRegisters(); } void X86TaskData::InitStackFrame(TaskData *parentTaskData, Handle proc, Handle arg) /* Initialise stack frame. */ { StackSpace *space = this->stack; StackObject * newStack = space->stack(); uintptr_t stack_size = space->spaceSize() * sizeof(PolyWord) / sizeof(stackItem); // Set the top of the stack inside the stack rather than at the end. This wastes // a word but if sp is actually at the end OpenBSD segfaults because it isn't in // a MAP_STACK area. uintptr_t topStack = stack_size - 1; stackItem* stackTop = (stackItem*)newStack + topStack; *stackTop = TAGGED(0); // Set it to non-zero. assemblyInterface.stackPtr = stackTop; assemblyInterface.stackLimit = (stackItem*)space->bottom + OVERFLOW_STACK_SIZE; assemblyInterface.handlerRegister = stackTop; // Floating point save area. memset(&assemblyInterface.p_fp, 0, sizeof(struct fpSaveArea)); #ifndef HOSTARCHITECTURE_X86_64 // Set the control word for 64-bit precision otherwise we get inconsistent results. assemblyInterface.p_fp.cw = 0x027f ; // Control word assemblyInterface.p_fp.tw = 0xffff; // Tag registers - all unused #endif // Store the argument and the closure. assemblyInterface.p_rdx = proc->Word(); // Closure assemblyInterface.p_rax = (arg == 0) ? TAGGED(0) : DEREFWORD(arg); // Argument // Have to set the register mask in case we get a GC before the thread starts. saveRegisterMask = (1 << 2) | 1; // Rdx and rax #ifdef POLYML32IN64 // In 32-in-64 RBX always contains the heap base address. assemblyInterface.p_rbx.stackAddr = (stackItem*)globalHeapBase; #endif } // In Solaris-x86 the registers are named EIP and ESP. #if (!defined(REG_EIP) && defined(EIP)) #define REG_EIP EIP #endif #if (!defined(REG_ESP) && defined(ESP)) #define REG_ESP ESP #endif // Get the PC and SP(stack) from a signal context. This is needed for profiling. // This version gets the actual sp and pc if we are in ML. // N.B. This must not call malloc since we're in a signal handler. bool X86TaskData::AddTimeProfileCount(SIGNALCONTEXT *context) { stackItem * sp = 0; POLYCODEPTR pc = 0; if (context != 0) { // The tests for HAVE_UCONTEXT_T, HAVE_STRUCT_SIGCONTEXT and HAVE_WINDOWS_H need // to follow the tests in processes.h. #if defined(HAVE_WINDOWS_H) #ifdef _WIN64 sp = (stackItem *)context->Rsp; pc = (POLYCODEPTR)context->Rip; #else // Windows 32 including cygwin. 
sp = (stackItem *)context->Esp; pc = (POLYCODEPTR)context->Eip; #endif #elif defined(HAVE_UCONTEXT_T) #ifdef HAVE_MCONTEXT_T_GREGS // Linux #ifndef HOSTARCHITECTURE_X86_64 pc = (byte*)context->uc_mcontext.gregs[REG_EIP]; sp = (stackItem*)context->uc_mcontext.gregs[REG_ESP]; #else /* HOSTARCHITECTURE_X86_64 */ pc = (byte*)context->uc_mcontext.gregs[REG_RIP]; sp = (stackItem*)context->uc_mcontext.gregs[REG_RSP]; #endif /* HOSTARCHITECTURE_X86_64 */ #elif defined(HAVE_MCONTEXT_T_MC_ESP) // FreeBSD #ifndef HOSTARCHITECTURE_X86_64 pc = (byte*)context->uc_mcontext.mc_eip; sp = (stackItem*)context->uc_mcontext.mc_esp; #else /* HOSTARCHITECTURE_X86_64 */ pc = (byte*)context->uc_mcontext.mc_rip; sp = (stackItem*)context->uc_mcontext.mc_rsp; #endif /* HOSTARCHITECTURE_X86_64 */ #else // Mac OS X #ifndef HOSTARCHITECTURE_X86_64 #if(defined(HAVE_STRUCT_MCONTEXT_SS)||defined(HAVE_STRUCT___DARWIN_MCONTEXT32_SS)) pc = (byte*)context->uc_mcontext->ss.eip; sp = (stackItem*)context->uc_mcontext->ss.esp; #elif(defined(HAVE_STRUCT___DARWIN_MCONTEXT32___SS)) pc = (byte*)context->uc_mcontext->__ss.__eip; sp = (stackItem*)context->uc_mcontext->__ss.__esp; #endif #else /* HOSTARCHITECTURE_X86_64 */ #if(defined(HAVE_STRUCT_MCONTEXT_SS)||defined(HAVE_STRUCT___DARWIN_MCONTEXT64_SS)) pc = (byte*)context->uc_mcontext->ss.rip; sp = (stackItem*)context->uc_mcontext->ss.rsp; #elif(defined(HAVE_STRUCT___DARWIN_MCONTEXT64___SS)) pc = (byte*)context->uc_mcontext->__ss.__rip; sp = (stackItem*)context->uc_mcontext->__ss.__rsp; #endif #endif /* HOSTARCHITECTURE_X86_64 */ #endif #elif defined(HAVE_STRUCT_SIGCONTEXT) #if defined(HOSTARCHITECTURE_X86_64) && defined(__OpenBSD__) // CPP defines missing in amd64/signal.h in OpenBSD pc = (byte*)context->sc_rip; sp = (stackItem*)context->sc_rsp; #else // !HOSTARCHITEXTURE_X86_64 || !defined(__OpenBSD__) pc = (byte*)context->sc_pc; sp = (stackItem*)context->sc_sp; #endif #endif } if (pc != 0) { // See if the PC we've got is an ML code address. MemSpace *space = gMem.SpaceForAddress(pc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { incrementCountAsynch(pc); return true; } } // See if the sp value is in the current stack. if (sp >= (stackItem*)this->stack->bottom && sp < (stackItem*)this->stack->top) { // We may be in the assembly code. The top of the stack will be a return address. pc = sp[0].w().AsCodePtr(); MemSpace *space = gMem.SpaceForAddress(pc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { incrementCountAsynch(pc); return true; } } // See if the value of regSP is a valid stack pointer. // This works if we happen to be in an RTS call using a "Full" call. // It doesn't work if we've used a "Fast" call because that doesn't save the SP. sp = assemblyInterface.stackPtr; if (sp >= (stackItem*)this->stack->bottom && sp < (stackItem*)this->stack->top) { // We may be in the run-time system. pc = sp[0].w().AsCodePtr(); MemSpace *space = gMem.SpaceForAddress(pc); if (space != 0 && (space->spaceType == ST_CODE || space->spaceType == ST_PERMANENT)) { incrementCountAsynch(pc); return true; } } // None of those worked return false; } // This is called from a different thread so we have to be careful. void X86TaskData::InterruptCode() { PLocker l(&interruptLock); // Set the stack limit pointer to the top of the stack to cause // a trap when we next check for stack overflow. // We use a lock here to ensure that we always use the current value of the // stack. 
The thread we're interrupting could be growing the stack at this point. if (this->stack != 0) this->assemblyInterface.stackLimit = (stackItem*)(this->stack->top-1); } // This is called from SwitchToPoly before we enter the ML code. void X86TaskData::SetMemRegisters() { // Copy the current store limits into variables before we go into the assembly code. // If we haven't yet set the allocation area or we don't have enough we need // to create one (or a new one). if (this->allocPointer <= this->allocLimit + this->allocWords) { if (this->allocPointer < this->allocLimit) Crash ("Bad length in heap overflow trap"); // Find some space to allocate in. Updates taskData->allocPointer and // returns a pointer to the newly allocated space (if allocWords != 0) PolyWord *space = processes->FindAllocationSpace(this, this->allocWords, true); if (space == 0) { // We will now raise an exception instead of returning. // Set allocWords to zero so we don't set the allocation register // since that could be holding the exception packet. this->allocWords = 0; } // Undo the allocation just now. this->allocPointer += this->allocWords; } if (this->allocWords != 0) { // If we have had a heap trap we actually do the allocation here. // We will have already garbage collected and recovered sufficient space. // This also happens if we have just trapped because of store profiling. this->allocPointer -= this->allocWords; // Now allocate // Set the allocation register to this area. N.B. This is an absolute address. if (this->allocReg < 15) get_reg(this->allocReg)[0].codeAddr = (POLYCODEPTR)(this->allocPointer + 1); /* remember: it's off-by-one */ this->allocWords = 0; } // If we have run out of store, either just above or while allocating in the RTS, // allocPointer and allocLimit will have been set to zero as part of the GC. We will // now be raising an exception which may free some store but we need to come back here // before we allocate anything. The compiled code uses unsigned arithmetic to check for // heap overflow but only after subtracting the space required. We need to make sure // that the values are still non-negative after subtracting any object size. if (this->allocPointer == 0) this->allocPointer += MAX_OBJECT_SIZE; if (this->allocLimit == 0) this->allocLimit += MAX_OBJECT_SIZE; this->assemblyInterface.localMbottom = this->allocLimit + 1; this->assemblyInterface.localMpointer = this->allocPointer + 1; // If we are profiling store allocation we set mem_hl so that a trap // will be generated. if (profileMode == kProfileStoreAllocation) this->assemblyInterface.localMbottom = this->assemblyInterface.localMpointer; this->assemblyInterface.threadId = this->threadObject; } // This is called whenever we have returned from ML to C. void X86TaskData::SaveMemRegisters() { this->allocPointer = this->assemblyInterface.localMpointer - 1; this->allocWords = 0; this->assemblyInterface.exceptionPacket = TAGGED(0); this->saveRegisterMask = 0; } // Called on a GC or stack overflow trap. The register mask // is in the bytes after the trap call. void X86TaskData::SetRegisterMask() { byte *pc = assemblyInterface.stackPtr[0].codeAddr; if (*pc == 0xcd) // CD - INT n is used for a single byte { pc++; saveRegisterMask = *pc++; } else if (*pc == 0xca) // CA - FAR RETURN is used for a two byte mask { pc++; saveRegisterMask = pc[0] | (pc[1] << 8); pc += 2; } assemblyInterface.stackPtr[0].codeAddr = pc; } stackItem *X86TaskData::get_reg(int n) /* Returns a pointer to the register given by n.
*/ { switch (n) { case 0: return &assemblyInterface.p_rax; case 1: return &assemblyInterface.p_rcx; case 2: return &assemblyInterface.p_rdx; case 3: return &assemblyInterface.p_rbx; // Should not have rsp or rbp. case 6: return &assemblyInterface.p_rsi; case 7: return &assemblyInterface.p_rdi; #ifdef HOSTARCHITECTURE_X86_64 case 8: return &assemblyInterface.p_r8; case 9: return &assemblyInterface.p_r9; case 10: return &assemblyInterface.p_r10; case 11: return &assemblyInterface.p_r11; case 12: return &assemblyInterface.p_r12; case 13: return &assemblyInterface.p_r13; case 14: return &assemblyInterface.p_r14; // R15 is the heap pointer so shouldn't occur here. #endif /* HOSTARCHITECTURE_X86_64 */ default: Crash("Unknown register %d\n", n); } } // Called as a result of a heap overflow trap void X86TaskData::HeapOverflowTrap(byte *pcPtr) { X86TaskData *mdTask = this; POLYUNSIGNED wordsNeeded = 0; // The next instruction, after any branches round forwarding pointers or pop // instructions, will be a store of register containing the adjusted heap pointer. // We need to find that register and the value in it in order to find out how big // the area we actually wanted is. N.B. The code-generator and assembly code // must generate the correct instruction sequence. // byte *pcPtr = assemblyInterface.programCtr; while (true) { if (pcPtr[0] == 0xeb) { // Forwarding pointer if (pcPtr[1] >= 128) pcPtr += 256 - pcPtr[1] + 2; else pcPtr += pcPtr[1] + 2; } else if ((pcPtr[0] & 0xf8) == 0x58) // Pop instruction. pcPtr++; else if (pcPtr[0] == 0x41 && ((pcPtr[1] & 0xf8) == 0x58)) // Pop with Rex prefix pcPtr += 2; else break; } #ifndef HOSTARCHITECTURE_X86_64 // This should be movl REG,0[%ebp]. ASSERT(pcPtr[0] == 0x89); mdTask->allocReg = (pcPtr[1] >> 3) & 7; // Remember this until we allocate the memory stackItem *reg = get_reg(mdTask->allocReg); stackItem reg_val = *reg; // The space we need is the difference between this register // and the current value of newptr. // The +1 here is because assemblyInterface.localMpointer is A.M.pointer +1. The reason // is that after the allocation we have the register pointing at the address we will // actually use. wordsNeeded = (this->allocPointer - (PolyWord*)reg_val.stackAddr) + 1; *reg = TAGGED(0); // Clear this - it's not a valid address. /* length in words, including length word */ ASSERT (wordsNeeded <= (1<<24)); /* Max object size including length/flag word is 2^24 words. */ #else /* HOSTARCHITECTURE_X86_64 */ ASSERT(pcPtr[1] == 0x89 || pcPtr[1] == 0x8b); if (pcPtr[1] == 0x89) { // New (5.4) format. This should be movq REG,%r15 ASSERT(pcPtr[0] == 0x49 || pcPtr[0] == 0x4d); mdTask->allocReg = (pcPtr[2] >> 3) & 7; // Remember this until we allocate the memory if (pcPtr[0] & 0x4) mdTask->allocReg += 8; } else { // Alternative form of movq REG,%r15 ASSERT(pcPtr[0] == 0x4c || pcPtr[0] == 0x4d); mdTask->allocReg = pcPtr[2] & 7; // Remember this until we allocate the memory if (pcPtr[0] & 0x1) mdTask->allocReg += 8; } stackItem *reg = get_reg(this->allocReg); stackItem reg_val = *reg; wordsNeeded = (POLYUNSIGNED)((this->allocPointer - (PolyWord*)reg_val.stackAddr) + 1); *reg = TAGGED(0); // Clear this - it's not a valid address. #endif /* HOSTARCHITECTURE_X86_64 */ if (profileMode == kProfileStoreAllocation) addProfileCount(wordsNeeded); mdTask->allocWords = wordsNeeded; // The actual allocation is done in SetMemRegisters. } void X86TaskData::SetException(poly_exn *exc) // The RTS wants to raise an exception packet. 
Normally this is as the // result of an RTS call in which case the caller will check this. It can // also happen in a trap. { assemblyInterface.exceptionPacket = (PolyWord)exc; // Set for direct calls. } // Decode and process an effective address. There may // be a constant address in here but in any case we need // to decode it to work out where the next instruction starts. // If this is an lea instruction any addresses are just constants // so must not be treated as addresses. static void skipea(PolyObject *base, byte **pt, ScanAddress *process, bool lea) { unsigned int modrm = *((*pt)++); unsigned int md = modrm >> 6; unsigned int rm = modrm & 7; if (md == 3) { } /* Register. */ else if (rm == 4) { /* s-i-b present. */ unsigned int sib = *((*pt)++); if (md == 0) { if ((sib & 7) == 5) { if (! lea) { #ifndef HOSTARCHITECTURE_X86_64 process->ScanConstant(base, *pt, PROCESS_RELOC_DIRECT); #endif /* HOSTARCHITECTURE_X86_64 */ } (*pt) += 4; } } else if (md == 1) (*pt)++; else if (md == 2) (*pt) += 4; } else if (md == 0 && rm == 5) { if (!lea) { #ifndef HOSTARCHITECTURE_X86_64 /* Absolute address. */ process->ScanConstant(base, *pt, PROCESS_RELOC_DIRECT); #endif /* HOSTARCHITECTURE_X86_64 */ } *pt += 4; } else { if (md == 1) *pt += 1; else if (md == 2) *pt += 4; } } /* Added to deal with constants within the code rather than in the constant area. The constant area is still needed for the function name. DCJM 2/1/2001 */ void X86Dependent::ScanConstantsWithinCode(PolyObject *addr, PolyObject *old, POLYUNSIGNED length, ScanAddress *process) { byte *pt = (byte*)addr; PolyWord *end = addr->Offset(length - 1); #ifdef POLYML32IN64 // If this begins with enter-int it's interpreted code - ignore if (pt[0] == 0xff && pt[1] == 0x55 && pt[2] == 0x48) return; #endif while (true) { // Escape prefixes come before any Rex byte if (*pt == 0xf2 || *pt == 0xf3 || *pt == 0x66) pt++; #ifdef HOSTARCHITECTURE_X86_64 // REX prefixes. Set this first. byte lastRex; if (*pt >= 0x40 && *pt <= 0x4f) lastRex = *pt++; else lastRex = 0; //printf("pt=%p *pt=%x\n", pt, *pt); #endif /* HOSTARCHITECTURE_X86_64 */ switch (*pt) { case 0x00: return; // This is actually the first byte of the old "marker" word. case 0xf4: return; // Halt - now used as a marker. case 0x50: case 0x51: case 0x52: case 0x53: case 0x54: case 0x55: case 0x56: case 0x57: /* Push */ case 0x58: case 0x59: case 0x5a: case 0x5b: case 0x5c: case 0x5d: case 0x5e: case 0x5f: /* Pop */ case 0x90: /* nop */ case 0xc3: /* ret */ case 0xf9: /* stc */ case 0xce: /* into */ case 0xf0: /* lock. */ case 0xf3: /* rep/repe */ case 0xa4: case 0xa5: case 0xaa: case 0xab: /* movs/stos */ case 0xa6: /* cmpsb */ case 0x9e: /* sahf */ case 0x99: /* cqo/cdq */ pt++; break; case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: case 0x78: case 0x79: case 0x7a: case 0x7b: case 0x7c: case 0x7d: case 0x7e: case 0x7f: case 0xeb: /* short jumps. */ case 0xcd: /* INT - now used for a register mask */ case 0xa8: /* TEST_ACC8 */ case 0x6a: /* PUSH_8 */ pt += 2; break; case 0xc2: /* RET_16 */ case 0xca: /* FAR RET 16 - used for a register mask */ pt += 3; break; case 0x8d: /* leal. */ pt++; skipea(addr, &pt, process, true); break; case 0x03: case 0x0b: case 0x13: case 0x1b: case 0x23: case 0x2b: case 0x33: case 0x3b: /* Add r,ea etc. 
*/ case 0x88: /* MOVB_R_A */ case 0x89: /* MOVL_R_A */ case 0x8b: /* MOVL_A_R */ case 0x62: /* BOUNDL */ case 0xff: /* Group5 */ case 0xd1: /* Group2_1_A */ case 0x8f: /* POP_A */ case 0xd3: /* Group2_CL_A */ case 0x87: // XCHNG case 0x63: // MOVSXD pt++; skipea(addr, &pt, process, false); break; case 0xf6: /* Group3_a */ { int isTest = 0; pt++; /* The test instruction has an immediate operand. */ if ((*pt & 0x38) == 0) isTest = 1; skipea(addr, &pt, process, false); if (isTest) pt++; break; } case 0xf7: /* Group3_A */ { int isTest = 0; pt++; /* The test instruction has an immediate operand. */ if ((*pt & 0x38) == 0) isTest = 1; skipea(addr, &pt, process, false); if (isTest) pt += 4; break; } case 0xc1: /* Group2_8_A */ case 0xc6: /* MOVB_8_A */ case 0x83: /* Group1_8_A */ case 0x80: /* Group1_8_a */ case 0x6b: // IMUL Ev,Ib pt++; skipea(addr, &pt, process, false); pt++; break; case 0x69: // IMUL Ev,Iv pt++; skipea(addr, &pt, process, false); pt += 4; break; case 0x81: /* Group1_32_A */ { pt ++; #ifndef HOSTARCHITECTURE_X86_64 unsigned opCode = *pt; #endif skipea(addr, &pt, process, false); // Only check the 32 bit constant if this is a comparison. // For other operations this may be untagged and shouldn't be an address. #ifndef HOSTARCHITECTURE_X86_64 if ((opCode & 0x38) == 0x38) process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); #endif pt += 4; break; } case 0xe8: case 0xe9: // Long jump and call. These are used to call constant (known) functions // and also long jumps within the function. { pt++; POLYSIGNED disp = (pt[3] & 0x80) ? -1 : 0; // Set the sign just in case. for(unsigned i = 4; i > 0; i--) disp = (disp << 8) | pt[i-1]; byte *absAddr = pt + disp + 4; // The address is relative to AFTER the constant // If the new address is within the current piece of code we don't do anything if (absAddr >= (byte*)addr && absAddr < (byte*)end) {} else { #ifdef HOSTARCHITECTURE_X86_64 ASSERT(sizeof(PolyWord) == 4); // Should only be used internally on x64 #endif /* HOSTARCHITECTURE_X86_64 */ if (addr != old) { // The old value of the displacement was relative to the old address before // we copied this code segment. // We have to correct it back to the original address. absAddr = absAddr - (byte*)addr + (byte*)old; // We have to correct the displacement for the new location and store // that away before we call ScanConstant. size_t newDisp = absAddr - pt - 4; byte* wr = gMem.SpaceForAddress(pt)->writeAble(pt); for (unsigned i = 0; i < 4; i++) { wr[i] = (byte)(newDisp & 0xff); newDisp >>= 8; } } process->ScanConstant(addr, pt, PROCESS_RELOC_I386RELATIVE); } pt += 4; break; } case 0xc7:/* MOVL_32_A */ { pt++; if ((*pt & 0xc0) == 0x40 /* Byte offset or sib present */ && ((*pt & 7) != 4) /* But not sib present */ && pt[1] == 256-sizeof(PolyWord)) { /* We may use a move instruction to set the length word on a new segment. We mustn't try to treat this as a constant. */ pt += 6; /* Skip the modrm byte, the offset and the constant. 
*/ } else { skipea(addr, &pt, process, false); #ifndef HOSTARCHITECTURE_X86_64 // This isn't used for addresses even in 32-in-64 process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); #endif /* HOSTARCHITECTURE_X86_64 */ pt += 4; } break; } case 0xb8: case 0xb9: case 0xba: case 0xbb: case 0xbc: case 0xbd: case 0xbe: case 0xbf: /* MOVL_32_64_R */ pt ++; #ifdef HOSTARCHITECTURE_X86_64 if ((lastRex & 8) == 0) pt += 4; // 32-bit mode on 64-bits else #endif /* HOSTARCHITECTURE_X86_64 */ { // This is used in native 32-bit for constants and in // 32-in-64 for the special case of an absolute address. process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); pt += sizeof(uintptr_t); } break; case 0x68: /* PUSH_32 */ pt ++; #if (!defined(HOSTARCHITECTURE_X86_64)) process->ScanConstant(addr, pt, PROCESS_RELOC_DIRECT); #endif pt += 4; break; case 0x0f: /* ESCAPE */ { pt++; switch (*pt) { case 0xb6: /* movzl */ case 0xb7: // movzw case 0xbe: // movsx case 0xbf: // movsx case 0xc1: /* xaddl */ case 0xae: // ldmxcsr/stmxcsr case 0xaf: // imul case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47: case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f: // cmov pt++; skipea(addr, &pt, process, false); break; case 0x80: case 0x81: case 0x82: case 0x83: case 0x84: case 0x85: case 0x86: case 0x87: case 0x88: case 0x89: case 0x8a: case 0x8b: case 0x8c: case 0x8d: case 0x8e: case 0x8f: /* Conditional branches with 32-bit displacement. */ pt += 5; break; case 0x90: case 0x91: case 0x92: case 0x93: case 0x94: case 0x95: case 0x96: case 0x97: case 0x98: case 0x99: case 0x9a: case 0x9b: case 0x9c: case 0x9d: case 0x9e: case 0x9f: /* SetCC. */ pt++; skipea(addr, &pt, process, false); break; // These are SSE2 instructions case 0x10: case 0x11: case 0x58: case 0x5c: case 0x59: case 0x5e: case 0x2e: case 0x2a: case 0x54: case 0x57: case 0x5a: case 0x6e: case 0x7e: case 0x2c: case 0x2d: pt++; skipea(addr, &pt, process, false); break; case 0x73: // PSRLDQ - EA,imm pt++; skipea(addr, &pt, process, false); pt++; break; default: Crash("Unknown opcode %d at %p\n", *pt, pt); } break; } case 0xd8: case 0xd9: case 0xda: case 0xdb: case 0xdc: case 0xdd: case 0xde: case 0xdf: // Floating point escape instructions { pt++; if ((*pt & 0xe0) == 0xe0) pt++; else skipea(addr, &pt, process, false); break; } default: Crash("Unknown opcode %d at %p\n", *pt, pt); } } } // Increment the value contained in the first word of the mutex. Handle X86TaskData::AtomicDecrement(Handle mutexp) { PolyObject *p = DEREFHANDLE(mutexp); POLYUNSIGNED result = X86AsmAtomicDecrement(p); return this->saveVec.push(PolyWord::FromUnsigned(result)); } // Release a mutex. Because the atomic increment and decrement // use the hardware LOCK prefix we can simply set this to zero. void X86TaskData::AtomicReset(Handle mutexp) { DEREFHANDLE(mutexp)->Set(0, TAGGED(0)); } static X86Dependent x86Dependent; MachineDependent *machineDependent = &x86Dependent; extern "C" { POLYEXTERNALSYMBOL void *PolyX86GetThreadData(); } // Return the address of assembly data for the current thread. This is normally in // RBP except if we are in a callback. void *PolyX86GetThreadData() { // We should get the task data for the thread that is running this code. // If this thread has been created by the foreign code we will have to // create a new one here. 
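// The value returned is the address of the thread's AssemblyArgs block, so its // layout must stay in step with the Arg_* offsets and ArgVector STRUCTs in the // x86assembly_* files; that is why adding trapHandlerEntry above also shifts every // following offset in those files.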
TaskData* taskData = processes->GetTaskDataForThread(); if (taskData == 0) { try { taskData = processes->CreateNewTaskData(0, 0, 0, TAGGED(0)); } catch (std::bad_alloc&) { ::Exit("Unable to create thread data - insufficient memory"); } catch (MemoryException&) { ::Exit("Unable to create thread data - insufficient memory"); } } return &((X86TaskData*)taskData)->assemblyInterface; } struct _entrypts machineSpecificEPT[] = { { "PolyX86GetThreadData", (polyRTSFunction)& PolyX86GetThreadData }, { NULL, NULL} // End of list. }; diff --git a/libpolyml/x86assembly_gas32.S b/libpolyml/x86assembly_gas32.S index de48ac0e..91594542 100644 --- a/libpolyml/x86assembly_gas32.S +++ b/libpolyml/x86assembly_gas32.S @@ -1,184 +1,185 @@ /* Title: Assembly code routines for the poly system. Author: David Matthews Copyright (c) David C. J. Matthews 2000-2020 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* This is the 32-bit Unix version of the assembly code file. There are separate versions of 32/64 and Windows (Intel syntax) and Unix (gas syntax). */ /* Registers used :- %%eax: First argument to function. Result of function call. %%ebx: Second argument to function. %%ecx: General register %%edx: Closure pointer in call. %%ebp: Points to memory used for extra registers %%esi: General register. %%edi: General register. %%esp: Stack pointer. */ #include "config.h" #ifdef SYMBOLS_REQUIRE_UNDERSCORE #define EXTNAME(x) _##x #else #define EXTNAME(x) x #endif # # Macro to begin the hand-coded functions # #ifdef MACOSX #define GLOBAL .globl #else #define GLOBAL .global #endif #define INLINE_ROUTINE(id) \ GLOBAL EXTNAME(id); \ EXTNAME(id): #define Fr_Size 16 /* This is the argument vector passed in to X86AsmSwitchToPoly It is used to initialise the frame. A few values are updated when ML returns. */ #define Arg_LocalMpointer 0x0 #define Arg_HandlerRegister 0x4 #define Arg_LocalMbottom 0x8 #define Arg_StackLimit 0xc #define Arg_ExceptionPacket 0x10 /* Address of packet to raise */ #define Arg_RequestCode 0x14 /* Byte: Io function to call. */ #define Arg_ReturnReason 0x16 /* Byte: Reason for returning from ML. 
*/ #define Arg_FullRestore 0x17 /* Byte: Full/partial restore */ #define Arg_SaveCStack 0x18 /* Save C Stack pointer */ #define Arg_ThreadId 0x1c /* My thread id */ #define Arg_StackPtr 0x20 /* Stack Pointer */ -#define Arg_SaveRAX 0x34 -#define Arg_SaveRBX 0x38 -#define Arg_SaveRCX 0x3c -#define Arg_SaveRDX 0x40 -#define Arg_SaveRSI 0x44 -#define Arg_SaveRDI 0x48 -#define Arg_SaveFP 0x4c +#define Arg_TrapHandlerEntry 0x34 +#define Arg_SaveRAX 0x38 +#define Arg_SaveRBX 0x3c +#define Arg_SaveRCX 0x40 +#define Arg_SaveRDX 0x44 +#define Arg_SaveRSI 0x48 +#define Arg_SaveRDI 0x4c +#define Arg_SaveFP 0x50 #define RETURN_HEAP_OVERFLOW 1 #define RETURN_STACK_OVERFLOW 2 #define RETURN_STACK_OVERFLOWEX 3 # Mark the stack as non-executable when supported #if (defined(__linux__) && defined(__ELF__)) .section .note.GNU-stack, "", @progbits #endif # # CODE STARTS HERE # .text #define CALL_EXTRA(index) \ movb $index,Arg_ReturnReason(%ebp); \ jmp CallTrapHandler; /* Enter ML code. This is now only ever used to start a new thread. It is probably unnecessary to save the callee-save regs or load the ML regs. */ INLINE_ROUTINE(X86AsmSwitchToPoly) pushl %ebp # Standard entry sequence movl 8(%esp),%ebp # Address of argument vector pushl %ebx pushl %edi pushl %esi # Push callee-save registers subl $(Fr_Size-12),%esp # Allocate frame movl %esp,Arg_SaveCStack(%ebp) movl Arg_StackPtr(%ebp),%esp FRSTOR Arg_SaveFP(%ebp) movl Arg_SaveRAX(%ebp),%eax # Load the registers movl Arg_SaveRBX(%ebp),%ebx # Load the registers movl Arg_SaveRCX(%ebp),%ecx movl Arg_SaveRDX(%ebp),%edx movl Arg_SaveRSI(%ebp),%esi movl Arg_SaveRDI(%ebp),%edi cld # Clear this just in case jmp *(%edx) /* Save all the registers and enter the trap handler. It is probably unnecessary to save the FP state now. */ CallTrapHandler: movl %eax,Arg_SaveRAX(%ebp) movl %ebx,Arg_SaveRBX(%ebp) movl %ecx,Arg_SaveRCX(%ebp) movl %edx,Arg_SaveRDX(%ebp) movl %esi,Arg_SaveRSI(%ebp) movl %edi,Arg_SaveRDI(%ebp) fnsave Arg_SaveFP(%ebp) # Save FP state. Also resets the state so... fldcw Arg_SaveFP(%ebp) # ...load because we need the same rounding mode in the RTS movl %esp,Arg_StackPtr(%ebp) # Save ML stack pointer movl Arg_SaveCStack(%ebp),%esp # Restore C stack pointer subl $12,%esp # Align stack ptr - GCC prefers this pushl Arg_ThreadId(%ebp) - call X86TrapHandler + calll *Arg_TrapHandlerEntry(%ebp) addl $16,%esp movl Arg_StackPtr(%ebp),%esp movl Arg_ExceptionPacket(%ebp),%eax cmpl $1,%eax # Did we raise an exception? 
jnz raisexlocal FRSTOR Arg_SaveFP(%ebp) movl Arg_SaveRAX(%ebp),%eax # Load the registers movl Arg_SaveRBX(%ebp),%ebx # Load the registers movl Arg_SaveRCX(%ebp),%ecx movl Arg_SaveRDX(%ebp),%edx movl Arg_SaveRSI(%ebp),%esi movl Arg_SaveRDI(%ebp),%edi cld # Clear this just in case ret raisexlocal: movl Arg_HandlerRegister(%ebp),%ecx # Get next handler into %rcx jmp *(%ecx) INLINE_ROUTINE(X86AsmCallExtraRETURN_HEAP_OVERFLOW) CALL_EXTRA(RETURN_HEAP_OVERFLOW) INLINE_ROUTINE(X86AsmCallExtraRETURN_STACK_OVERFLOW) CALL_EXTRA(RETURN_STACK_OVERFLOW) INLINE_ROUTINE(X86AsmCallExtraRETURN_STACK_OVERFLOWEX) CALL_EXTRA(RETURN_STACK_OVERFLOWEX) # Additional assembly code routines # This implements atomic addition in the same way as atomic_increment INLINE_ROUTINE(X86AsmAtomicDecrement) #ifndef HOSTARCHITECTURE_X86_64 movl 4(%esp),%eax #else movl %edi,%eax # On X86_64 the argument is passed in %edi #endif # Use %ecx and %eax because they are volatile (unlike %ebx on X86/64/Unix) movl $-2,%ecx lock; xaddl %ecx,(%eax) subl $2,%ecx movl %ecx,%eax ret diff --git a/libpolyml/x86assembly_gas64.S b/libpolyml/x86assembly_gas64.S index f7385b02..a9d4554d 100644 --- a/libpolyml/x86assembly_gas64.S +++ b/libpolyml/x86assembly_gas64.S @@ -1,283 +1,284 @@ /* Title: Assembly code routines for the poly system. Author: David Matthews Copyright (c) David C. J. Matthews 2000-2020 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License version 2.1 as published by the Free Software Foundation. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* This is the 64-bit gas version of the assembly code file. There are separate versions of 32/64 and MASM (Intel syntax) and GCC (gas syntax). */ /* Registers used :- %rax: First argument to function. Result of function call. %rbx: Second argument to function. %rcx: General register %rdx: Closure pointer in call. %rbp: Points to memory used for extra registers %rsi: General register. %rdi: General register. %rsp: Stack pointer. %r8: Third argument to function %r9: Fourth argument to function %r10: Fifth argument to function %r11: General register %r12: General register %r13: General register %r14: General register %r15: Memory allocation pointer */ #include "config.h" #ifdef SYMBOLS_REQUIRE_UNDERSCORE #define EXTNAME(x) _##x #else #define EXTNAME(x) x #endif #ifdef __CYGWIN__ #define _WIN32 1 #endif /* Macro to begin the hand-coded functions */ #ifdef MACOSX #define GLOBAL .globl #else #define GLOBAL .global #endif #define INLINE_ROUTINE(id) \ GLOBAL EXTNAME(id); \ EXTNAME(id): /* Extra entries on the C stack */ #define Fr_Size 64 /* Must be multiple of 16 to get alignment correct */ /* This is the argument vector passed in to X86AsmSwitchToPoly It is used to initialise the frame. A few values are updated when ML returns. */ #define Arg_LocalMpointer 0x0 #define Arg_HandlerRegister 0x8 #define Arg_LocalMbottom 0x10 #define Arg_StackLimit 0x18 #define Arg_ExceptionPacket 0x20 /* Address of packet to raise */ #define Arg_RequestCode 0x28 /* Byte: Io function to call.
*/ #define Arg_ReturnReason 0x2a /* Byte: Reason for returning from ML. */ #define Arg_UnusedRestore 0x2b /* Byte: Full/partial restore */ #define Arg_SaveCStack 0x30 /* Current stack base */ #define Arg_ThreadId 0x38 /* My thread id */ #define Arg_StackPtr 0x40 /* Stack Pointer */ -#define Arg_SaveRAX 0x68 -#define Arg_SaveRBX 0x70 -#define Arg_SaveRCX 0x78 -#define Arg_SaveRDX 0x80 -#define Arg_SaveRSI 0x88 -#define Arg_SaveRDI 0x90 -#define Arg_SaveR8 0x98 -#define Arg_SaveR9 0xa0 -#define Arg_SaveR10 0xa8 -#define Arg_SaveR11 0xb0 -#define Arg_SaveR12 0xb8 -#define Arg_SaveR13 0xc0 -#define Arg_SaveR14 0xc8 -#define Arg_SaveXMM0 0xd0 -#define Arg_SaveXMM1 0xd8 -#define Arg_SaveXMM2 0xe0 -#define Arg_SaveXMM3 0xe8 -#define Arg_SaveXMM4 0xf0 -#define Arg_SaveXMM5 0xf8 -#define Arg_SaveXMM6 0x100 +#define Arg_TrapHandlerEntry 0x68 +#define Arg_SaveRAX 0x70 +#define Arg_SaveRBX 0x78 +#define Arg_SaveRCX 0x80 +#define Arg_SaveRDX 0x88 +#define Arg_SaveRSI 0x90 +#define Arg_SaveRDI 0x98 +#define Arg_SaveR8 0xa0 +#define Arg_SaveR9 0xa8 +#define Arg_SaveR10 0xb0 +#define Arg_SaveR11 0xb8 +#define Arg_SaveR12 0xc0 +#define Arg_SaveR13 0xc8 +#define Arg_SaveR14 0xd0 +#define Arg_SaveXMM0 0xd8 +#define Arg_SaveXMM1 0xe0 +#define Arg_SaveXMM2 0xe8 +#define Arg_SaveXMM3 0xf0 +#define Arg_SaveXMM4 0xf8 +#define Arg_SaveXMM5 0x100 +#define Arg_SaveXMM6 0x108 /* IO function numbers. These are functions that are called to handle special cases in this code */ #include "sys.h" #define RETURN_HEAP_OVERFLOW 1 #define RETURN_STACK_OVERFLOW 2 #define RETURN_STACK_OVERFLOWEX 3 #define RETURN_RAISE_OVERFLOW 8 # Mark the stack as non-executable when supported #if (defined(__linux__) && defined(__ELF__)) .section .note.GNU-stack, "", @progbits #endif # # CODE STARTS HERE # .text #define CALL_EXTRA(index) \ movb $index,Arg_ReturnReason(%rbp); \ jmp CallTrapHandler; /* Enter ML code. This is now only ever used to start a new thread. It is probably unnecessary to save the callee-save regs or load the ML regs. */ INLINE_ROUTINE(X86AsmSwitchToPoly) pushq %rbp # Standard entry sequence /* If we're compiling with Mingw we're using Windows calling conventions. */ #ifdef _WIN32 movq %rcx,%rbp # Argument is in %rcx #else movq %rdi,%rbp # Argument is in %rdi #endif pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 #ifdef _WIN32 pushq %rdi # Callee save in Windows pushq %rsi subq $(Fr_Size-56),%rsp # Argument is already in %rcx #else subq $(Fr_Size-40),%rsp #endif movq %rsp,Arg_SaveCStack(%rbp) movq Arg_LocalMpointer(%rbp),%r15 movq Arg_StackPtr(%rbp),%rsp # Set the new stack ptr movsd Arg_SaveXMM0(%rbp),%xmm0 # Load the registers movsd Arg_SaveXMM1(%rbp),%xmm1 movsd Arg_SaveXMM2(%rbp),%xmm2 movsd Arg_SaveXMM3(%rbp),%xmm3 movsd Arg_SaveXMM4(%rbp),%xmm4 movsd Arg_SaveXMM5(%rbp),%xmm5 movsd Arg_SaveXMM6(%rbp),%xmm6 movq Arg_SaveRBX(%rbp),%rbx movq Arg_SaveRCX(%rbp),%rcx movq Arg_SaveRDX(%rbp),%rdx movq Arg_SaveRSI(%rbp),%rsi movq Arg_SaveRDI(%rbp),%rdi movq Arg_SaveR8(%rbp),%r8 movq Arg_SaveR9(%rbp),%r9 movq Arg_SaveR10(%rbp),%r10 movq Arg_SaveR11(%rbp),%r11 movq Arg_SaveR12(%rbp),%r12 movq Arg_SaveR13(%rbp),%r13 movq Arg_SaveR14(%rbp),%r14 movq Arg_SaveRAX(%rbp),%rax cld # Clear this just in case #ifdef POLYML32IN64 jmp *(%rbx,%rdx,4) #else jmp *(%rdx) #endif /* This is exactly the same as raisex but seems to be needed to work round a PIC problem. */ raisexLocal: movq Arg_HandlerRegister(%rbp),%rcx # Get next handler into %rcx jmp *(%rcx) /* Save all the registers and enter the trap handler. 
It is probably unnecessary to save the FP state now. */ CallTrapHandler: movq %rax,Arg_SaveRAX(%rbp) movq %rbx,Arg_SaveRBX(%rbp) movq %rcx,Arg_SaveRCX(%rbp) movq %rdx,Arg_SaveRDX(%rbp) movq %rsi,Arg_SaveRSI(%rbp) movq %rdi,Arg_SaveRDI(%rbp) movsd %xmm0,Arg_SaveXMM0(%rbp) movsd %xmm1,Arg_SaveXMM1(%rbp) movsd %xmm2,Arg_SaveXMM2(%rbp) movsd %xmm3,Arg_SaveXMM3(%rbp) movsd %xmm4,Arg_SaveXMM4(%rbp) movsd %xmm5,Arg_SaveXMM5(%rbp) movsd %xmm6,Arg_SaveXMM6(%rbp) movq %r8,Arg_SaveR8(%rbp) movq %r9,Arg_SaveR9(%rbp) movq %r10,Arg_SaveR10(%rbp) movq %r11,Arg_SaveR11(%rbp) movq %r12,Arg_SaveR12(%rbp) movq %r13,Arg_SaveR13(%rbp) movq %r14,Arg_SaveR14(%rbp) movq %rsp,Arg_StackPtr(%rbp) movq %r15,Arg_LocalMpointer(%rbp) # Save back heap pointer movq Arg_SaveCStack(%rbp),%rsp # Restore C stack pointer #ifdef _WIN32 subq $32,%rsp # Windows save area movq Arg_ThreadId(%rbp),%rcx #else movq Arg_ThreadId(%rbp),%rdi #endif - call X86TrapHandler@plt + callq *Arg_TrapHandlerEntry(%rbp) #ifdef _WIN32 addq $32,%rsp #endif movq Arg_LocalMpointer(%rbp),%r15 movq Arg_StackPtr(%rbp),%rsp # Set the new stack ptr movsd Arg_SaveXMM0(%rbp),%xmm0 # Load the registers movsd Arg_SaveXMM1(%rbp),%xmm1 movsd Arg_SaveXMM2(%rbp),%xmm2 movsd Arg_SaveXMM3(%rbp),%xmm3 movsd Arg_SaveXMM4(%rbp),%xmm4 movsd Arg_SaveXMM5(%rbp),%xmm5 movsd Arg_SaveXMM6(%rbp),%xmm6 movq Arg_SaveRBX(%rbp),%rbx movq Arg_SaveRCX(%rbp),%rcx movq Arg_SaveRDX(%rbp),%rdx movq Arg_SaveRSI(%rbp),%rsi movq Arg_SaveRDI(%rbp),%rdi movq Arg_SaveR8(%rbp),%r8 movq Arg_SaveR9(%rbp),%r9 movq Arg_SaveR10(%rbp),%r10 movq Arg_SaveR11(%rbp),%r11 movq Arg_SaveR12(%rbp),%r12 movq Arg_SaveR13(%rbp),%r13 movq Arg_SaveR14(%rbp),%r14 movq Arg_ExceptionPacket(%rbp),%rax cmpq $1,%rax # Did we raise an exception? jnz raisexLocal movq Arg_SaveRAX(%rbp),%rax cld # Clear this just in case ret # Additional assembly code routines INLINE_ROUTINE(X86AsmCallExtraRETURN_HEAP_OVERFLOW) CALL_EXTRA(RETURN_HEAP_OVERFLOW) INLINE_ROUTINE(X86AsmCallExtraRETURN_STACK_OVERFLOW) CALL_EXTRA(RETURN_STACK_OVERFLOW) INLINE_ROUTINE(X86AsmCallExtraRETURN_STACK_OVERFLOWEX) CALL_EXTRA(RETURN_STACK_OVERFLOWEX) # This implements atomic subtraction in the same way as atomic_decrement INLINE_ROUTINE(X86AsmAtomicDecrement) #ifdef _WIN32 movq %rcx,%rax # On Windows the argument is passed in %rcx #else movq %rdi,%rax # On X86_64 the argument is passed in %rdi #endif # Use %rcx and %rax because they are volatile (unlike %rbx on X86/64/Unix) movq $-2,%rcx #ifdef POLYML32IN64 lock xaddl %ecx,(%rax) # Rax is an absolute address but this is only a word #else lock xaddq %rcx,(%rax) #endif subq $2,%rcx movq %rcx,%rax ret diff --git a/libpolyml/x86assembly_masm32.S b/libpolyml/x86assembly_masm32.S index 85b84ae0..db1e95b9 100644 --- a/libpolyml/x86assembly_masm32.S +++ b/libpolyml/x86assembly_masm32.S @@ -1,170 +1,171 @@ ; ; Title: Assembly code routines for the poly system. ; Author: David Matthews ; Copyright (c) David C. J. Matthews 2000-2020 ; ; This library is free software; you can redistribute it and/or ; modify it under the terms of the GNU Lesser General Public ; License version 2.1 as published by the Free Software Foundation. ; ; This library is distributed in the hope that it will be useful, ; but WITHOUT ANY WARRANTY; without even the implied warranty of ; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ; Lesser General Public License for more details. 
; ; You should have received a copy of the GNU Lesser General Public ; License along with this library; if not, write to the Free Software ; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA ; ; ; Registers used :- ; ; eax: First argument to function. Result of function call. ; ebx: Second argument to function. ; ecx: General register ; edx: Closure pointer in call. ; ebp: Points to memory used for extra registers ; esi: General register. ; edi: General register. ; esp: Stack pointer. .486 .model flat,c Fr_Size EQU 16 ; Make this a multiple of 16 ; This is the argument vector passed in to X86AsmSwitchToPoly ; It is used to initialise the frame. A few values are updated ; when ML returns. ArgVector STRUCT LocalMPointer DWORD ? HandlerRegister DWORD ? LocalMbottom DWORD ? StackLimit DWORD ? ExceptionPacket DWORD ? ; Address of packet to raise UnusedRequestCode DB ? ; Byte: Io function to call. UnusedFlag DB ? ReturnReason DB ? ; Byte: Reason for returning from ML. UnusedRestore DB ? ; SaveCStack DWORD ? ; Saved C stack pointer ThreadId DWORD ? ; My thread id StackPtr DWORD ? ; Stack pointer UnusedProgramCtr DWORD ? HeapOverFlowCall DWORD ? StackOverFlowCall DWORD ? StackOverFlowCallEx DWORD ? +TrapHandlerEntry DWORD ? SaveRAX DWORD ? SaveRBX DWORD ? SaveRCX DWORD ? SaveRDX DWORD ? SaveRSI DWORD ? SaveRDI DWORD ? SaveFP WORD ? ; Actually larger ArgVector ENDS ; Codes to indicate the reason for return. RETURN_HEAP_OVERFLOW EQU 1 RETURN_STACK_OVERFLOW EQU 2 RETURN_STACK_OVERFLOWEX EQU 3 RETURN_CALLBACK_RETURN EQU 6 RETURN_CALLBACK_EXCEPTION EQU 7 RETURN_KILL_SELF EQU 9 ; ; CODE STARTS HERE ; .CODE ; Enter ML code. This is now only ever used to start a new thread. ; It is probably unnecessary to save the callee-save regs or load the ML regs. PUBLIC X86AsmSwitchToPoly X86AsmSwitchToPoly: push ebp ; Standard entry sequence mov ebp,[8+esp] ; Address of argument vector push ebx ; Push callee-save registers push edi push esi sub esp,(Fr_size-12) ; Allocate frame mov [ArgVector.SaveCStack+ebp],esp mov esp,[ArgVector.StackPtr+ebp] frstor [ArgVector.SaveFP+ebp] mov eax,[ArgVector.SaveRAX+ebp] mov ebx,[ArgVector.SaveRBX+ebp] mov ecx,[ArgVector.SaveRCX+ebp] mov edx,[ArgVector.SaveRDX+ebp] mov esi,[ArgVector.SaveRSI+ebp] mov edi,[ArgVector.SaveRDI+ebp] cld ; Clear this just in case jmp dword ptr [edx] ; Save all the registers and enter the trap handler. ; It is probably unnecessary to save the FP state now. X86TrapHandler PROTO C CallTrapHandler: mov [ArgVector.SaveRAX+ebp],eax mov [ArgVector.SaveRBX+ebp],ebx mov [ArgVector.SaveRCX+ebp],ecx mov [ArgVector.SaveRDX+ebp],edx mov [ArgVector.SaveRSI+ebp],esi mov [ArgVector.SaveRDI+ebp],edi FNSAVE [ArgVector.SaveFP+ebp] ; Save FP state. Also resets the state so... FLDCW [ArgVector.SaveFP+ebp] ; ...load because we need the same rounding mode in the RTS mov [ArgVector.StackPtr+ebp],esp ; Save ML stack pointer mov esp,[ArgVector.SaveCStack+ebp] ; Restore C stack pointer sub esp,12 ; Align stack ptr - GCC prefers this push [ArgVector.ThreadId+ebp] - call X86TrapHandler + call [ArgVector.TrapHandlerEntry+ebp] add esp,16 mov esp,[ArgVector.StackPtr+ebp] mov eax,[ArgVector.ExceptionPacket+ebp] cmp eax,1 ; Did we raise an exception? 
    jnz     raisexcept
    frstor  [ArgVector.SaveFP+ebp]
    mov     eax,[ArgVector.SaveRAX+ebp]
    mov     ebx,[ArgVector.SaveRBX+ebp]
    mov     ecx,[ArgVector.SaveRCX+ebp]
    mov     edx,[ArgVector.SaveRDX+ebp]
    mov     esi,[ArgVector.SaveRSI+ebp]
    mov     edi,[ArgVector.SaveRDI+ebp]
    cld                                     ; Clear this just in case
    ret

raisexcept:
    mov     ecx,[ArgVector.HandlerRegister+ebp]
    jmp     dword ptr [ecx]

; Define standard call macro.
; Defined as a MASM macro because there are multiple instructions.
CALL_EXTRA  MACRO   index
    mov     byte ptr [ArgVector.ReturnReason+ebp],index
    jmp     CallTrapHandler
ENDM

; This implements atomic subtraction in the same way as atomic_decrement
; N.B. It is called from the RTS so uses C linkage conventions.
PUBLIC X86AsmAtomicDecrement
X86AsmAtomicDecrement:
    mov     eax,4[esp]  ; Use ecx and eax because they are volatile (unlike ebx on X86/64/Unix)
    mov     ecx,-2
    lock xadd [eax],ecx
    sub     ecx,2
    mov     eax,ecx
    ret

CREATE_EXTRA_CALL MACRO index
    PUBLIC  X86AsmCallExtra&index&
X86AsmCallExtra&index&:
    CALL_EXTRA  index
ENDM

CREATE_EXTRA_CALL RETURN_HEAP_OVERFLOW
CREATE_EXTRA_CALL RETURN_STACK_OVERFLOW
CREATE_EXTRA_CALL RETURN_STACK_OVERFLOWEX

END
diff --git a/libpolyml/x86assembly_masm64.S b/libpolyml/x86assembly_masm64.S
index 2c9463af..3806c2da 100644
--- a/libpolyml/x86assembly_masm64.S
+++ b/libpolyml/x86assembly_masm64.S
@@ -1,245 +1,246 @@
;
;  Title:  Assembly code routines for the poly system.
;  Author:    David Matthews
;  Copyright (c) David C. J. Matthews 2000-2020
;
;  This library is free software; you can redistribute it and/or
;  modify it under the terms of the GNU Lesser General Public
;  License version 2.1 as published by the Free Software Foundation.
;
;  This library is distributed in the hope that it will be useful,
;  but WITHOUT ANY WARRANTY; without even the implied warranty of
;  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;  Lesser General Public License for more details.
;
;  You should have received a copy of the GNU Lesser General Public
;  License along with this library; if not, write to the Free Software
;  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
;
;
; Registers used :-
;
;  rax: First argument to function.  Result of function call.
;  rbx: Second argument to function.
;  rcx: General register
;  rdx: Closure pointer in call.
;  rbp: Points to memory used for extra registers
;  rsi: General register.
;  rdi: General register.
;  rsp: Stack pointer.
;  r8:  Third argument to function
;  r9:  Fourth argument to function
;  r10: Fifth argument to function
;  r11: General register
;  r12: General register
;  r13: General register
;  r14: General register
;  r15: Memory allocation pointer

; Extra entries on the C stack
Fr_Size             EQU     64      ; Must be multiple of 16 to get alignment correct

; This is the argument vector passed in to X86AsmSwitchToPoly
; It is used to initialise the frame.  A few values are updated
; when ML returns.
ArgVector STRUCT
LocalMPointer       QWORD   ?
HandlerRegister     QWORD   ?
LocalMbottom        QWORD   ?
StackLimit          QWORD   ?
ExceptionPacket     QWORD   ?       ; Address of packet to raise
UnusedRequestCode   DB      ?       ; Byte: Io function to call.
UnusedFlag          DB      ?
ReturnReason        DB      ?       ; Byte: Reason for returning from ML.
UnusedRestore       DB      ?       ; Byte:
UnusedAlign         DWORD   ?
SaveCStack          QWORD   ?       ; Saved C stack frame
ThreadId            QWORD   ?       ; My thread id
StackPtr            QWORD   ?       ; Stack pointer
UnusedProgramCtr    QWORD   ?
HeapOverFlowCall    QWORD   ?
StackOverFlowCall   QWORD   ?
StackOverFlowCallEx QWORD   ?
+TrapHandlerEntry    QWORD   ?
SaveRAX             QWORD   ?
SaveRBX             QWORD   ?
SaveRCX             QWORD   ?
SaveRDX             QWORD   ?
SaveRSI             QWORD   ?
SaveRDI             QWORD   ?
SaveR8              QWORD   ?
SaveR9              QWORD   ?
SaveR10             QWORD   ?
SaveR11             QWORD   ?
SaveR12             QWORD   ?
SaveR13             QWORD   ?
SaveR14             QWORD   ?
SaveXMM0            QWORD   ?
SaveXMM1            QWORD   ?
SaveXMM2            QWORD   ?
SaveXMM3            QWORD   ?
SaveXMM4            QWORD   ?
SaveXMM5            QWORD   ?
SaveXMM6            QWORD   ?
ArgVector ENDS

RETURN_HEAP_OVERFLOW        EQU 1
RETURN_STACK_OVERFLOW       EQU 2
RETURN_STACK_OVERFLOWEX     EQU 3
RETURN_KILL_SELF            EQU 9

;
; CODE STARTS HERE
;
.CODE

; Define standard call macro.
CALL_EXTRA  MACRO   index
    mov     byte ptr [ArgVector.ReturnReason+rbp],index
    jmp     CallTrapHandler
ENDM

; Enter ML code.  This is now only ever used to start a new thread.
; It is probably unnecessary to save the callee-save regs or load the ML regs.
; This does not set up a correct frame because we do not want to reserve a register for
; that.  RBP needs to be the original argument because we need to be able to modify
; the stack limit "register" from another thread in order to be able to interrupt
; this one.
X86AsmSwitchToPoly  PROC FRAME
    push    rbp                             ; Standard entry sequence
    push    rbx                             ; Save callee-save registers
    push    r12
    push    r13
    push    r14
    push    r15
    push    rdi                             ; Callee save in Windows
    push    rsi                             ; Strictly, we should also save xmm6
    .endprolog
    mov     rbp,rcx                         ; Move argument into rbp - this is definitely non-standard
    sub     rsp,(Fr_size-56)
    mov     [ArgVector.SaveCStack+rcx],rsp  ; Save the C stack pointer
    mov     r15,[ArgVector.LocalMpointer+rbp]
    mov     rsp,[ArgVector.StackPtr+rbp]
    movsd   xmm0,[ArgVector.SaveXMM0+rbp]
    movsd   xmm1,[ArgVector.SaveXMM1+rbp]
    movsd   xmm2,[ArgVector.SaveXMM2+rbp]
    movsd   xmm3,[ArgVector.SaveXMM3+rbp]
    movsd   xmm4,[ArgVector.SaveXMM4+rbp]
    movsd   xmm5,[ArgVector.SaveXMM5+rbp]
    movsd   xmm6,[ArgVector.SaveXMM6+rbp]
    mov     rbx,[ArgVector.SaveRBX+rbp]
    mov     rcx,[ArgVector.SaveRCX+rbp]
    mov     rdx,[ArgVector.SaveRDX+rbp]
    mov     rsi,[ArgVector.SaveRSI+rbp]
    mov     rdi,[ArgVector.SaveRDI+rbp]
    mov     r8,[ArgVector.SaveR8+rbp]
    mov     r9,[ArgVector.SaveR9+rbp]
    mov     r10,[ArgVector.SaveR10+rbp]
    mov     r11,[ArgVector.SaveR11+rbp]
    mov     r12,[ArgVector.SaveR12+rbp]
    mov     r13,[ArgVector.SaveR13+rbp]
    mov     r14,[ArgVector.SaveR14+rbp]
    mov     rax,[ArgVector.SaveRAX+rbp]
    cld                                     ; Clear this just in case
#ifdef POLYML32IN64
    jmp     qword ptr [rbx+rdx*4]
#else
    jmp     qword ptr [rdx]
#endif
; Everything up to here is considered as part of the X86AsmSwitchToPoly proc
X86AsmSwitchToPoly ENDP
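The trap handler that follows is now reached indirectly: the RTS stores the handler's address in the new TrapHandlerEntry slot of the argument vector before entering ML code, and CallTrapHandler calls through that slot instead of naming X86TrapHandler directly (the GAS version above likewise replaces the @plt call). A rough C++ sketch of that interface, mirroring the 64-bit ArgVector layout; the type and field names and the enterML helper are illustrative only, not the RTS's actual declarations, and the handler signature is inferred from the single ThreadId argument set up below.

    #include <cstdint>

    // Hypothetical mirror of the 64-bit ArgVector STRUCT; field order and sizes
    // must match the assembly offsets exactly, including the new slot.
    struct AssemblyArgs {
        void*    localMPointer;
        void*    handlerRegister;
        void*    localMbottom;
        void*    stackLimit;
        void*    exceptionPacket;
        uint8_t  unusedRequestCode, unusedFlag, returnReason, unusedRestore;
        uint32_t unusedAlign;
        void*    saveCStack;
        void*    threadId;
        void*    stackPtr;
        void*    unusedProgramCtr;
        void*    heapOverflowCall;
        void*    stackOverflowCall;
        void*    stackOverflowCallEx;
        void   (*trapHandlerEntry)(void* threadId);  // New: filled in by the RTS
        // ... saved integer and XMM registers follow, as in the STRUCT.
    };

    extern "C" void X86AsmSwitchToPoly(AssemblyArgs* args);
    extern "C" void X86TrapHandler(void* threadId);   // What the slot points at

    // Illustrative helper: set the slot, then enter ML.  The assembly later does
    // "call [ArgVector.TrapHandlerEntry+rbp]" rather than calling the name.
    static void enterML(AssemblyArgs* args) {
        args->trapHandlerEntry = X86TrapHandler;
        X86AsmSwitchToPoly(args);
    }

One consequence of calling through the vector is that the saved assembly code no longer needs a link-time reference (or PLT entry) for the handler; the address is simply data in the argument block.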
; Save all the registers and enter the trap handler.
; It is probably unnecessary to save the FP state now.
X86TrapHandler PROTO C

CallTrapHandler:
    mov     [ArgVector.SaveRAX+rbp],rax
    mov     [ArgVector.SaveRBX+rbp],rbx
    mov     [ArgVector.SaveRCX+rbp],rcx
    mov     [ArgVector.SaveRDX+rbp],rdx
    mov     [ArgVector.SaveRSI+rbp],rsi
    mov     [ArgVector.SaveRDI+rbp],rdi
    movsd   [ArgVector.SaveXMM0+rbp],xmm0
    movsd   [ArgVector.SaveXMM1+rbp],xmm1
    movsd   [ArgVector.SaveXMM2+rbp],xmm2
    movsd   [ArgVector.SaveXMM3+rbp],xmm3
    movsd   [ArgVector.SaveXMM4+rbp],xmm4
    movsd   [ArgVector.SaveXMM5+rbp],xmm5
    movsd   [ArgVector.SaveXMM6+rbp],xmm6
    mov     [ArgVector.SaveR8+rbp],r8
    mov     [ArgVector.SaveR9+rbp],r9
    mov     [ArgVector.SaveR10+rbp],r10
    mov     [ArgVector.SaveR11+rbp],r11
    mov     [ArgVector.SaveR12+rbp],r12
    mov     [ArgVector.SaveR13+rbp],r13
    mov     [ArgVector.SaveR14+rbp],r14
    mov     [ArgVector.StackPtr+rbp],rsp        ; Save ML stack pointer
    mov     [ArgVector.LocalMpointer+rbp],r15   ; Save back heap pointer
    mov     rsp,[ArgVector.SaveCStack+rbp]      ; Restore C stack pointer
    sub     rsp,32                              ; Create Windows save area
    mov     rcx,[ArgVector.ThreadId+rbp]
-    call    X86TrapHandler
+    call    [ArgVector.TrapHandlerEntry+rbp]
    add     rsp,32
    mov     r15,[ArgVector.LocalMpointer+rbp]
    mov     rsp,[ArgVector.StackPtr+rbp]
    movsd   xmm0,[ArgVector.SaveXMM0+rbp]
    movsd   xmm1,[ArgVector.SaveXMM1+rbp]
    movsd   xmm2,[ArgVector.SaveXMM2+rbp]
    movsd   xmm3,[ArgVector.SaveXMM3+rbp]
    movsd   xmm4,[ArgVector.SaveXMM4+rbp]
    movsd   xmm5,[ArgVector.SaveXMM5+rbp]
    movsd   xmm6,[ArgVector.SaveXMM6+rbp]
    mov     rbx,[ArgVector.SaveRBX+rbp]
    mov     rcx,[ArgVector.SaveRCX+rbp]
    mov     rdx,[ArgVector.SaveRDX+rbp]
    mov     rsi,[ArgVector.SaveRSI+rbp]
    mov     rdi,[ArgVector.SaveRDI+rbp]
    mov     r8,[ArgVector.SaveR8+rbp]
    mov     r9,[ArgVector.SaveR9+rbp]
    mov     r10,[ArgVector.SaveR10+rbp]
    mov     r11,[ArgVector.SaveR11+rbp]
    mov     r12,[ArgVector.SaveR12+rbp]
    mov     r13,[ArgVector.SaveR13+rbp]
    mov     r14,[ArgVector.SaveR14+rbp]
    mov     rax,[ArgVector.ExceptionPacket+rbp]
    cmp     rax,1                               ; Did we raise an exception?
    jnz     raisexcept
    mov     rax,[ArgVector.SaveRAX+rbp]
    cld                                         ; Clear this just in case
    ret

raisexcept:
    mov     rcx,[ArgVector.HandlerRegister+rbp]
    jmp     qword ptr [rcx]

; This implements atomic subtraction in the same way as atomic_decrement
PUBLIC X86AsmAtomicDecrement
X86AsmAtomicDecrement:
    mov     rax,rcx     ; Use rcx and rax because they are volatile (unlike rbx on X86/64/Unix)
    mov     rcx,-2
#ifdef POLYML32IN64
    lock xadd [rax],ecx     ;# Rax is an absolute address but this is only a word
#else
    lock xadd [rax],rcx
#endif
    sub     rcx,2
    mov     rax,rcx
    ret

CREATE_EXTRA_CALL MACRO index
    PUBLIC  X86AsmCallExtra&index&
X86AsmCallExtra&index&:
    CALL_EXTRA  index
ENDM

CREATE_EXTRA_CALL RETURN_HEAP_OVERFLOW
CREATE_EXTRA_CALL RETURN_STACK_OVERFLOW
CREATE_EXTRA_CALL RETURN_STACK_OVERFLOWEX

END
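The X86AsmAtomicDecrement routines in all three files do the same thing: atomically add -2 to the word with lock xadd and return the updated value. The constant is 2 rather than 1 presumably because the count is held as a tagged ML integer (a value n stored as 2n+1), so subtracting 2 from the machine word decrements the ML value by one. A small C++ equivalent under that assumption; the type and function names here are illustrative, not the RTS's own.

    #include <atomic>
    #include <cstdint>

    // A tagged ML integer n is assumed to be stored as 2*n + 1, so an atomic
    // subtraction of 2 decrements the ML value by one, exactly like the
    // "mov rcx,-2 / lock xadd [rax],rcx / sub rcx,2" sequence above.
    using MLTaggedWord = std::atomic<intptr_t>;

    inline intptr_t atomicDecrement(MLTaggedWord* p) {
        // fetch_sub returns the old word; subtract 2 again to get the new tagged
        // value, mirroring the "sub rcx,2" that follows the xadd.
        return p->fetch_sub(2) - 2;
    }

    // Untagging for inspection: (tagged - 1) / 2 recovers the ML integer.
    inline intptr_t untag(intptr_t tagged) { return (tagged - 1) / 2; }

Returning the post-decrement value lets the caller test for zero (tagged value 1) without a second read, which is why the assembly adjusts the register after the exchange instead of simply returning the old contents.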