Diffstat (limited to 'lld/MachO/SyntheticSections.cpp')
-rw-r--r--  lld/MachO/SyntheticSections.cpp  1155
1 file changed, 905 insertions(+), 250 deletions(-)
diff --git a/lld/MachO/SyntheticSections.cpp b/lld/MachO/SyntheticSections.cpp
index 3f5413696d4b..f49340677270 100644
--- a/lld/MachO/SyntheticSections.cpp
+++ b/lld/MachO/SyntheticSections.cpp
@@ -7,25 +7,36 @@
//===----------------------------------------------------------------------===//
#include "SyntheticSections.h"
+#include "ConcatOutputSection.h"
#include "Config.h"
#include "ExportTrie.h"
#include "InputFiles.h"
#include "MachOStructs.h"
-#include "MergedOutputSection.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
-#include "Writer.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/SHA256.h"
+
+#if defined(__APPLE__)
+#include <sys/mman.h>
+#endif
+
+#ifdef LLVM_HAVE_LIBXAR
+#include <fcntl.h>
+#include <xar/xar.h>
+#endif
using namespace llvm;
+using namespace llvm::MachO;
using namespace llvm::support;
using namespace llvm::support::endian;
using namespace lld;
@@ -35,14 +46,23 @@ InStruct macho::in;
std::vector<SyntheticSection *> macho::syntheticSections;
SyntheticSection::SyntheticSection(const char *segname, const char *name)
- : OutputSection(SyntheticKind, name), segname(segname) {
+ : OutputSection(SyntheticKind, name) {
+ std::tie(this->segname, this->name) = maybeRenameSection({segname, name});
+ isec = make<ConcatInputSection>(segname, name);
+ isec->parent = this;
syntheticSections.push_back(this);
}
// dyld3's MachOLoaded::getSlide() assumes that the __TEXT segment starts
// from the beginning of the file (i.e. the header).
MachHeaderSection::MachHeaderSection()
- : SyntheticSection(segment_names::text, section_names::header) {}
+ : SyntheticSection(segment_names::text, section_names::header) {
+ // XXX: This is a hack. (See D97007)
+ // Setting the index to 1 to pretend that this section is the text
+ // section.
+ index = 1;
+ isec->isFinal = true;
+}
void MachHeaderSection::addLoadCommand(LoadCommand *lc) {
loadCommands.push_back(lc);
@@ -50,42 +70,68 @@ void MachHeaderSection::addLoadCommand(LoadCommand *lc) {
}
uint64_t MachHeaderSection::getSize() const {
- return sizeof(MachO::mach_header_64) + sizeOfCmds + config->headerPad;
+ uint64_t size = target->headerSize + sizeOfCmds + config->headerPad;
+ // If we are emitting an encryptable binary, our load commands must have a
+ // separate (non-encrypted) page to themselves.
+ if (config->emitEncryptionInfo)
+ size = alignTo(size, target->getPageSize());
+ return size;
+}
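(Illustrative arithmetic only, not from this patch: on a 64-bit target headerSize is 32 bytes, so with e.g. sizeOfCmds = 0x4c8 and a headerpad of 0x20 this returns 0x508; if encryption info is being emitted, that value is then rounded up to the target page size, e.g. 0x4000 on arm64, giving the load commands a page to themselves.)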
+
+static uint32_t cpuSubtype() {
+ uint32_t subtype = target->cpuSubtype;
+
+ if (config->outputType == MH_EXECUTE && !config->staticLink &&
+ target->cpuSubtype == CPU_SUBTYPE_X86_64_ALL &&
+ config->platform() == PlatformKind::macOS &&
+ config->platformInfo.minimum >= VersionTuple(10, 5))
+ subtype |= CPU_SUBTYPE_LIB64;
+
+ return subtype;
}
void MachHeaderSection::writeTo(uint8_t *buf) const {
- auto *hdr = reinterpret_cast<MachO::mach_header_64 *>(buf);
- hdr->magic = MachO::MH_MAGIC_64;
- hdr->cputype = MachO::CPU_TYPE_X86_64;
- hdr->cpusubtype = MachO::CPU_SUBTYPE_X86_64_ALL | MachO::CPU_SUBTYPE_LIB64;
+ auto *hdr = reinterpret_cast<mach_header *>(buf);
+ hdr->magic = target->magic;
+ hdr->cputype = target->cpuType;
+ hdr->cpusubtype = cpuSubtype();
hdr->filetype = config->outputType;
hdr->ncmds = loadCommands.size();
hdr->sizeofcmds = sizeOfCmds;
- hdr->flags = MachO::MH_NOUNDEFS | MachO::MH_DYLDLINK | MachO::MH_TWOLEVEL;
+ hdr->flags = MH_DYLDLINK;
- if (config->outputType == MachO::MH_DYLIB && !config->hasReexports)
- hdr->flags |= MachO::MH_NO_REEXPORTED_DYLIBS;
+ if (config->namespaceKind == NamespaceKind::twolevel)
+ hdr->flags |= MH_NOUNDEFS | MH_TWOLEVEL;
- if (config->outputType == MachO::MH_EXECUTE && config->isPic)
- hdr->flags |= MachO::MH_PIE;
+ if (config->outputType == MH_DYLIB && !config->hasReexports)
+ hdr->flags |= MH_NO_REEXPORTED_DYLIBS;
+
+ if (config->markDeadStrippableDylib)
+ hdr->flags |= MH_DEAD_STRIPPABLE_DYLIB;
+
+ if (config->outputType == MH_EXECUTE && config->isPic)
+ hdr->flags |= MH_PIE;
+
+ if (config->outputType == MH_DYLIB && config->applicationExtension)
+ hdr->flags |= MH_APP_EXTENSION_SAFE;
if (in.exports->hasWeakSymbol || in.weakBinding->hasNonWeakDefinition())
- hdr->flags |= MachO::MH_WEAK_DEFINES;
+ hdr->flags |= MH_WEAK_DEFINES;
if (in.exports->hasWeakSymbol || in.weakBinding->hasEntry())
- hdr->flags |= MachO::MH_BINDS_TO_WEAK;
+ hdr->flags |= MH_BINDS_TO_WEAK;
- for (OutputSegment *seg : outputSegments) {
- for (OutputSection *osec : seg->getSections()) {
+ for (const OutputSegment *seg : outputSegments) {
+ for (const OutputSection *osec : seg->getSections()) {
if (isThreadLocalVariables(osec->flags)) {
- hdr->flags |= MachO::MH_HAS_TLV_DESCRIPTORS;
+ hdr->flags |= MH_HAS_TLV_DESCRIPTORS;
break;
}
}
}
- uint8_t *p = reinterpret_cast<uint8_t *>(hdr + 1);
- for (LoadCommand *lc : loadCommands) {
+ uint8_t *p = reinterpret_cast<uint8_t *>(hdr) + target->headerSize;
+ for (const LoadCommand *lc : loadCommands) {
lc->writeTo(p);
p += lc->getSize();
}
@@ -94,12 +140,6 @@ void MachHeaderSection::writeTo(uint8_t *buf) const {
PageZeroSection::PageZeroSection()
: SyntheticSection(segment_names::pageZero, section_names::pageZero) {}
-uint64_t Location::getVA() const {
- if (const auto *isec = section.dyn_cast<const InputSection *>())
- return isec->getVA() + offset;
- return section.get<const OutputSection *>()->addr + offset;
-}
-
RebaseSection::RebaseSection()
: LinkEditSection(segment_names::linkEdit, section_names::rebase) {}
@@ -115,7 +155,6 @@ struct Rebase {
// using a single DO_REBASE opcode. To take advantage of it, we delay emitting
// `DO_REBASE` until we have reached the end of a contiguous sequence.
static void encodeDoRebase(Rebase &rebase, raw_svector_ostream &os) {
- using namespace llvm::MachO;
assert(rebase.consecutiveCount != 0);
if (rebase.consecutiveCount <= REBASE_IMMEDIATE_MASK) {
os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_IMM_TIMES |
@@ -129,7 +168,6 @@ static void encodeDoRebase(Rebase &rebase, raw_svector_ostream &os) {
static void encodeRebase(const OutputSection *osec, uint64_t outSecOff,
Rebase &lastRebase, raw_svector_ostream &os) {
- using namespace llvm::MachO;
OutputSegment *seg = osec->parent;
uint64_t offset = osec->getSegmentOffset() + outSecOff;
if (lastRebase.segment != seg || lastRebase.offset != offset) {
@@ -151,11 +189,10 @@ static void encodeRebase(const OutputSection *osec, uint64_t outSecOff,
}
++lastRebase.consecutiveCount;
// DO_REBASE causes dyld to both perform the rebase and increment the offset
- lastRebase.offset += WordSize;
+ lastRebase.offset += target->wordSize;
}
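As a standalone illustration of the run-length idea behind encodeRebase()/encodeDoRebase() above (a minimal sketch; the helper name is hypothetical and the lastRebase state threading is omitted, but the llvm::MachO constants are the real ones):

#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"

// Emit one contiguous run of `count` pointer-sized rebases starting at
// `offset` within segment `segIndex`. Consecutive fixups collapse into a
// single DO_REBASE_*_TIMES opcode instead of one DO_REBASE per pointer.
static void emitRebaseRun(uint8_t segIndex, uint64_t offset, uint64_t count,
                          llvm::raw_ostream &os) {
  using namespace llvm::MachO;
  os << static_cast<uint8_t>(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                             segIndex);
  llvm::encodeULEB128(offset, os);
  if (count <= REBASE_IMMEDIATE_MASK) {
    // A short run fits in the opcode's immediate nibble: one byte total.
    os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_IMM_TIMES | count);
  } else {
    os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
    llvm::encodeULEB128(count, os);
  }
}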
void RebaseSection::finalizeContents() {
- using namespace llvm::MachO;
if (locations.empty())
return;
@@ -165,16 +202,11 @@ void RebaseSection::finalizeContents() {
os << static_cast<uint8_t>(REBASE_OPCODE_SET_TYPE_IMM | REBASE_TYPE_POINTER);
llvm::sort(locations, [](const Location &a, const Location &b) {
- return a.getVA() < b.getVA();
+ return a.isec->getVA(a.offset) < b.isec->getVA(b.offset);
});
- for (const Location &loc : locations) {
- if (const auto *isec = loc.section.dyn_cast<const InputSection *>()) {
- encodeRebase(isec->parent, isec->outSecOff + loc.offset, lastRebase, os);
- } else {
- const auto *osec = loc.section.get<const OutputSection *>();
- encodeRebase(osec, loc.offset, lastRebase, os);
- }
- }
+ for (const Location &loc : locations)
+ encodeRebase(loc.isec->parent, loc.isec->getOffset(loc.offset), lastRebase,
+ os);
if (lastRebase.consecutiveCount != 0)
encodeDoRebase(lastRebase, os);
@@ -188,8 +220,25 @@ void RebaseSection::writeTo(uint8_t *buf) const {
NonLazyPointerSectionBase::NonLazyPointerSectionBase(const char *segname,
const char *name)
: SyntheticSection(segname, name) {
- align = 8;
- flags = MachO::S_NON_LAZY_SYMBOL_POINTERS;
+ align = target->wordSize;
+}
+
+void macho::addNonLazyBindingEntries(const Symbol *sym,
+ const InputSection *isec, uint64_t offset,
+ int64_t addend) {
+ if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
+ in.binding->addEntry(dysym, isec, offset, addend);
+ if (dysym->isWeakDef())
+ in.weakBinding->addEntry(sym, isec, offset, addend);
+ } else if (const auto *defined = dyn_cast<Defined>(sym)) {
+ in.rebase->addEntry(isec, offset);
+ if (defined->isExternalWeakDef())
+ in.weakBinding->addEntry(sym, isec, offset, addend);
+ } else {
+ // Undefined symbols are filtered out in scanRelocations(); we should never
+ // get here
+ llvm_unreachable("cannot bind to an undefined symbol");
+ }
}
void NonLazyPointerSectionBase::addEntry(Symbol *sym) {
@@ -197,14 +246,25 @@ void NonLazyPointerSectionBase::addEntry(Symbol *sym) {
assert(!sym->isInGot());
sym->gotIndex = entries.size() - 1;
- addNonLazyBindingEntries(sym, this, sym->gotIndex * WordSize);
+ addNonLazyBindingEntries(sym, isec, sym->gotIndex * target->wordSize);
}
}
void NonLazyPointerSectionBase::writeTo(uint8_t *buf) const {
for (size_t i = 0, n = entries.size(); i < n; ++i)
if (auto *defined = dyn_cast<Defined>(entries[i]))
- write64le(&buf[i * WordSize], defined->getVA());
+ write64le(&buf[i * target->wordSize], defined->getVA());
+}
+
+GotSection::GotSection()
+ : NonLazyPointerSectionBase(segment_names::dataConst, section_names::got) {
+ flags = S_NON_LAZY_SYMBOL_POINTERS;
+}
+
+TlvPointerSection::TlvPointerSection()
+ : NonLazyPointerSectionBase(segment_names::data,
+ section_names::threadPtrs) {
+ flags = S_THREAD_LOCAL_VARIABLE_POINTERS;
}
BindingSection::BindingSection()
@@ -215,7 +275,13 @@ struct Binding {
OutputSegment *segment = nullptr;
uint64_t offset = 0;
int64_t addend = 0;
- uint8_t ordinal = 0;
+};
+struct BindIR {
+ // The default value of 0xF0 is not a valid opcode and should make the program
+ // scream instead of accidentally writing "valid" values.
+ uint8_t opcode = 0xF0;
+ uint64_t data = 0;
+ uint64_t consecutiveCount = 0;
};
} // namespace
@@ -225,66 +291,181 @@ struct Binding {
// The bind opcode "interpreter" remembers the values of each binding field, so
// we only need to encode the differences between bindings. Hence the use of
// lastBinding.
-static void encodeBinding(const Symbol *sym, const OutputSection *osec,
- uint64_t outSecOff, int64_t addend,
- bool isWeakBinding, Binding &lastBinding,
- raw_svector_ostream &os) {
- using namespace llvm::MachO;
+static void encodeBinding(const OutputSection *osec, uint64_t outSecOff,
+ int64_t addend, Binding &lastBinding,
+ std::vector<BindIR> &opcodes) {
OutputSegment *seg = osec->parent;
uint64_t offset = osec->getSegmentOffset() + outSecOff;
if (lastBinding.segment != seg) {
- os << static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
- seg->index);
- encodeULEB128(offset, os);
+ opcodes.push_back(
+ {static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
+ seg->index),
+ offset});
lastBinding.segment = seg;
lastBinding.offset = offset;
} else if (lastBinding.offset != offset) {
- os << static_cast<uint8_t>(BIND_OPCODE_ADD_ADDR_ULEB);
- encodeULEB128(offset - lastBinding.offset, os);
+ opcodes.push_back({BIND_OPCODE_ADD_ADDR_ULEB, offset - lastBinding.offset});
lastBinding.offset = offset;
}
if (lastBinding.addend != addend) {
- os << static_cast<uint8_t>(BIND_OPCODE_SET_ADDEND_SLEB);
- encodeSLEB128(addend, os);
+ opcodes.push_back(
+ {BIND_OPCODE_SET_ADDEND_SLEB, static_cast<uint64_t>(addend)});
lastBinding.addend = addend;
}
- uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
- if (!isWeakBinding && sym->isWeakRef())
- flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;
-
- os << flags << sym->getName() << '\0'
- << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER)
- << static_cast<uint8_t>(BIND_OPCODE_DO_BIND);
+ opcodes.push_back({BIND_OPCODE_DO_BIND, 0});
// DO_BIND causes dyld to both perform the binding and increment the offset
- lastBinding.offset += WordSize;
+ lastBinding.offset += target->wordSize;
}
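To make the delta encoding concrete (hypothetical numbers, not taken from this patch): suppose the previous binding left lastBinding at some segment with offset 0x18 and addend 0, and the next two entries for the same symbol land at offsets 0x20 and 0x30 of that segment, addend 0. The first entry emits only ADD_ADDR_ULEB 0x8 followed by DO_BIND (which advances the implicit offset to 0x28); the second emits ADD_ADDR_ULEB 0x8 and DO_BIND again. No SET_SEGMENT_AND_OFFSET_ULEB, SET_ADDEND_SLEB, or symbol-name opcode needs to be repeated, and the DO_BIND/ADD_ADDR pairs are exactly what optimizeOpcodes() below folds into the DO_BIND_ADD_ADDR_* forms.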
-// Non-weak bindings need to have their dylib ordinal encoded as well.
-static void encodeDylibOrdinal(const DylibSymbol *dysym, Binding &lastBinding,
- raw_svector_ostream &os) {
- using namespace llvm::MachO;
- if (lastBinding.ordinal != dysym->file->ordinal) {
- if (dysym->file->ordinal <= BIND_IMMEDIATE_MASK) {
- os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM |
- dysym->file->ordinal);
+static void optimizeOpcodes(std::vector<BindIR> &opcodes) {
+ // Pass 1: Combine bind/add pairs
+ size_t i;
+ int pWrite = 0;
+ for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
+ if ((opcodes[i].opcode == BIND_OPCODE_ADD_ADDR_ULEB) &&
+ (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND)) {
+ opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB;
+ opcodes[pWrite].data = opcodes[i].data;
+ ++i;
+ } else {
+ opcodes[pWrite] = opcodes[i - 1];
+ }
+ }
+ if (i == opcodes.size())
+ opcodes[pWrite] = opcodes[i - 1];
+ opcodes.resize(pWrite + 1);
+
+ // Pass 2: Compress two or more bind_add opcodes
+ pWrite = 0;
+ for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
+ if ((opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
+ (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
+ (opcodes[i].data == opcodes[i - 1].data)) {
+ opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB;
+ opcodes[pWrite].consecutiveCount = 2;
+ opcodes[pWrite].data = opcodes[i].data;
+ ++i;
+ while (i < opcodes.size() &&
+ (opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
+ (opcodes[i].data == opcodes[i - 1].data)) {
+ opcodes[pWrite].consecutiveCount++;
+ ++i;
+ }
} else {
- os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
- encodeULEB128(dysym->file->ordinal, os);
+ opcodes[pWrite] = opcodes[i - 1];
+ }
+ }
+ if (i == opcodes.size())
+ opcodes[pWrite] = opcodes[i - 1];
+ opcodes.resize(pWrite + 1);
+
+ // Pass 3: Use immediate encodings
+ // Every binding is the size of one pointer. If the next binding is a
+ // multiple of wordSize away that is within BIND_IMMEDIATE_MASK, the
+ // opcode can be scaled by wordSize into a single byte and dyld will
+ // expand it to the correct address.
+ for (auto &p : opcodes) {
+ // It's unclear why the check needs to be less than BIND_IMMEDIATE_MASK,
+ // but ld64 currently does this. This may be a bug, but
+ // for now, perform the same behavior to prevent mysterious bugs.
+ if ((p.opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
+ ((p.data / target->wordSize) < BIND_IMMEDIATE_MASK) &&
+ ((p.data % target->wordSize) == 0)) {
+ p.opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED;
+ p.data /= target->wordSize;
}
- lastBinding.ordinal = dysym->file->ordinal;
+ }
+}
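A small worked trace of the three passes (wordSize = 8; operand data in parentheses; made-up input, not from this patch):

  input:  DO_BIND, ADD_ADDR_ULEB(8), DO_BIND, ADD_ADDR_ULEB(8), DO_BIND
  pass 1: DO_BIND_ADD_ADDR_ULEB(8), DO_BIND_ADD_ADDR_ULEB(8), DO_BIND
  pass 2: DO_BIND_ULEB_TIMES_SKIPPING_ULEB(count=2, skip=8), DO_BIND
  pass 3: unchanged here; a lone DO_BIND_ADD_ADDR_ULEB(8) would instead become
          DO_BIND_ADD_ADDR_IMM_SCALED(1), since 8 / wordSize = 1 < BIND_IMMEDIATE_MASK.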
+
+static void flushOpcodes(const BindIR &op, raw_svector_ostream &os) {
+ uint8_t opcode = op.opcode & BIND_OPCODE_MASK;
+ switch (opcode) {
+ case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
+ case BIND_OPCODE_ADD_ADDR_ULEB:
+ case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
+ os << op.opcode;
+ encodeULEB128(op.data, os);
+ break;
+ case BIND_OPCODE_SET_ADDEND_SLEB:
+ os << op.opcode;
+ encodeSLEB128(static_cast<int64_t>(op.data), os);
+ break;
+ case BIND_OPCODE_DO_BIND:
+ os << op.opcode;
+ break;
+ case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
+ os << op.opcode;
+ encodeULEB128(op.consecutiveCount, os);
+ encodeULEB128(op.data, os);
+ break;
+ case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
+ os << static_cast<uint8_t>(op.opcode | op.data);
+ break;
+ default:
+ llvm_unreachable("cannot bind to an unrecognized symbol");
+ }
+}
+
+// Non-weak bindings need to have their dylib ordinal encoded as well.
+static int16_t ordinalForDylibSymbol(const DylibSymbol &dysym) {
+ if (config->namespaceKind == NamespaceKind::flat || dysym.isDynamicLookup())
+ return static_cast<int16_t>(BIND_SPECIAL_DYLIB_FLAT_LOOKUP);
+ assert(dysym.getFile()->isReferenced());
+ return dysym.getFile()->ordinal;
+}
+
+static void encodeDylibOrdinal(int16_t ordinal, raw_svector_ostream &os) {
+ if (ordinal <= 0) {
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM |
+ (ordinal & BIND_IMMEDIATE_MASK));
+ } else if (ordinal <= BIND_IMMEDIATE_MASK) {
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal);
+ } else {
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
+ encodeULEB128(ordinal, os);
}
}
static void encodeWeakOverride(const Defined *defined,
raw_svector_ostream &os) {
- using namespace llvm::MachO;
os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM |
BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION)
<< defined->getName() << '\0';
}
+// Organize the bindings so we can encode them with fewer opcodes.
+//
+// First, all bindings for a given symbol should be grouped together.
+// BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM is the largest opcode (since it
+// has an associated symbol string), so we only want to emit it once per symbol.
+//
+// Within each group, we sort the bindings by address. Since bindings are
+// delta-encoded, sorting them allows for a more compact result. Note that
+// sorting by address alone ensures that bindings for the same segment / section
+// are located together, minimizing the number of times we have to emit
+// BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB.
+//
+// Finally, we sort the symbols by the address of their first binding, again
+// to facilitate the delta-encoding process.
+template <class Sym>
+std::vector<std::pair<const Sym *, std::vector<BindingEntry>>>
+sortBindings(const BindingsMap<const Sym *> &bindingsMap) {
+ std::vector<std::pair<const Sym *, std::vector<BindingEntry>>> bindingsVec(
+ bindingsMap.begin(), bindingsMap.end());
+ for (auto &p : bindingsVec) {
+ std::vector<BindingEntry> &bindings = p.second;
+ llvm::sort(bindings, [](const BindingEntry &a, const BindingEntry &b) {
+ return a.target.getVA() < b.target.getVA();
+ });
+ }
+ llvm::sort(bindingsVec, [](const auto &a, const auto &b) {
+ return a.second[0].target.getVA() < b.second[0].target.getVA();
+ });
+ return bindingsVec;
+}
+
// Emit bind opcodes, which are a stream of byte-sized opcodes that dyld
// interprets to update a record with the following fields:
// * segment index (of the segment to write the symbol addresses to, typically
@@ -301,26 +482,33 @@ static void encodeWeakOverride(const Defined *defined,
void BindingSection::finalizeContents() {
raw_svector_ostream os{contents};
Binding lastBinding;
-
- // Since bindings are delta-encoded, sorting them allows for a more compact
- // result. Note that sorting by address alone ensures that bindings for the
- // same segment / section are located together.
- llvm::sort(bindings, [](const BindingEntry &a, const BindingEntry &b) {
- return a.target.getVA() < b.target.getVA();
- });
- for (const BindingEntry &b : bindings) {
- encodeDylibOrdinal(b.dysym, lastBinding, os);
- if (auto *isec = b.target.section.dyn_cast<const InputSection *>()) {
- encodeBinding(b.dysym, isec->parent, isec->outSecOff + b.target.offset,
- b.addend, /*isWeakBinding=*/false, lastBinding, os);
- } else {
- auto *osec = b.target.section.get<const OutputSection *>();
- encodeBinding(b.dysym, osec, b.target.offset, b.addend,
- /*isWeakBinding=*/false, lastBinding, os);
+ int16_t lastOrdinal = 0;
+
+ for (auto &p : sortBindings(bindingsMap)) {
+ const DylibSymbol *sym = p.first;
+ std::vector<BindingEntry> &bindings = p.second;
+ uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
+ if (sym->isWeakRef())
+ flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;
+ os << flags << sym->getName() << '\0'
+ << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
+ int16_t ordinal = ordinalForDylibSymbol(*sym);
+ if (ordinal != lastOrdinal) {
+ encodeDylibOrdinal(ordinal, os);
+ lastOrdinal = ordinal;
}
+ std::vector<BindIR> opcodes;
+ for (const BindingEntry &b : bindings)
+ encodeBinding(b.target.isec->parent,
+ b.target.isec->getOffset(b.target.offset), b.addend,
+ lastBinding, opcodes);
+ if (config->optimize > 1)
+ optimizeOpcodes(opcodes);
+ for (const auto &op : opcodes)
+ flushOpcodes(op, os);
}
- if (!bindings.empty())
- os << static_cast<uint8_t>(MachO::BIND_OPCODE_DONE);
+ if (!bindingsMap.empty())
+ os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}
void BindingSection::writeTo(uint8_t *buf) const {
@@ -337,61 +525,36 @@ void WeakBindingSection::finalizeContents() {
for (const Defined *defined : definitions)
encodeWeakOverride(defined, os);
- // Since bindings are delta-encoded, sorting them allows for a more compact
- // result.
- llvm::sort(bindings,
- [](const WeakBindingEntry &a, const WeakBindingEntry &b) {
- return a.target.getVA() < b.target.getVA();
- });
- for (const WeakBindingEntry &b : bindings) {
- if (auto *isec = b.target.section.dyn_cast<const InputSection *>()) {
- encodeBinding(b.symbol, isec->parent, isec->outSecOff + b.target.offset,
- b.addend, /*isWeakBinding=*/true, lastBinding, os);
- } else {
- auto *osec = b.target.section.get<const OutputSection *>();
- encodeBinding(b.symbol, osec, b.target.offset, b.addend,
- /*isWeakBinding=*/true, lastBinding, os);
- }
+ for (auto &p : sortBindings(bindingsMap)) {
+ const Symbol *sym = p.first;
+ std::vector<BindingEntry> &bindings = p.second;
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM)
+ << sym->getName() << '\0'
+ << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
+ std::vector<BindIR> opcodes;
+ for (const BindingEntry &b : bindings)
+ encodeBinding(b.target.isec->parent,
+ b.target.isec->getOffset(b.target.offset), b.addend,
+ lastBinding, opcodes);
+ if (config->optimize > 1)
+ optimizeOpcodes(opcodes);
+ for (const auto &op : opcodes)
+ flushOpcodes(op, os);
}
- if (!bindings.empty() || !definitions.empty())
- os << static_cast<uint8_t>(MachO::BIND_OPCODE_DONE);
+ if (!bindingsMap.empty() || !definitions.empty())
+ os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}
void WeakBindingSection::writeTo(uint8_t *buf) const {
memcpy(buf, contents.data(), contents.size());
}
-bool macho::needsBinding(const Symbol *sym) {
- if (isa<DylibSymbol>(sym))
- return true;
- if (const auto *defined = dyn_cast<Defined>(sym))
- return defined->isExternalWeakDef();
- return false;
-}
-
-void macho::addNonLazyBindingEntries(const Symbol *sym,
- SectionPointerUnion section,
- uint64_t offset, int64_t addend) {
- if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
- in.binding->addEntry(dysym, section, offset, addend);
- if (dysym->isWeakDef())
- in.weakBinding->addEntry(sym, section, offset, addend);
- } else if (auto *defined = dyn_cast<Defined>(sym)) {
- in.rebase->addEntry(section, offset);
- if (defined->isExternalWeakDef())
- in.weakBinding->addEntry(sym, section, offset, addend);
- } else if (isa<DSOHandle>(sym)) {
- error("cannot bind to " + DSOHandle::name);
- } else {
- // Undefined symbols are filtered out in scanRelocations(); we should never
- // get here
- llvm_unreachable("cannot bind to an undefined symbol");
- }
-}
-
StubsSection::StubsSection()
- : SyntheticSection(segment_names::text, "__stubs") {
- flags = MachO::S_SYMBOL_STUBS;
+ : SyntheticSection(segment_names::text, section_names::stubs) {
+ flags = S_SYMBOL_STUBS | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
+ // The stubs section comprises machine instructions, which are aligned to
+ // 4 bytes on the archs we care about.
+ align = 4;
reserved2 = target->stubSize;
}
@@ -407,6 +570,8 @@ void StubsSection::writeTo(uint8_t *buf) const {
}
}
+void StubsSection::finalize() { isFinal = true; }
+
bool StubsSection::addEntry(Symbol *sym) {
bool inserted = entries.insert(sym);
if (inserted)
@@ -415,7 +580,10 @@ bool StubsSection::addEntry(Symbol *sym) {
}
StubHelperSection::StubHelperSection()
- : SyntheticSection(segment_names::text, "__stub_helper") {}
+ : SyntheticSection(segment_names::text, section_names::stubHelper) {
+ flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
+ align = 4; // This section comprises machine instructions
+}
uint64_t StubHelperSection::getSize() const {
return target->stubHelperHeaderSize +
@@ -434,38 +602,41 @@ void StubHelperSection::writeTo(uint8_t *buf) const {
}
void StubHelperSection::setup() {
- stubBinder = dyn_cast_or_null<DylibSymbol>(symtab->find("dyld_stub_binder"));
- if (stubBinder == nullptr) {
- error("symbol dyld_stub_binder not found (normally in libSystem.dylib). "
- "Needed to perform lazy binding.");
+ Symbol *binder = symtab->addUndefined("dyld_stub_binder", /*file=*/nullptr,
+ /*isWeakRef=*/false);
+ if (auto *undefined = dyn_cast<Undefined>(binder))
+ treatUndefinedSymbol(*undefined,
+ "lazy binding (normally in libSystem.dylib)");
+
+ // treatUndefinedSymbol() can replace binder with a DylibSymbol; re-check.
+ stubBinder = dyn_cast_or_null<DylibSymbol>(binder);
+ if (stubBinder == nullptr)
return;
- }
- stubBinder->refState = RefState::Strong;
+
in.got->addEntry(stubBinder);
+ in.imageLoaderCache->parent =
+ ConcatOutputSection::getOrCreateForInput(in.imageLoaderCache);
inputSections.push_back(in.imageLoaderCache);
+ // Since this isn't in the symbol table or in any input file, the noDeadStrip
+ // argument doesn't matter. It's kept alive by ImageLoaderCacheSection()
+ // setting `live` to true on the backing InputSection.
dyldPrivate =
- make<Defined>("__dyld_private", in.imageLoaderCache, 0,
+ make<Defined>("__dyld_private", nullptr, in.imageLoaderCache, 0, 0,
/*isWeakDef=*/false,
- /*isExternal=*/false, /*isPrivateExtern=*/false);
-}
-
-ImageLoaderCacheSection::ImageLoaderCacheSection() {
- segname = segment_names::data;
- name = "__data";
- uint8_t *arr = bAlloc.Allocate<uint8_t>(WordSize);
- memset(arr, 0, WordSize);
- data = {arr, WordSize};
+ /*isExternal=*/false, /*isPrivateExtern=*/false,
+ /*isThumb=*/false, /*isReferencedDynamically=*/false,
+ /*noDeadStrip=*/false);
}
LazyPointerSection::LazyPointerSection()
- : SyntheticSection(segment_names::data, "__la_symbol_ptr") {
- align = 8;
- flags = MachO::S_LAZY_SYMBOL_POINTERS;
+ : SyntheticSection(segment_names::data, section_names::lazySymbolPtr) {
+ align = target->wordSize;
+ flags = S_LAZY_SYMBOL_POINTERS;
}
uint64_t LazyPointerSection::getSize() const {
- return in.stubs->getEntries().size() * WordSize;
+ return in.stubs->getEntries().size() * target->wordSize;
}
bool LazyPointerSection::isNeeded() const {
@@ -485,7 +656,7 @@ void LazyPointerSection::writeTo(uint8_t *buf) const {
} else {
write64le(buf + off, sym->getVA());
}
- off += WordSize;
+ off += target->wordSize;
}
}
@@ -506,7 +677,8 @@ void LazyBindingSection::writeTo(uint8_t *buf) const {
void LazyBindingSection::addEntry(DylibSymbol *dysym) {
if (entries.insert(dysym)) {
dysym->stubsHelperIndex = entries.size() - 1;
- in.rebase->addEntry(in.lazyPointers, dysym->stubsIndex * WordSize);
+ in.rebase->addEntry(in.lazyPointers->isec,
+ dysym->stubsIndex * target->wordSize);
}
}
@@ -519,52 +691,23 @@ void LazyBindingSection::addEntry(DylibSymbol *dysym) {
uint32_t LazyBindingSection::encode(const DylibSymbol &sym) {
uint32_t opstreamOffset = contents.size();
OutputSegment *dataSeg = in.lazyPointers->parent;
- os << static_cast<uint8_t>(MachO::BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
+ os << static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
dataSeg->index);
- uint64_t offset = in.lazyPointers->addr - dataSeg->firstSection()->addr +
- sym.stubsIndex * WordSize;
+ uint64_t offset = in.lazyPointers->addr - dataSeg->addr +
+ sym.stubsIndex * target->wordSize;
encodeULEB128(offset, os);
- if (sym.file->ordinal <= MachO::BIND_IMMEDIATE_MASK) {
- os << static_cast<uint8_t>(MachO::BIND_OPCODE_SET_DYLIB_ORDINAL_IMM |
- sym.file->ordinal);
- } else {
- os << static_cast<uint8_t>(MachO::BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
- encodeULEB128(sym.file->ordinal, os);
- }
+ encodeDylibOrdinal(ordinalForDylibSymbol(sym), os);
- uint8_t flags = MachO::BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
+ uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
if (sym.isWeakRef())
- flags |= MachO::BIND_SYMBOL_FLAGS_WEAK_IMPORT;
+ flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;
os << flags << sym.getName() << '\0'
- << static_cast<uint8_t>(MachO::BIND_OPCODE_DO_BIND)
- << static_cast<uint8_t>(MachO::BIND_OPCODE_DONE);
+ << static_cast<uint8_t>(BIND_OPCODE_DO_BIND)
+ << static_cast<uint8_t>(BIND_OPCODE_DONE);
return opstreamOffset;
}
-void macho::prepareBranchTarget(Symbol *sym) {
- if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
- if (in.stubs->addEntry(dysym)) {
- if (sym->isWeakDef()) {
- in.binding->addEntry(dysym, in.lazyPointers,
- sym->stubsIndex * WordSize);
- in.weakBinding->addEntry(sym, in.lazyPointers,
- sym->stubsIndex * WordSize);
- } else {
- in.lazyBinding->addEntry(dysym);
- }
- }
- } else if (auto *defined = dyn_cast<Defined>(sym)) {
- if (defined->isExternalWeakDef()) {
- if (in.stubs->addEntry(sym)) {
- in.rebase->addEntry(in.lazyPointers, sym->stubsIndex * WordSize);
- in.weakBinding->addEntry(sym, in.lazyPointers,
- sym->stubsIndex * WordSize);
- }
- }
- }
-}
-
ExportSection::ExportSection()
: LinkEditSection(segment_names::linkEdit, section_names::export_) {}
@@ -572,7 +715,7 @@ void ExportSection::finalizeContents() {
trieBuilder.setImageBase(in.header->addr);
for (const Symbol *sym : symtab->getSymbols()) {
if (const auto *defined = dyn_cast<Defined>(sym)) {
- if (defined->privateExtern)
+ if (defined->privateExtern || !defined->isLive())
continue;
trieBuilder.addSymbol(*defined);
hasWeakSymbol = hasWeakSymbol || sym->isWeakDef();
@@ -583,16 +726,109 @@ void ExportSection::finalizeContents() {
void ExportSection::writeTo(uint8_t *buf) const { trieBuilder.writeTo(buf); }
+DataInCodeSection::DataInCodeSection()
+ : LinkEditSection(segment_names::linkEdit, section_names::dataInCode) {}
+
+template <class LP>
+static std::vector<MachO::data_in_code_entry> collectDataInCodeEntries() {
+ using SegmentCommand = typename LP::segment_command;
+ using Section = typename LP::section;
+
+ std::vector<MachO::data_in_code_entry> dataInCodeEntries;
+ for (const InputFile *inputFile : inputFiles) {
+ if (!isa<ObjFile>(inputFile))
+ continue;
+ const ObjFile *objFile = cast<ObjFile>(inputFile);
+ const auto *c = reinterpret_cast<const SegmentCommand *>(
+ findCommand(objFile->mb.getBufferStart(), LP::segmentLCType));
+ if (!c)
+ continue;
+ ArrayRef<Section> sections{reinterpret_cast<const Section *>(c + 1),
+ c->nsects};
+
+ ArrayRef<MachO::data_in_code_entry> entries = objFile->dataInCodeEntries;
+ if (entries.empty())
+ continue;
+ // For each code subsection find 'data in code' entries residing in it.
+ // Compute the new offset values as
+ // <offset within subsection> + <subsection address> - <__TEXT address>.
+ for (size_t i = 0, n = sections.size(); i < n; ++i) {
+ const SubsectionMap &subsecMap = objFile->subsections[i];
+ for (const SubsectionEntry &subsecEntry : subsecMap) {
+ const InputSection *isec = subsecEntry.isec;
+ if (!isCodeSection(isec))
+ continue;
+ if (cast<ConcatInputSection>(isec)->shouldOmitFromOutput())
+ continue;
+ const uint64_t beginAddr = sections[i].addr + subsecEntry.offset;
+ auto it = llvm::lower_bound(
+ entries, beginAddr,
+ [](const MachO::data_in_code_entry &entry, uint64_t addr) {
+ return entry.offset < addr;
+ });
+ const uint64_t endAddr = beginAddr + isec->getFileSize();
+ for (const auto end = entries.end();
+ it != end && it->offset + it->length <= endAddr; ++it)
+ dataInCodeEntries.push_back(
+ {static_cast<uint32_t>(isec->getVA(it->offset - beginAddr) -
+ in.header->addr),
+ it->length, it->kind});
+ }
+ }
+ }
+ return dataInCodeEntries;
+}
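A concrete (made-up) instance of the offset rewrite described above: suppose a code subsection sits at address 0x20 in its object file (beginAddr = sections[i].addr + subsecEntry.offset = 0x20), a data-in-code entry covers object offset 0x28 with length 4, the subsection is assigned VA 0x100003f80 in the output, and the output mach header is at 0x100000000. The emitted entry offset is then getVA(0x28 - 0x20) - in.header->addr = 0x100003f88 - 0x100000000 = 0x3f88, i.e. the entry's distance from the start of __TEXT in the final image.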
+
+void DataInCodeSection::finalizeContents() {
+ entries = target->wordSize == 8 ? collectDataInCodeEntries<LP64>()
+ : collectDataInCodeEntries<ILP32>();
+}
+
+void DataInCodeSection::writeTo(uint8_t *buf) const {
+ if (!entries.empty())
+ memcpy(buf, entries.data(), getRawSize());
+}
+
+FunctionStartsSection::FunctionStartsSection()
+ : LinkEditSection(segment_names::linkEdit, section_names::functionStarts) {}
+
+void FunctionStartsSection::finalizeContents() {
+ raw_svector_ostream os{contents};
+ std::vector<uint64_t> addrs;
+ for (const Symbol *sym : symtab->getSymbols()) {
+ if (const auto *defined = dyn_cast<Defined>(sym)) {
+ if (!defined->isec || !isCodeSection(defined->isec) || !defined->isLive())
+ continue;
+ if (const auto *concatIsec = dyn_cast<ConcatInputSection>(defined->isec))
+ if (concatIsec->shouldOmitFromOutput())
+ continue;
+ // TODO: Add support for Thumb; in that case
+ // the lowest bit of nextAddr needs to be set to 1.
+ addrs.push_back(defined->getVA());
+ }
+ }
+ llvm::sort(addrs);
+ uint64_t addr = in.header->addr;
+ for (uint64_t nextAddr : addrs) {
+ uint64_t delta = nextAddr - addr;
+ if (delta == 0)
+ continue;
+ encodeULEB128(delta, os);
+ addr = nextAddr;
+ }
+ os << '\0';
+}
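For example (illustrative addresses only): with the header at 0x100000000 and live function symbols at 0x100003f60, 0x100003f80 and 0x100003f80 (two aliases), the emitted stream is ULEB128(0x3f60), ULEB128(0x20), then the terminating 0x00; the zero delta between the two aliases is skipped.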
+
+void FunctionStartsSection::writeTo(uint8_t *buf) const {
+ memcpy(buf, contents.data(), contents.size());
+}
+
SymtabSection::SymtabSection(StringTableSection &stringTableSection)
: LinkEditSection(segment_names::linkEdit, section_names::symbolTable),
stringTableSection(stringTableSection) {}
-uint64_t SymtabSection::getRawSize() const {
- return getNumSymbols() * sizeof(structs::nlist_64);
-}
-
void SymtabSection::emitBeginSourceStab(DWARFUnit *compileUnit) {
- StabsEntry stab(MachO::N_SO);
+ StabsEntry stab(N_SO);
SmallString<261> dir(compileUnit->getCompilationDir());
StringRef sep = sys::path::get_separator();
// We don't use `path::append` here because we want an empty `dir` to result
@@ -605,13 +841,13 @@ void SymtabSection::emitBeginSourceStab(DWARFUnit *compileUnit) {
}
void SymtabSection::emitEndSourceStab() {
- StabsEntry stab(MachO::N_SO);
+ StabsEntry stab(N_SO);
stab.sect = 1;
stabs.emplace_back(std::move(stab));
}
void SymtabSection::emitObjectFileStab(ObjFile *file) {
- StabsEntry stab(MachO::N_OSO);
+ StabsEntry stab(N_OSO);
stab.sect = target->cpuSubtype;
SmallString<261> path(!file->archiveName.empty() ? file->archiveName
: file->getName());
@@ -629,23 +865,29 @@ void SymtabSection::emitObjectFileStab(ObjFile *file) {
}
void SymtabSection::emitEndFunStab(Defined *defined) {
- StabsEntry stab(MachO::N_FUN);
- // FIXME this should be the size of the symbol. Using the section size in
- // lieu is only correct if .subsections_via_symbols is set.
- stab.value = defined->isec->getSize();
+ StabsEntry stab(N_FUN);
+ stab.value = defined->size;
stabs.emplace_back(std::move(stab));
}
void SymtabSection::emitStabs() {
+ for (const std::string &s : config->astPaths) {
+ StabsEntry astStab(N_AST);
+ astStab.strx = stringTableSection.addString(s);
+ stabs.emplace_back(std::move(astStab));
+ }
+
std::vector<Defined *> symbolsNeedingStabs;
for (const SymtabEntry &entry :
concat<SymtabEntry>(localSymbols, externalSymbols)) {
Symbol *sym = entry.sym;
+ assert(sym->isLive() &&
+ "dead symbols should not be in localSymbols, externalSymbols");
if (auto *defined = dyn_cast<Defined>(sym)) {
if (defined->isAbsolute())
continue;
InputSection *isec = defined->isec;
- ObjFile *file = dyn_cast_or_null<ObjFile>(isec->file);
+ ObjFile *file = dyn_cast_or_null<ObjFile>(isec->getFile());
if (!file || !file->compileUnit)
continue;
symbolsNeedingStabs.push_back(defined);
@@ -653,7 +895,7 @@ void SymtabSection::emitStabs() {
}
llvm::stable_sort(symbolsNeedingStabs, [&](Defined *a, Defined *b) {
- return a->isec->file->id < b->isec->file->id;
+ return a->isec->getFile()->id < b->isec->getFile()->id;
});
// Emit STABS symbols so that dsymutil and/or the debugger can map address
@@ -662,8 +904,7 @@ void SymtabSection::emitStabs() {
InputFile *lastFile = nullptr;
for (Defined *defined : symbolsNeedingStabs) {
InputSection *isec = defined->isec;
- ObjFile *file = dyn_cast<ObjFile>(isec->file);
- assert(file);
+ ObjFile *file = cast<ObjFile>(isec->getFile());
if (lastFile == nullptr || lastFile != file) {
if (lastFile != nullptr)
@@ -675,16 +916,16 @@ void SymtabSection::emitStabs() {
}
StabsEntry symStab;
- symStab.sect = defined->isec->parent->index;
+ symStab.sect = defined->isec->canonical()->parent->index;
symStab.strx = stringTableSection.addString(defined->getName());
symStab.value = defined->getVA();
if (isCodeSection(isec)) {
- symStab.type = MachO::N_FUN;
+ symStab.type = N_FUN;
stabs.emplace_back(std::move(symStab));
emitEndFunStab(defined);
} else {
- symStab.type = defined->isExternal() ? MachO::N_GSYM : MachO::N_STSYM;
+ symStab.type = defined->isExternal() ? N_GSYM : N_STSYM;
stabs.emplace_back(std::move(symStab));
}
}
@@ -701,14 +942,15 @@ void SymtabSection::finalizeContents() {
// Local symbols aren't in the SymbolTable, so we walk the list of object
// files to gather them.
- for (InputFile *file : inputFiles) {
+ for (const InputFile *file : inputFiles) {
if (auto *objFile = dyn_cast<ObjFile>(file)) {
for (Symbol *sym : objFile->symbols) {
- // TODO: when we implement -dead_strip, we should filter out symbols
- // that belong to dead sections.
- if (auto *defined = dyn_cast<Defined>(sym)) {
- if (!defined->isExternal())
- addSymbol(localSymbols, sym);
+ if (auto *defined = dyn_cast_or_null<Defined>(sym)) {
+ if (!defined->isExternal() && defined->isLive()) {
+ StringRef name = defined->getName();
+ if (!name.startswith("l") && !name.startswith("L"))
+ addSymbol(localSymbols, sym);
+ }
}
}
}
@@ -716,14 +958,20 @@ void SymtabSection::finalizeContents() {
// __dyld_private is a local symbol too. It's linker-created and doesn't
// exist in any object file.
- if (Defined* dyldPrivate = in.stubHelper->dyldPrivate)
+ if (Defined *dyldPrivate = in.stubHelper->dyldPrivate)
addSymbol(localSymbols, dyldPrivate);
for (Symbol *sym : symtab->getSymbols()) {
+ if (!sym->isLive())
+ continue;
if (auto *defined = dyn_cast<Defined>(sym)) {
+ if (!defined->includeInSymtab)
+ continue;
assert(defined->isExternal());
- (void)defined;
- addSymbol(externalSymbols, sym);
+ if (defined->privateExtern)
+ addSymbol(localSymbols, defined);
+ else
+ addSymbol(externalSymbols, defined);
} else if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
if (dysym->isReferenced())
addSymbol(undefinedSymbols, sym);
@@ -743,8 +991,21 @@ uint32_t SymtabSection::getNumSymbols() const {
undefinedSymbols.size();
}
-void SymtabSection::writeTo(uint8_t *buf) const {
- auto *nList = reinterpret_cast<structs::nlist_64 *>(buf);
+// This serves to hide (type-erase) the template parameter from SymtabSection.
+template <class LP> class SymtabSectionImpl final : public SymtabSection {
+public:
+ SymtabSectionImpl(StringTableSection &stringTableSection)
+ : SymtabSection(stringTableSection) {}
+ uint64_t getRawSize() const override;
+ void writeTo(uint8_t *buf) const override;
+};
+
+template <class LP> uint64_t SymtabSectionImpl<LP>::getRawSize() const {
+ return getNumSymbols() * sizeof(typename LP::nlist);
+}
+
+template <class LP> void SymtabSectionImpl<LP>::writeTo(uint8_t *buf) const {
+ auto *nList = reinterpret_cast<typename LP::nlist *>(buf);
// Emit the stabs entries before the "real" symbols. We cannot emit them
// after as that would render Symbol::symtabIndex inaccurate.
for (const StabsEntry &entry : stabs) {
@@ -765,45 +1026,63 @@ void SymtabSection::writeTo(uint8_t *buf) const {
if (defined->privateExtern) {
// Private external -- dylib scoped symbol.
// Promote to non-external at link time.
- assert(defined->isExternal() && "invalid input file");
- scope = MachO::N_PEXT;
+ scope = N_PEXT;
} else if (defined->isExternal()) {
// Normal global symbol.
- scope = MachO::N_EXT;
+ scope = N_EXT;
} else {
// TU-local symbol from localSymbols.
scope = 0;
}
if (defined->isAbsolute()) {
- nList->n_type = scope | MachO::N_ABS;
- nList->n_sect = MachO::NO_SECT;
+ nList->n_type = scope | N_ABS;
+ nList->n_sect = NO_SECT;
nList->n_value = defined->value;
} else {
- nList->n_type = scope | MachO::N_SECT;
- nList->n_sect = defined->isec->parent->index;
+ nList->n_type = scope | N_SECT;
+ nList->n_sect = defined->isec->canonical()->parent->index;
// For the N_SECT symbol type, n_value is the address of the symbol
nList->n_value = defined->getVA();
}
- nList->n_desc |= defined->isExternalWeakDef() ? MachO::N_WEAK_DEF : 0;
+ nList->n_desc |= defined->thumb ? N_ARM_THUMB_DEF : 0;
+ nList->n_desc |= defined->isExternalWeakDef() ? N_WEAK_DEF : 0;
+ nList->n_desc |=
+ defined->referencedDynamically ? REFERENCED_DYNAMICALLY : 0;
} else if (auto *dysym = dyn_cast<DylibSymbol>(entry.sym)) {
uint16_t n_desc = nList->n_desc;
- MachO::SET_LIBRARY_ORDINAL(n_desc, dysym->file->ordinal);
- nList->n_type = MachO::N_EXT;
- n_desc |= dysym->isWeakRef() ? MachO::N_WEAK_REF : 0;
+ int16_t ordinal = ordinalForDylibSymbol(*dysym);
+ if (ordinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP)
+ SET_LIBRARY_ORDINAL(n_desc, DYNAMIC_LOOKUP_ORDINAL);
+ else if (ordinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE)
+ SET_LIBRARY_ORDINAL(n_desc, EXECUTABLE_ORDINAL);
+ else {
+ assert(ordinal > 0);
+ SET_LIBRARY_ORDINAL(n_desc, static_cast<uint8_t>(ordinal));
+ }
+
+ nList->n_type = N_EXT;
+ n_desc |= dysym->isWeakDef() ? N_WEAK_DEF : 0;
+ n_desc |= dysym->isWeakRef() ? N_WEAK_REF : 0;
nList->n_desc = n_desc;
}
++nList;
}
}
+template <class LP>
+SymtabSection *
+macho::makeSymtabSection(StringTableSection &stringTableSection) {
+ return make<SymtabSectionImpl<LP>>(stringTableSection);
+}
+
IndirectSymtabSection::IndirectSymtabSection()
: LinkEditSection(segment_names::linkEdit,
section_names::indirectSymbolTable) {}
uint32_t IndirectSymtabSection::getNumSymbols() const {
return in.got->getEntries().size() + in.tlvPointers->getEntries().size() +
- in.stubs->getEntries().size();
+ 2 * in.stubs->getEntries().size();
}
bool IndirectSymtabSection::isNeeded() const {
@@ -817,23 +1096,37 @@ void IndirectSymtabSection::finalizeContents() {
off += in.got->getEntries().size();
in.tlvPointers->reserved1 = off;
off += in.tlvPointers->getEntries().size();
- // There is a 1:1 correspondence between stubs and LazyPointerSection
- // entries, so they can share the same sub-array in the table.
- in.stubs->reserved1 = in.lazyPointers->reserved1 = off;
+ in.stubs->reserved1 = off;
+ off += in.stubs->getEntries().size();
+ in.lazyPointers->reserved1 = off;
+}
+
+static uint32_t indirectValue(const Symbol *sym) {
+ return sym->symtabIndex != UINT32_MAX ? sym->symtabIndex
+ : INDIRECT_SYMBOL_LOCAL;
}
void IndirectSymtabSection::writeTo(uint8_t *buf) const {
uint32_t off = 0;
for (const Symbol *sym : in.got->getEntries()) {
- write32le(buf + off * sizeof(uint32_t), sym->symtabIndex);
+ write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
++off;
}
for (const Symbol *sym : in.tlvPointers->getEntries()) {
- write32le(buf + off * sizeof(uint32_t), sym->symtabIndex);
+ write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
++off;
}
for (const Symbol *sym : in.stubs->getEntries()) {
- write32le(buf + off * sizeof(uint32_t), sym->symtabIndex);
+ write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
+ ++off;
+ }
+ // There is a 1:1 correspondence between stubs and LazyPointerSection
+ // entries. But giving __stubs and __la_symbol_ptr the same reserved1
+ // (the offset into the indirect symbol table) so that they both refer
+ // to the same range of offsets confuses `strip`, so write the stubs
+ // symbol table offsets a second time.
+ for (const Symbol *sym : in.stubs->getEntries()) {
+ write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
++off;
}
}
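As a worked layout (hypothetical counts, not from this patch): with 2 GOT entries, 1 TLV pointer and 3 stubs, finalizeContents() sets in.got->reserved1 = 0, in.tlvPointers->reserved1 = 2, in.stubs->reserved1 = 3 and in.lazyPointers->reserved1 = 6, and getNumSymbols() returns 2 + 1 + 2*3 = 9; the last three slots repeat the stub symbols' indices on behalf of __la_symbol_ptr.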
@@ -855,3 +1148,365 @@ void StringTableSection::writeTo(uint8_t *buf) const {
off += str.size() + 1; // account for null terminator
}
}
+
+static_assert((CodeSignatureSection::blobHeadersSize % 8) == 0, "");
+static_assert((CodeSignatureSection::fixedHeadersSize % 8) == 0, "");
+
+CodeSignatureSection::CodeSignatureSection()
+ : LinkEditSection(segment_names::linkEdit, section_names::codeSignature) {
+ align = 16; // required by libstuff
+ // FIXME: Consider using finalOutput instead of outputFile.
+ fileName = config->outputFile;
+ size_t slashIndex = fileName.rfind("/");
+ if (slashIndex != std::string::npos)
+ fileName = fileName.drop_front(slashIndex + 1);
+ allHeadersSize = alignTo<16>(fixedHeadersSize + fileName.size() + 1);
+ fileNamePad = allHeadersSize - fixedHeadersSize - fileName.size();
+}
+
+uint32_t CodeSignatureSection::getBlockCount() const {
+ return (fileOff + blockSize - 1) / blockSize;
+}
+
+uint64_t CodeSignatureSection::getRawSize() const {
+ return allHeadersSize + getBlockCount() * hashSize;
+}
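To put numbers on this (illustrative, assuming the blockSize = 4096 and hashSize = 32 SHA-256 constants declared for this class): signing a 100,000-byte output needs ceil(100000 / 4096) = 25 code slots, so getRawSize() is allHeadersSize plus 25 * 32 = 800 bytes of hashes.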
+
+void CodeSignatureSection::writeHashes(uint8_t *buf) const {
+ uint8_t *code = buf;
+ uint8_t *codeEnd = buf + fileOff;
+ uint8_t *hashes = codeEnd + allHeadersSize;
+ while (code < codeEnd) {
+ StringRef block(reinterpret_cast<char *>(code),
+ std::min(codeEnd - code, static_cast<ssize_t>(blockSize)));
+ SHA256 hasher;
+ hasher.update(block);
+ StringRef hash = hasher.final();
+ assert(hash.size() == hashSize);
+ memcpy(hashes, hash.data(), hashSize);
+ code += blockSize;
+ hashes += hashSize;
+ }
+#if defined(__APPLE__)
+ // This is a macOS-specific workaround and makes no sense for any
+ // other host OS. See https://openradar.appspot.com/FB8914231
+ //
+ // The macOS kernel maintains a signature-verification cache to
+ // quickly validate applications at time of execve(2). The trouble
+ // is that the kernel creates the cache entry at the time of the
+ // mmap(2) call, before we have a chance to write either the code to
+ // sign or the signature header+hashes. The fix is to invalidate
+ // all cached data associated with the output file, thus discarding
+ // the bogus prematurely-cached signature.
+ msync(buf, fileOff + getSize(), MS_INVALIDATE);
+#endif
+}
+
+void CodeSignatureSection::writeTo(uint8_t *buf) const {
+ uint32_t signatureSize = static_cast<uint32_t>(getSize());
+ auto *superBlob = reinterpret_cast<CS_SuperBlob *>(buf);
+ write32be(&superBlob->magic, CSMAGIC_EMBEDDED_SIGNATURE);
+ write32be(&superBlob->length, signatureSize);
+ write32be(&superBlob->count, 1);
+ auto *blobIndex = reinterpret_cast<CS_BlobIndex *>(&superBlob[1]);
+ write32be(&blobIndex->type, CSSLOT_CODEDIRECTORY);
+ write32be(&blobIndex->offset, blobHeadersSize);
+ auto *codeDirectory =
+ reinterpret_cast<CS_CodeDirectory *>(buf + blobHeadersSize);
+ write32be(&codeDirectory->magic, CSMAGIC_CODEDIRECTORY);
+ write32be(&codeDirectory->length, signatureSize - blobHeadersSize);
+ write32be(&codeDirectory->version, CS_SUPPORTSEXECSEG);
+ write32be(&codeDirectory->flags, CS_ADHOC | CS_LINKER_SIGNED);
+ write32be(&codeDirectory->hashOffset,
+ sizeof(CS_CodeDirectory) + fileName.size() + fileNamePad);
+ write32be(&codeDirectory->identOffset, sizeof(CS_CodeDirectory));
+ codeDirectory->nSpecialSlots = 0;
+ write32be(&codeDirectory->nCodeSlots, getBlockCount());
+ write32be(&codeDirectory->codeLimit, fileOff);
+ codeDirectory->hashSize = static_cast<uint8_t>(hashSize);
+ codeDirectory->hashType = kSecCodeSignatureHashSHA256;
+ codeDirectory->platform = 0;
+ codeDirectory->pageSize = blockSizeShift;
+ codeDirectory->spare2 = 0;
+ codeDirectory->scatterOffset = 0;
+ codeDirectory->teamOffset = 0;
+ codeDirectory->spare3 = 0;
+ codeDirectory->codeLimit64 = 0;
+ OutputSegment *textSeg = getOrCreateOutputSegment(segment_names::text);
+ write64be(&codeDirectory->execSegBase, textSeg->fileOff);
+ write64be(&codeDirectory->execSegLimit, textSeg->fileSize);
+ write64be(&codeDirectory->execSegFlags,
+ config->outputType == MH_EXECUTE ? CS_EXECSEG_MAIN_BINARY : 0);
+ auto *id = reinterpret_cast<char *>(&codeDirectory[1]);
+ memcpy(id, fileName.begin(), fileName.size());
+ memset(id + fileName.size(), 0, fileNamePad);
+}
+
+BitcodeBundleSection::BitcodeBundleSection()
+ : SyntheticSection(segment_names::llvm, section_names::bitcodeBundle) {}
+
+class ErrorCodeWrapper {
+public:
+ explicit ErrorCodeWrapper(std::error_code ec) : errorCode(ec.value()) {}
+ explicit ErrorCodeWrapper(int ec) : errorCode(ec) {}
+ operator int() const { return errorCode; }
+
+private:
+ int errorCode;
+};
+
+#define CHECK_EC(exp) \
+ do { \
+ ErrorCodeWrapper ec(exp); \
+ if (ec) \
+ fatal(Twine("operation failed with error code ") + Twine(ec) + ": " + \
+ #exp); \
+ } while (0);
+
+void BitcodeBundleSection::finalize() {
+#ifdef LLVM_HAVE_LIBXAR
+ using namespace llvm::sys::fs;
+ CHECK_EC(createTemporaryFile("bitcode-bundle", "xar", xarPath));
+
+ xar_t xar(xar_open(xarPath.data(), O_RDWR));
+ if (!xar)
+ fatal("failed to open XAR temporary file at " + xarPath);
+ CHECK_EC(xar_opt_set(xar, XAR_OPT_COMPRESSION, XAR_OPT_VAL_NONE));
+ // FIXME: add more data to XAR
+ CHECK_EC(xar_close(xar));
+
+ file_size(xarPath, xarSize);
+#endif // defined(LLVM_HAVE_LIBXAR)
+}
+
+void BitcodeBundleSection::writeTo(uint8_t *buf) const {
+ using namespace llvm::sys::fs;
+ file_t handle =
+ CHECK(openNativeFile(xarPath, CD_OpenExisting, FA_Read, OF_None),
+ "failed to open XAR file");
+ std::error_code ec;
+ mapped_file_region xarMap(handle, mapped_file_region::mapmode::readonly,
+ xarSize, 0, ec);
+ if (ec)
+ fatal("failed to map XAR file");
+ memcpy(buf, xarMap.const_data(), xarSize);
+
+ closeFile(handle);
+ remove(xarPath);
+}
+
+CStringSection::CStringSection()
+ : SyntheticSection(segment_names::text, section_names::cString) {
+ flags = S_CSTRING_LITERALS;
+}
+
+void CStringSection::addInput(CStringInputSection *isec) {
+ isec->parent = this;
+ inputs.push_back(isec);
+ if (isec->align > align)
+ align = isec->align;
+}
+
+void CStringSection::writeTo(uint8_t *buf) const {
+ for (const CStringInputSection *isec : inputs) {
+ for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
+ if (!isec->pieces[i].live)
+ continue;
+ StringRef string = isec->getStringRef(i);
+ memcpy(buf + isec->pieces[i].outSecOff, string.data(), string.size());
+ }
+ }
+}
+
+void CStringSection::finalizeContents() {
+ uint64_t offset = 0;
+ for (CStringInputSection *isec : inputs) {
+ for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
+ if (!isec->pieces[i].live)
+ continue;
+ uint32_t pieceAlign = MinAlign(isec->pieces[i].inSecOff, align);
+ offset = alignTo(offset, pieceAlign);
+ isec->pieces[i].outSecOff = offset;
+ isec->isFinal = true;
+ StringRef string = isec->getStringRef(i);
+ offset += string.size();
+ }
+ }
+ size = offset;
+}
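One concrete case of the MinAlign computation above (made-up offsets): if a __cstring input carries align = 16 and a live piece starts at inSecOff 0x24, MinAlign(0x24, 16) is 4, so that piece only has to land at a 4-byte-aligned output offset, while a piece at inSecOff 0x40 keeps the full 16-byte alignment. This preserves each string's alignment relative to the section start without padding every string to the section's maximum alignment.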
+// Mergeable cstring literals are found under the __TEXT,__cstring section. In
+// contrast to ELF, which puts strings that need different alignments into
+// different sections, clang's Mach-O backend puts them all in one section.
+// Strings that need to be aligned have the .p2align directive emitted before
+// them, which simply translates into zero padding in the object file.
+//
+// I *think* ld64 extracts the desired per-string alignment from this data by
+// preserving each string's offset from the last section-aligned address. I'm
+// not entirely certain since it doesn't seem consistent about doing this, and
+// in fact doesn't seem to be correct in general: we can in fact can induce ld64
+// to produce a crashing binary just by linking in an additional object file
+// that only contains a duplicate cstring at a different alignment. See PR50563
+// for details.
+//
+// On x86_64, the cstrings we've seen so far that require special alignment are
+// all accessed by SIMD operations -- x86_64 requires SIMD accesses to be
+// 16-byte-aligned. arm64 also seems to require 16-byte-alignment in some cases
+// (PR50791), but I haven't tracked down the root cause. So for now, I'm just
+// aligning all strings to 16 bytes. This is indeed wasteful, but
+// implementation-wise it's simpler than preserving per-string
+// alignment+offsets. It also avoids the aforementioned crash after
+// deduplication of differently-aligned strings. Finally, the overhead is not
+// huge: using 16-byte alignment (vs no alignment) is only a 0.5% size overhead
+// when linking chromium_framework on x86_64.
+DeduplicatedCStringSection::DeduplicatedCStringSection()
+ : builder(StringTableBuilder::RAW, /*Alignment=*/16) {}
+
+void DeduplicatedCStringSection::finalizeContents() {
+ // Add all string pieces to the string table builder to create section
+ // contents.
+ for (const CStringInputSection *isec : inputs)
+ for (size_t i = 0, e = isec->pieces.size(); i != e; ++i)
+ if (isec->pieces[i].live)
+ builder.add(isec->getCachedHashStringRef(i));
+
+ // Fix the string table content. After this, the contents will never change.
+ builder.finalizeInOrder();
+
+ // finalize() fixed tail-optimized strings, so we can now get
+ // offsets of strings. Get an offset for each string and save it
+ // to a corresponding SectionPiece for easy access.
+ for (CStringInputSection *isec : inputs) {
+ for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
+ if (!isec->pieces[i].live)
+ continue;
+ isec->pieces[i].outSecOff =
+ builder.getOffset(isec->getCachedHashStringRef(i));
+ isec->isFinal = true;
+ }
+ }
+}
+
+// This section is actually emitted as __TEXT,__const by ld64, but clang may
+// emit input sections of that name, and LLD doesn't currently support mixing
+// synthetic and concat-type OutputSections. To work around this, I've given
+// our merged-literals section a different name.
+WordLiteralSection::WordLiteralSection()
+ : SyntheticSection(segment_names::text, section_names::literals) {
+ align = 16;
+}
+
+void WordLiteralSection::addInput(WordLiteralInputSection *isec) {
+ isec->parent = this;
+ inputs.push_back(isec);
+}
+
+void WordLiteralSection::finalizeContents() {
+ for (WordLiteralInputSection *isec : inputs) {
+ // We do all processing of the InputSection here, so it will be effectively
+ // finalized.
+ isec->isFinal = true;
+ const uint8_t *buf = isec->data.data();
+ switch (sectionType(isec->getFlags())) {
+ case S_4BYTE_LITERALS: {
+ for (size_t off = 0, e = isec->data.size(); off < e; off += 4) {
+ if (!isec->isLive(off))
+ continue;
+ uint32_t value = *reinterpret_cast<const uint32_t *>(buf + off);
+ literal4Map.emplace(value, literal4Map.size());
+ }
+ break;
+ }
+ case S_8BYTE_LITERALS: {
+ for (size_t off = 0, e = isec->data.size(); off < e; off += 8) {
+ if (!isec->isLive(off))
+ continue;
+ uint64_t value = *reinterpret_cast<const uint64_t *>(buf + off);
+ literal8Map.emplace(value, literal8Map.size());
+ }
+ break;
+ }
+ case S_16BYTE_LITERALS: {
+ for (size_t off = 0, e = isec->data.size(); off < e; off += 16) {
+ if (!isec->isLive(off))
+ continue;
+ UInt128 value = *reinterpret_cast<const UInt128 *>(buf + off);
+ literal16Map.emplace(value, literal16Map.size());
+ }
+ break;
+ }
+ default:
+ llvm_unreachable("invalid literal section type");
+ }
+ }
+}
+
+void WordLiteralSection::writeTo(uint8_t *buf) const {
+ // Note that we don't attempt to do any endianness conversion in addInput(),
+ // so we don't do it here either -- just write out the original value,
+ // byte-for-byte.
+ for (const auto &p : literal16Map)
+ memcpy(buf + p.second * 16, &p.first, 16);
+ buf += literal16Map.size() * 16;
+
+ for (const auto &p : literal8Map)
+ memcpy(buf + p.second * 8, &p.first, 8);
+ buf += literal8Map.size() * 8;
+
+ for (const auto &p : literal4Map)
+ memcpy(buf + p.second * 4, &p.first, 4);
+}
+
+void macho::createSyntheticSymbols() {
+ auto addHeaderSymbol = [](const char *name) {
+ symtab->addSynthetic(name, in.header->isec, /*value=*/0,
+ /*privateExtern=*/true, /*includeInSymtab=*/false,
+ /*referencedDynamically=*/false);
+ };
+
+ switch (config->outputType) {
+ // FIXME: Assign the right address value for these symbols
+ // (rather than 0). But we need to do that after assignAddresses().
+ case MH_EXECUTE:
+ // If linking PIE, __mh_execute_header is a defined symbol in
+ // __TEXT,__text.
+ // Otherwise, it's an absolute symbol.
+ if (config->isPic)
+ symtab->addSynthetic("__mh_execute_header", in.header->isec, /*value=*/0,
+ /*privateExtern=*/false, /*includeInSymtab=*/true,
+ /*referencedDynamically=*/true);
+ else
+ symtab->addSynthetic("__mh_execute_header", /*isec=*/nullptr, /*value=*/0,
+ /*privateExtern=*/false, /*includeInSymtab=*/true,
+ /*referencedDynamically=*/true);
+ break;
+
+ // The following symbols are N_SECT symbols, even though the header is not
+ // part of any section, and they are private to the bundle/dylib/object
+ // they are part of.
+ case MH_BUNDLE:
+ addHeaderSymbol("__mh_bundle_header");
+ break;
+ case MH_DYLIB:
+ addHeaderSymbol("__mh_dylib_header");
+ break;
+ case MH_DYLINKER:
+ addHeaderSymbol("__mh_dylinker_header");
+ break;
+ case MH_OBJECT:
+ addHeaderSymbol("__mh_object_header");
+ break;
+ default:
+ llvm_unreachable("unexpected outputType");
+ break;
+ }
+
+ // The Itanium C++ ABI requires dylibs to pass a pointer to __cxa_atexit
+ // which does e.g. cleanup of static global variables. The ABI document
+ // says that the pointer can point to any address in one of the dylib's
+ // segments, but in practice ld64 seems to set it to point to the header,
+ // so that's what's implemented here.
+ addHeaderSymbol("___dso_handle");
+}
+
+template SymtabSection *macho::makeSymtabSection<LP64>(StringTableSection &);
+template SymtabSection *macho::makeSymtabSection<ILP32>(StringTableSection &);