//===- Target.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLD_ELF_TARGET_H
#define LLD_ELF_TARGET_H

#include "InputSection.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/MathExtras.h"
#include <array>

namespace lld {
std::string toString(elf::RelType type);

namespace elf {
class Defined;
class InputFile;
class Symbol;

class TargetInfo {
public:
  virtual uint32_t calcEFlags() const { return 0; }
  virtual RelExpr getRelExpr(RelType type, const Symbol &s,
                             const uint8_t *loc) const = 0;
  virtual RelType getDynRel(RelType type) const { return 0; }
  virtual void writeGotPltHeader(uint8_t *buf) const {}
  virtual void writeGotHeader(uint8_t *buf) const {}
  virtual void writeGotPlt(uint8_t *buf, const Symbol &s) const {}
  virtual void writeIgotPlt(uint8_t *buf, const Symbol &s) const {}
  virtual int64_t getImplicitAddend(const uint8_t *buf, RelType type) const;
  virtual int getTlsGdRelaxSkip(RelType type) const { return 1; }

  // If lazy binding is supported, the first entry of the PLT has code
  // to call the dynamic linker to resolve PLT entries the first time
  // they are called. This function writes that code.
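  // For illustration only (each target emits its own bytes): on x86-64 the
  // lazy-binding PLT header is conventionally
  //   pushq GOTPLT+8(%rip)
  //   jmpq  *GOTPLT+16(%rip)
  // plus padding, where the two .got.plt slots are filled in by the dynamic
  // loader at load time.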
  virtual void writePltHeader(uint8_t *buf) const {}

  virtual void writePlt(uint8_t *buf, const Symbol &sym,
                        uint64_t pltEntryAddr) const {}
  virtual void writeIplt(uint8_t *buf, const Symbol &sym,
                         uint64_t pltEntryAddr) const {
    // All but PPC32 and PPC64 use the same format for .plt and .iplt entries.
    writePlt(buf, sym, pltEntryAddr);
  }
  virtual void writeIBTPlt(uint8_t *buf, size_t numEntries) const {}
  virtual void addPltHeaderSymbols(InputSection &isec) const {}
  virtual void addPltSymbols(InputSection &isec, uint64_t off) const {}

  // Returns true if a relocation only uses the low bits of a value such that
  // all those bits are in the same page. For example, a relocation that
  // writes only the low 12 bits of an address on a system with 4 KiB pages.
  // If this is true, the bits always have the same value at runtime and we do
  // not have to emit a dynamic relocation.
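  // Worked example: with 4 KiB (2^12 byte) pages, a relocation that writes
  // only bits [11:0] of an address (such as an AArch64 :lo12: relocation)
  // computes the same value wherever the loader maps the image, because
  // moving the base by a page-aligned amount only changes bits 12 and above.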
  virtual bool usesOnlyLowPageBits(RelType type) const;

  // Decide whether a Thunk is needed for the relocation from File
  // targeting S.
  virtual bool needsThunk(RelExpr expr, RelType relocType,
                          const InputFile *file, uint64_t branchAddr,
                          const Symbol &s, int64_t a) const;

  // On systems with range extensions we place collections of Thunks at
  // regular spacings that enable the majority of branches to reach the
  // Thunks. A value of 0 means range extension thunks are not supported.
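  // For example, a branch with a +/-128 MiB range (such as an AArch64 B/BL)
  // can always reach a thunk if thunk sections are spaced somewhat less than
  // 128 MiB apart, leaving margin for the size of the thunks themselves.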
  virtual uint32_t getThunkSectionSpacing() const { return 0; }

  // The function with a prologue starting at Loc was compiled with
  // -fsplit-stack and it calls a function compiled without. Adjust the prologue
  // to do the right thing. See https://gcc.gnu.org/wiki/SplitStacks.
  // The symbol's st_other flags are needed on PowerPC64 for determining the
  // offset to the split-stack prologue.
  virtual bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                                uint8_t stOther) const;

  // Return true if we can reach dst from src with RelType type.
  virtual bool inBranchRange(RelType type, uint64_t src,
                             uint64_t dst) const;

  virtual void relocate(uint8_t *loc, const Relocation &rel,
                        uint64_t val) const = 0;
  void relocateNoSym(uint8_t *loc, RelType type, uint64_t val) const {
    relocate(loc, Relocation{R_NONE, type, 0, 0, nullptr}, val);
  }

  virtual void applyJumpInstrMod(uint8_t *loc, JumpModType type,
                                 JumpModType val) const {}

  virtual ~TargetInfo();

  // This deletes a jump insn at the end of the section if it is a fall thru to
  // the next section.  Further, if there is a conditional jump and a direct
  // jump consecutively, it tries to flip the conditional jump to convert the
  // direct jump into a fall thru and delete it.  Returns true if a jump
  // instruction can be deleted.
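  // Illustrative example in x86-64 style assembly: a section ending in
  //   jne .Lnext      (where .Lnext starts the next output section)
  //   jmp .Ltarget
  // can be rewritten as
  //   je  .Ltarget    (falling through to .Lnext)
  // which lets the unconditional jump be deleted.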
  virtual bool deleteFallThruJmpInsn(InputSection &is, InputFile *file,
                                     InputSection *nextIS) const {
    return false;
  }

  unsigned defaultCommonPageSize = 4096;
  unsigned defaultMaxPageSize = 4096;

  uint64_t getImageBase() const;

  // True if _GLOBAL_OFFSET_TABLE_ is relative to .got.plt, false if .got.
  bool gotBaseSymInGotPlt = true;

  RelType copyRel;
  RelType gotRel;
  RelType noneRel;
  RelType pltRel;
  RelType relativeRel;
  RelType iRelativeRel;
  RelType symbolicRel;
  RelType tlsDescRel;
  RelType tlsGotRel;
  RelType tlsModuleIndexRel;
  RelType tlsOffsetRel;
  unsigned pltEntrySize;
  unsigned pltHeaderSize;
  unsigned ipltEntrySize;

  // At least on x86_64, positions 1 and 2 are used by the first PLT entry
  // to support lazy loading.
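  // Entry 0 conventionally holds the link-time address of _DYNAMIC, and the
  // dynamic loader fills entries 1 and 2 with its own bookkeeping pointer and
  // the address of its lazy-resolution routine.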
  unsigned gotPltHeaderEntriesNum = 3;

  // On the PPC64 ELFv2 ABI, the first entry in the .got is the .TOC.
  unsigned gotHeaderEntriesNum = 0;

  bool needsThunks = false;

  // A 4-byte field corresponding to one or more trap instructions, used to pad
  // executable OutputSections.
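  // For example, on x86 targets this is typically four 0xcc (int3) bytes.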
  std::array<uint8_t, 4> trapInstr;

  // Stores the NOP instructions of different sizes for the target and is used
  // to pad sections that are relaxed.
  llvm::Optional<std::vector<std::vector<uint8_t>>> nopInstrs;

  // True if the target needs to rewrite calls to __morestack to instead call
  // __morestack_non_split when a split-stack enabled caller calls a
  // non-split-stack callee.
  bool needsMoreStackNonSplit = true;

  virtual RelExpr adjustTlsExpr(RelType type, RelExpr expr) const;
  virtual RelExpr adjustGotPcExpr(RelType type, int64_t addend,
                                  const uint8_t *loc) const;
  virtual void relaxGot(uint8_t *loc, const Relocation &rel,
                        uint64_t val) const;
  virtual void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;

protected:
  // On FreeBSD x86_64 the first page cannot be mmapped.
  // On Linux this is controlled by vm.mmap_min_addr. At least on some x86_64
  // installs this is set to 65536, so the first 16 pages (addresses below
  // 0x10000) cannot be used. Given that, the smallest value that can be used
  // here is 0x10000.
  uint64_t defaultImageBase = 0x10000;
};

TargetInfo *getAArch64TargetInfo();
TargetInfo *getAMDGPUTargetInfo();
TargetInfo *getARMTargetInfo();
TargetInfo *getAVRTargetInfo();
TargetInfo *getHexagonTargetInfo();
TargetInfo *getMSP430TargetInfo();
TargetInfo *getPPC64TargetInfo();
TargetInfo *getPPCTargetInfo();
TargetInfo *getRISCVTargetInfo();
TargetInfo *getSPARCV9TargetInfo();
TargetInfo *getX86TargetInfo();
TargetInfo *getX86_64TargetInfo();
template <class ELFT> TargetInfo *getMipsTargetInfo();

struct ErrorPlace {
  InputSectionBase *isec;
  std::string loc;
};

// Returns input section and corresponding source string for the given location.
ErrorPlace getErrorPlace(const uint8_t *loc);

static inline std::string getErrorLocation(const uint8_t *loc) {
  return getErrorPlace(loc).loc;
}

void writePPC32GlinkSection(uint8_t *buf, size_t numEntries);

bool tryRelaxPPC64TocIndirection(const Relocation &rel, uint8_t *bufLoc);
unsigned getPPCDFormOp(unsigned secondaryOp);

// In the PowerPC64 ELFv2 ABI a function can have two entry points. The first
// is a global entry point (GEP), which is typically used to initialize the
// TOC pointer in general purpose register 2. The second is a local entry
// point (LEP), which bypasses the TOC pointer initialization code. The
// offset between the GEP and the LEP is encoded in a function's st_other
// flags. This function returns the offset (in bytes) from the global entry
// point to the local entry point.
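// Under the ELFv2 ABI the distance is encoded in the three most-significant
// bits of st_other: 0 and 1 mean the two entry points coincide, values 2
// through 6 give an offset of 1 << value bytes (4, 8, 16, 32 or 64), and 7 is
// reserved.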
unsigned getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther);

// Returns true if a relocation is a small code model relocation that accesses
// the .toc section.
bool isPPC64SmallCodeModelTocReloc(RelType type);

// Write a prefixed instruction, which is a 4-byte prefix followed by a 4-byte
// instruction (regardless of endianness). Therefore, the prefix is always in
// lower memory than the instruction.
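// Note that on a little-endian target the combined 8-byte value cannot simply
// be stored with one native 64-bit write; an implementation can, for example,
// swap the two 32-bit halves first, or issue two 32-bit writes with the
// prefix word first.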
void writePrefixedInstruction(uint8_t *loc, uint64_t insn);

void addPPC64SaveRestore();
uint64_t getPPC64TocBase();
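// Returns the AArch64 page address of expr (expr with the low 12 bits
// cleared), as used by Page(S + A)-style relocations such as those paired
// with ADRP.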
uint64_t getAArch64Page(uint64_t expr);

extern const TargetInfo *target;
TargetInfo *getTarget();

template <class ELFT> bool isMipsPIC(const Defined *sym);

void reportRangeError(uint8_t *loc, const Relocation &rel, const Twine &v,
                      int64_t min, uint64_t max);
void reportRangeError(uint8_t *loc, int64_t v, int n, const Symbol &sym,
                      const Twine &msg);

// Make sure that V can be represented as an N bit signed integer.
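// For example, with n = 16 the accepted range is [-32768, 32767]; any other
// value changes under SignExtend64(v, 16) and is reported as out of range.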
inline void checkInt(uint8_t *loc, int64_t v, int n, const Relocation &rel) {
  if (v != llvm::SignExtend64(v, n))
    reportRangeError(loc, rel, Twine(v), llvm::minIntN(n), llvm::maxIntN(n));
}

// Make sure that V can be represented as an N bit unsigned integer.
inline void checkUInt(uint8_t *loc, uint64_t v, int n, const Relocation &rel) {
  if ((v >> n) != 0)
    reportRangeError(loc, rel, Twine(v), 0, llvm::maxUIntN(n));
}

// Make sure that V can be represented as an N bit signed or unsigned integer.
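// For example, with n = 8 the accepted values are those representable as
// either int8_t or uint8_t, i.e. the combined range [-128, 255].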
inline void checkIntUInt(uint8_t *loc, uint64_t v, int n,
                         const Relocation &rel) {
  // For the error message we should cast V to a signed integer so that error
  // messages show a small negative value rather than an extremely large one.
  if (v != (uint64_t)llvm::SignExtend64(v, n) && (v >> n) != 0)
    reportRangeError(loc, rel, Twine((int64_t)v), llvm::minIntN(n),
                     llvm::maxUIntN(n));
}

// Make sure that V is aligned to N bytes, where n is expected to be a power
// of two so that v & (n - 1) isolates exactly the bits that must be zero.
inline void checkAlignment(uint8_t *loc, uint64_t v, int n,
                           const Relocation &rel) {
  if ((v & (n - 1)) != 0)
    error(getErrorLocation(loc) + "improper alignment for relocation " +
          lld::toString(rel.type) + ": 0x" + llvm::utohexstr(v) +
          " is not aligned to " + Twine(n) + " bytes");
}

// Endianness-aware read/write.
inline uint16_t read16(const void *p) {
  return llvm::support::endian::read16(p, config->endianness);
}

inline uint32_t read32(const void *p) {
  return llvm::support::endian::read32(p, config->endianness);
}

inline uint64_t read64(const void *p) {
  return llvm::support::endian::read64(p, config->endianness);
}

inline void write16(void *p, uint16_t v) {
  llvm::support::endian::write16(p, v, config->endianness);
}

inline void write32(void *p, uint32_t v) {
  llvm::support::endian::write32(p, v, config->endianness);
}

inline void write64(void *p, uint64_t v) {
  llvm::support::endian::write64(p, v, config->endianness);
}
} // namespace elf
} // namespace lld

#endif