//===-- Operator.cpp - Implement the LLVM operators -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the non-inline methods for the LLVM Operator classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Operator.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

#include "ConstantsContext.h"

namespace llvm {
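// GEPOperator is a thin wrapper over either a GetElementPtrInst or a GEP
// constant expression; these accessors dispatch to the concrete class.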
Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

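// A minimal usage sketch (caller-side, names illustrative): combine the base
// pointer's known alignment with whatever alignment the GEP preserves:
//
//   Align PtrAlign = GEP->getPointerOperand()->getPointerAlignment(DL);
//   Align Preserved = GEP->getMaxPreservedAlignment(DL);
//   Align AccessAlign = commonAlignment(PtrAlign, Preserved);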
Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset for every level of the GEP and
  // accumulate the minimum alignment into Result.

  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    int64_t Offset = 1;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
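      // Struct indices in a GEP are required to be ConstantInts, so OpC is
      // guaranteed non-null here.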
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, assume 1: an odd index produces the
      // worst-aligned offset.
      int64_t ElemCount = 1;
      if (OpC)
        ElemCount = OpC->getZExtValue();
      Offset = DL.getTypeAllocSize(GTI.getIndexedType()) * ElemCount;
    }
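    // MinAlign yields the largest power of two that divides both this level's
    // offset and the alignment accumulated so far.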
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}

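// A minimal usage sketch (caller-side, names illustrative): Offset must be
// pre-sized to the pointer's index width and start at zero:
//
//   APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   if (GEP->accumulateConstantOffset(DL, Offset))
//     ; // Offset now holds the GEP's total constant byte offset.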
bool GEPOperator::accumulateConstantOffset(
    const DataLayout &DL, APInt &Offset,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  SmallVector<const Value *> Index(value_op_begin() + 1, value_op_end());
  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                               DL, Offset, ExternalAnalysis);
}

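// A sketch of an ExternalAnalysis callback (illustrative; lookupKnownIndex is
// a hypothetical caller-side helper, not an LLVM API). The callback maps a
// non-constant index to a known APInt value, or fails:
//
//   auto Analysis = [&](Value &V, APInt &Idx) -> bool {
//     if (std::optional<APInt> Known = lookupKnownIndex(V)) {
//       Idx = *Known;
//       return true;
//     }
//     return false; // Unknown index: accumulation bails out.
//   };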
bool GEPOperator::accumulateConstantOffset(
    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
  bool UsedExternalAnalysis = false;
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    APInt IndexedSize = APInt(Offset.getBitWidth(), Size);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // The external analysis can return a result higher or lower than the
      // value it represents, so we need to detect overflow/underflow.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      Offset = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
    }
    return true;
  };
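  // Walk the indices with a type iterator seeded from the explicit source
  // element type, mirroring gep_type_begin/gep_type_end on a real GEP.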
  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = isa<ScalableVectorType>(GTI.getIndexedType());

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto *ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            DL.getTypeAllocSize(GTI.getIndexedType())))
        return false;
      continue;
    }

    // The operand is not constant; check whether an external analysis was
    // provided. External analysis is not applicable to a struct type.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex,
                          DL.getTypeAllocSize(GTI.getIndexedType())))
      return false;
  }
  return true;
}

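// A minimal usage sketch (caller-side, names illustrative): decompose a GEP
// into a constant byte offset plus a sum of scaled variable indices, so that
// the total offset equals ConstOffset + sum over V of (V * VarOffsets[V]):
//
//   unsigned BW = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
//   MapVector<Value *, APInt> VarOffsets;
//   APInt ConstOffset(BW, 0);
//   if (GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
//     ; // Decomposition succeeded.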
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

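  // Like AccumulateOffset in accumulateConstantOffset above, but infallible:
  // every index folded here is a compile-time constant, so plain (wrapping)
  // arithmetic suffices.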
  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize = APInt(BitWidth, Size);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = isa<ScalableVectorType>(GTI.getIndexedType());

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto *ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      // TODO: If the runtime value is accessible at any point before DWARF
      // emission, then we could potentially keep a forward reference to it
      // in the debug value to be filled in later.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            DL.getTypeAllocSize(GTI.getIndexedType()));
      continue;
    }

    if (STy || ScalableType)
      return false;
    // Insert an initial offset of 0 for V if none exists already, then
    // increment the offset by IndexedSize.
    VariableOffsets.insert({V, APInt(BitWidth, 0)});
    APInt IndexedSize =
        APInt(BitWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    VariableOffsets[V] += IndexedSize;
  }
  return true;
}
} // namespace llvm