//===-- ARMSubtarget.h - Define Subtarget for the ARM ----------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the ARM specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
#define LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H

#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFrameLowering.h"
#include "ARMISelLowering.h"
#include "ARMSelectionDAGInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <memory>
#include <string>

#define GET_SUBTARGETINFO_HEADER
#include "ARMGenSubtargetInfo.inc"

namespace llvm {

class ARMBaseTargetMachine;
class GlobalValue;
class StringRef;

class ARMSubtarget : public ARMGenSubtargetInfo {
protected:
  enum ARMProcFamilyEnum {
    Others,

    CortexA12,
    CortexA15,
    CortexA17,
    CortexA32,
    CortexA35,
    CortexA5,
    CortexA53,
    CortexA55,
    CortexA57,
    CortexA7,
    CortexA72,
    CortexA73,
    CortexA75,
    CortexA76,
    CortexA77,
    CortexA78,
    CortexA78C,
    CortexA8,
    CortexA9,
    CortexM3,
    CortexM7,
    CortexR4,
    CortexR4F,
    CortexR5,
    CortexR52,
    CortexR7,
    CortexX1,
    Exynos,
    Krait,
    Kryo,
    NeoverseN1,
    NeoverseN2,
    NeoverseV1,
    Swift
  };
  enum ARMProcClassEnum {
    None,

    AClass,
    MClass,
    RClass
  };
  enum ARMArchEnum {
    ARMv2,
    ARMv2a,
    ARMv3,
    ARMv3m,
    ARMv4,
    ARMv4t,
    ARMv5,
    ARMv5t,
    ARMv5te,
    ARMv5tej,
    ARMv6,
    ARMv6k,
    ARMv6kz,
    ARMv6m,
    ARMv6sm,
    ARMv6t2,
    ARMv7a,
    ARMv7em,
    ARMv7m,
    ARMv7r,
    ARMv7ve,
    ARMv81a,
    ARMv82a,
    ARMv83a,
    ARMv84a,
    ARMv85a,
    ARMv86a,
    ARMv87a,
    ARMv8a,
    ARMv8mBaseline,
    ARMv8mMainline,
    ARMv8r,
    ARMv81mMainline,
  };

public:
  /// What kind of timing do load multiple/store multiple instructions have.
  enum ARMLdStMultipleTiming {
    /// Can load/store 2 registers/cycle.
    DoubleIssue,
    /// Can load/store 2 registers/cycle, but needs an extra cycle if the access
    /// is not 64-bit aligned.
    DoubleIssueCheckUnalignedAccess,
    /// Can load/store 1 register/cycle.
    SingleIssue,
    /// Can load/store 1 register/cycle, but needs an extra cycle for address
    /// computation and potentially also for register writeback.
    SingleIssuePlusExtras,
  };

protected:
  /// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
  ARMProcFamilyEnum ARMProcFamily = Others;

  /// ARMProcClass - ARM processor class: None, AClass, RClass or MClass.
  ARMProcClassEnum ARMProcClass = None;

  /// ARMArch - ARM architecture
  ARMArchEnum ARMArch = ARMv4t;

  /// HasV4TOps, HasV5TOps, HasV5TEOps,
  /// HasV6Ops, HasV6MOps, HasV6KOps, HasV6T2Ops, HasV7Ops, HasV8Ops -
  /// Specify whether the target supports specific ARM ISA variants.
  bool HasV4TOps = false;
  bool HasV5TOps = false;
  bool HasV5TEOps = false;
  bool HasV6Ops = false;
  bool HasV6MOps = false;
  bool HasV6KOps = false;
  bool HasV6T2Ops = false;
  bool HasV7Ops = false;
  bool HasV8Ops = false;
  bool HasV8_1aOps = false;
  bool HasV8_2aOps = false;
  bool HasV8_3aOps = false;
  bool HasV8_4aOps = false;
  bool HasV8_5aOps = false;
  bool HasV8_6aOps = false;
  bool HasV8_7aOps = false;
  bool HasV8MBaselineOps = false;
  bool HasV8MMainlineOps = false;
  bool HasV8_1MMainlineOps = false;
  bool HasMVEIntegerOps = false;
  bool HasMVEFloatOps = false;
  bool HasCDEOps = false;

  /// HasVFPv2, HasVFPv3, HasVFPv4, HasFPARMv8, HasNEON - Specify what
  /// floating point ISAs are supported.
  bool HasVFPv2 = false;
  bool HasVFPv3 = false;
  bool HasVFPv4 = false;
  bool HasFPARMv8 = false;
  bool HasNEON = false;
  bool HasFPRegs = false;
  bool HasFPRegs16 = false;
  bool HasFPRegs64 = false;

  /// Versions of the VFP flags restricted to single precision, or to
  /// 16 d-registers, or both.
  bool HasVFPv2SP = false;
  bool HasVFPv3SP = false;
  bool HasVFPv4SP = false;
  bool HasFPARMv8SP = false;
  bool HasVFPv3D16 = false;
  bool HasVFPv4D16 = false;
  bool HasFPARMv8D16 = false;
  bool HasVFPv3D16SP = false;
  bool HasVFPv4D16SP = false;
  bool HasFPARMv8D16SP = false;

  /// HasDotProd - True if the ARMv8.2A dot product instructions are supported.
  bool HasDotProd = false;

  /// UseNEONForSinglePrecisionFP - if the NEONFP attribute has been
  /// specified. Use the method useNEONForSinglePrecisionFP() to
  /// determine if NEON should actually be used.
  bool UseNEONForSinglePrecisionFP = false;

  /// UseMulOps - True if non-microcoded fused integer multiply-add and
  /// multiply-subtract instructions should be used.
  bool UseMulOps = false;

  /// SlowFPVMLx - If the VFP2 / NEON instructions are available, indicates
  /// whether the FP VML[AS] instructions are slow (if so, don't use them).
  bool SlowFPVMLx = false;

  /// SlowFPVFMx - If the VFP4 / NEON instructions are available, indicates
  /// whether the FP VFM[AS] instructions are slow (if so, don't use them).
  bool SlowFPVFMx = false;

  /// HasVMLxForwarding - If true, NEON has special multiplier accumulator
  /// forwarding to allow mul + mla being issued back to back.
  bool HasVMLxForwarding = false;

  /// SlowFPBrcc - True if floating point compare + branch is slow.
  bool SlowFPBrcc = false;

  /// InThumbMode - True if compiling for Thumb, false for ARM.
  bool InThumbMode = false;

  /// UseSoftFloat - True if we're using software floating point features.
  bool UseSoftFloat = false;

  /// UseMISched - True if MachineScheduler should be used for this subtarget.
  bool UseMISched = false;

  /// DisablePostRAScheduler - True if post-register-allocation scheduling
  /// should be disabled for this subtarget.
  bool DisablePostRAScheduler = false;

  /// HasThumb2 - True if Thumb2 instructions are supported.
  bool HasThumb2 = false;

  /// NoARM - True if subtarget does not support ARM mode execution.
  bool NoARM = false;

  /// ReserveR9 - True if R9 is not available as a general purpose register.
  bool ReserveR9 = false;

  /// NoMovt - True if MOVT / MOVW pairs are not used for materialization of
  /// 32-bit imms (including global addresses).
  bool NoMovt = false;

  /// SupportsTailCall - True if the OS supports tail call. The dynamic linker
  /// must be able to synthesize call stubs for interworking between ARM and
  /// Thumb.
  bool SupportsTailCall = false;

  /// HasFP16 - True if subtarget supports half-precision FP conversions
  bool HasFP16 = false;

  /// HasFullFP16 - True if subtarget supports half-precision FP operations
  bool HasFullFP16 = false;

  /// HasFP16FML - True if subtarget supports half-precision FP fml operations
  bool HasFP16FML = false;

  /// HasBF16 - True if subtarget supports BFloat16 floating point operations
  bool HasBF16 = false;

  /// HasMatMulInt8 - True if subtarget supports 8-bit integer matrix multiply
  bool HasMatMulInt8 = false;

  /// HasD32 - True if subtarget has the full 32 double precision
  /// FP registers for VFPv3.
  bool HasD32 = false;

  /// HasHardwareDivideInThumb - True if subtarget supports [su]div in Thumb mode
  bool HasHardwareDivideInThumb = false;

  /// HasHardwareDivideInARM - True if subtarget supports [su]div in ARM mode
  bool HasHardwareDivideInARM = false;

  /// HasDataBarrier - True if the subtarget supports DMB / DSB data barrier
  /// instructions.
  bool HasDataBarrier = false;

  /// HasFullDataBarrier - True if the subtarget supports DFB data barrier
  /// instruction.
  bool HasFullDataBarrier = false;

  /// HasV7Clrex - True if the subtarget supports CLREX instructions
  bool HasV7Clrex = false;

  /// HasAcquireRelease - True if the subtarget supports v8 atomic
  /// instructions (LDA/LDAEX etc.)
  bool HasAcquireRelease = false;

  /// Pref32BitThumb - If true, codegen would prefer 32-bit Thumb instructions
  /// over 16-bit ones.
  bool Pref32BitThumb = false;

  /// AvoidCPSRPartialUpdate - If true, codegen would avoid using instructions
  /// that partially update CPSR and add false dependency on the previous
  /// CPSR setting instruction.
  bool AvoidCPSRPartialUpdate = false;

  /// CheapPredicableCPSRDef - If true, disable +1 predication cost
  /// for instructions updating CPSR. Enabled for Cortex-A57.
  bool CheapPredicableCPSRDef = false;

  /// AvoidMOVsShifterOperand - If true, codegen should avoid using flag setting
  /// movs with shifter operand (i.e. asr, lsl, lsr).
  bool AvoidMOVsShifterOperand = false;

  /// HasRetAddrStack - Some processors perform return stack prediction.
  /// CodeGen should avoid issuing "normal" call instructions to callees
  /// which do not return.
  bool HasRetAddrStack = false;

  /// HasBranchPredictor - True if the subtarget has a branch predictor. Having
  /// a branch predictor or not changes the expected cost of taking a branch
  /// which affects the choice of whether to use predicated instructions.
  bool HasBranchPredictor = true;

  /// HasMPExtension - True if the subtarget supports Multiprocessing
  /// extension (ARMv7 only).
  bool HasMPExtension = false;

  /// HasVirtualization - True if the subtarget supports the Virtualization
  /// extension.
  bool HasVirtualization = false;

  /// HasFP64 - If true, the floating point unit supports double
  /// precision.
  bool HasFP64 = false;

  /// If true, the processor supports the Performance Monitor Extensions. These
  /// include a generic cycle-counter as well as more fine-grained (often
  /// implementation-specific) events.
  bool HasPerfMon = false;

  /// HasTrustZone - if true, processor supports TrustZone security extensions
  bool HasTrustZone = false;

  /// Has8MSecExt - if true, processor supports ARMv8-M Security Extensions
  bool Has8MSecExt = false;

  /// HasSHA2 - if true, processor supports SHA1 and SHA256
  bool HasSHA2 = false;

  /// HasAES - if true, processor supports AES
  bool HasAES = false;

  /// HasCrypto - if true, processor supports Cryptography extensions
  bool HasCrypto = false;

  /// HasCRC - if true, processor supports CRC instructions
  bool HasCRC = false;

  /// HasRAS - if true, the processor supports RAS extensions
  bool HasRAS = false;

  /// HasLOB - if true, the processor supports the Low Overhead Branch extension
  bool HasLOB = false;

  /// If true, the instructions "vmov.i32 d0, #0" and "vmov.i32 q0, #0" are
  /// particularly effective at zeroing a VFP register.
  bool HasZeroCycleZeroing = false;

  /// HasFPAO - if true, processor does positive address offset computation faster
  bool HasFPAO = false;

  /// HasFuseAES - if true, processor executes back to back AES instruction
  /// pairs faster.
  bool HasFuseAES = false;

  /// HasFuseLiterals - if true, processor executes back to back
  /// bottom and top halves of literal generation faster.
  bool HasFuseLiterals = false;

  /// If true, if-conversion may decide to leave some instructions unpredicated.
  bool IsProfitableToUnpredicate = false;

  /// If true, VMOV will be favored over VGETLNi32.
  bool HasSlowVGETLNi32 = false;

  /// If true, VMOV will be favored over VDUP.
  bool HasSlowVDUP32 = false;

  /// If true, VMOVSR will be favored over VMOVDRR.
  bool PreferVMOVSR = false;

  /// If true, ISHST barriers will be used for Release semantics.
  bool PreferISHST = false;

  /// If true, a VLDM/VSTM starting with an odd register number is considered to
  /// take more microops than single VLDRS/VSTRS.
  bool SlowOddRegister = false;

  /// If true, loading into a D subregister will be penalized.
  bool SlowLoadDSubregister = false;

  /// If true, use a wider stride when allocating VFP registers.
  bool UseWideStrideVFP = false;

  /// If true, the AGU and NEON/FPU units are multiplexed.
  bool HasMuxedUnits = false;

  /// If true, VMOVS will never be widened to VMOVD.
  bool DontWidenVMOVS = false;

  /// If true, splat a register between VFP and NEON instructions.
  bool SplatVFPToNeon = false;

  /// If true, run the MLx expansion pass.
  bool ExpandMLx = false;

  /// If true, VFP/NEON VMLA/VMLS have special RAW hazards.
  bool HasVMLxHazards = false;

  /// If true, read thread pointer from coprocessor register.
  bool ReadTPHard = false;

  /// If true, VMOVRS, VMOVSR and VMOVS will be converted from VFP to NEON.
  bool UseNEONForFPMovs = false;

  /// If true, VLDn instructions take an extra cycle for unaligned accesses.
  bool CheckVLDnAlign = false;

  /// If true, VFP instructions are not pipelined.
  bool NonpipelinedVFP = false;

  /// StrictAlign - If true, the subtarget disallows unaligned memory
  /// accesses for some types.  For details, see
  /// ARMTargetLowering::allowsMisalignedMemoryAccesses().
  bool StrictAlign = false;

  /// RestrictIT - If true, the subtarget disallows generation of deprecated IT
  /// blocks, to conform to the ARMv8 rule.
  bool RestrictIT = false;

  /// HasDSP - If true, the subtarget supports the DSP (saturating arith
  /// and such) instructions.
  bool HasDSP = false;

  /// NaCl TRAP instruction is generated instead of the regular TRAP.
  bool UseNaClTrap = false;

  /// Generate calls via indirect call instructions.
  bool GenLongCalls = false;

  /// Generate code that does not contain data access to code sections.
  bool GenExecuteOnly = false;

  /// True if the target machine allows unsafe FP math (e.g. use of NEON FP).
  bool UnsafeFPMath = false;

  /// UseSjLjEH - If true, the target uses SjLj exception handling (e.g. iOS).
  bool UseSjLjEH = false;

  /// Has speculation barrier
  bool HasSB = false;

  /// Implicitly convert an instruction to a different one if its immediates
  /// cannot be encoded. For example, ADD r0, r1, #FFFFFFFF -> SUB r0, r1, #1.
  bool NegativeImmediates = true;

  /// Harden against Straight Line Speculation for Returns and Indirect
  /// Branches.
  bool HardenSlsRetBr = false;

  /// Harden against Straight Line Speculation for indirect calls.
  bool HardenSlsBlr = false;

  /// Generate thunk code for SLS mitigation in the normal text section.
  bool HardenSlsNoComdat = false;

  /// stackAlignment - The minimum alignment known to hold of the stack frame on
  /// entry to the function and which must be maintained by every function.
  Align stackAlignment = Align(4);

  /// CPUString - String name of used CPU.
  std::string CPUString;

  unsigned MaxInterleaveFactor = 1;

  /// Clearance before partial register updates (in number of instructions)
  unsigned PartialUpdateClearance = 0;

  /// What kind of timing do load multiple/store multiple have (double issue,
  /// single issue etc).
  ARMLdStMultipleTiming LdStMultipleTiming = SingleIssue;

  /// The adjustment that we need to apply to get the operand latency from the
  /// operand cycle returned by the itinerary data for pre-ISel operands.
  int PreISelOperandLatencyAdjustment = 2;

  /// What alignment is preferred for loop bodies, in log2(bytes).
  unsigned PrefLoopLogAlignment = 0;

  /// The cost factor for MVE instructions, representing the multiple beats an
  /// instruction can take. The default is 2 (set in initSubtargetFeatures, so
  /// that subtarget features can set it to less than 2).
  unsigned MVEVectorCostFactor = 0;

  /// OptMinSize - True if we're optimising for minimum code size, equal to
  /// the function attribute.
  bool OptMinSize = false;

  /// IsLittle - The target is Little Endian
  bool IsLittle;

  /// TargetTriple - What processor and OS we're targeting.
  Triple TargetTriple;

  /// SchedModel - Processor specific instruction costs.
  MCSchedModel SchedModel;

  /// Selected instruction itineraries (one entry per itinerary class).
  InstrItineraryData InstrItins;

  /// Options passed via command line that could influence the target
  const TargetOptions &Options;

  const ARMBaseTargetMachine &TM;

public:
  /// This constructor initializes the data members to match that
  /// of the specified triple.
  ///
  ARMSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
               const ARMBaseTargetMachine &TM, bool IsLittle,
               bool MinSize = false);

  /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
  /// that still makes it profitable to inline the call.
  unsigned getMaxInlineSizeThreshold() const {
    return 64;
  }

  /// getMaxMemcpyTPInlineSizeThreshold - Returns the maximum size
  /// that still makes it profitable to inline a llvm.memcpy as a Tail
  /// Predicated loop.
  /// This threshold should only be used for constant size inputs.
  unsigned getMaxMemcpyTPInlineSizeThreshold() const { return 128; }

  /// ParseSubtargetFeatures - Parses the features string, setting the
  /// specified subtarget options. The definition of this function is
  /// auto-generated by tblgen.
  void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);

  /// initializeSubtargetDependencies - Initializes using a CPU and feature string
  /// so that we can use initializer lists for subtarget initialization.
  ARMSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);

  const ARMSelectionDAGInfo *getSelectionDAGInfo() const override {
    return &TSInfo;
  }

  const ARMBaseInstrInfo *getInstrInfo() const override {
    return InstrInfo.get();
  }

  const ARMTargetLowering *getTargetLowering() const override {
    return &TLInfo;
  }

  const ARMFrameLowering *getFrameLowering() const override {
    return FrameLowering.get();
  }

  const ARMBaseRegisterInfo *getRegisterInfo() const override {
    return &InstrInfo->getRegisterInfo();
  }

  const CallLowering *getCallLowering() const override;
  InstructionSelector *getInstructionSelector() const override;
  const LegalizerInfo *getLegalizerInfo() const override;
  const RegisterBankInfo *getRegBankInfo() const override;

private:
  ARMSelectionDAGInfo TSInfo;
  // Either Thumb1FrameLowering or ARMFrameLowering.
  std::unique_ptr<ARMFrameLowering> FrameLowering;
  // Either Thumb1InstrInfo or Thumb2InstrInfo.
  std::unique_ptr<ARMBaseInstrInfo> InstrInfo;
  ARMTargetLowering   TLInfo;

  /// GlobalISel related APIs.
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;

  void initializeEnvironment();
  void initSubtargetFeatures(StringRef CPU, StringRef FS);
  ARMFrameLowering *initializeFrameLowering(StringRef CPU, StringRef FS);

  std::bitset<8> CoprocCDE = {};
public:
  void computeIssueWidth();

  bool hasV4TOps()  const { return HasV4TOps;  }
  bool hasV5TOps()  const { return HasV5TOps;  }
  bool hasV5TEOps() const { return HasV5TEOps; }
  bool hasV6Ops()   const { return HasV6Ops;   }
  bool hasV6MOps()  const { return HasV6MOps;  }
  bool hasV6KOps()  const { return HasV6KOps; }
  bool hasV6T2Ops() const { return HasV6T2Ops; }
  bool hasV7Ops()   const { return HasV7Ops;  }
  bool hasV8Ops()   const { return HasV8Ops;  }
  bool hasV8_1aOps() const { return HasV8_1aOps; }
  bool hasV8_2aOps() const { return HasV8_2aOps; }
  bool hasV8_3aOps() const { return HasV8_3aOps; }
  bool hasV8_4aOps() const { return HasV8_4aOps; }
  bool hasV8_5aOps() const { return HasV8_5aOps; }
  bool hasV8_6aOps() const { return HasV8_6aOps; }
  bool hasV8_7aOps() const { return HasV8_7aOps; }
  bool hasV8MBaselineOps() const { return HasV8MBaselineOps; }
  bool hasV8MMainlineOps() const { return HasV8MMainlineOps; }
  bool hasV8_1MMainlineOps() const { return HasV8_1MMainlineOps; }
  bool hasMVEIntegerOps() const { return HasMVEIntegerOps; }
  bool hasMVEFloatOps() const { return HasMVEFloatOps; }
  bool hasCDEOps() const { return HasCDEOps; }
  bool hasFPRegs() const { return HasFPRegs; }
  bool hasFPRegs16() const { return HasFPRegs16; }
  bool hasFPRegs64() const { return HasFPRegs64; }

  /// @{
  /// These functions are obsolete, please consider adding subtarget features
  /// or properties instead of calling them.
  bool isCortexA5() const { return ARMProcFamily == CortexA5; }
  bool isCortexA7() const { return ARMProcFamily == CortexA7; }
  bool isCortexA8() const { return ARMProcFamily == CortexA8; }
  bool isCortexA9() const { return ARMProcFamily == CortexA9; }
  bool isCortexA15() const { return ARMProcFamily == CortexA15; }
  bool isSwift()    const { return ARMProcFamily == Swift; }
  bool isCortexM3() const { return ARMProcFamily == CortexM3; }
  bool isCortexM7() const { return ARMProcFamily == CortexM7; }
  bool isLikeA9() const { return isCortexA9() || isCortexA15() || isKrait(); }
  bool isCortexR5() const { return ARMProcFamily == CortexR5; }
  bool isKrait() const { return ARMProcFamily == Krait; }
  /// @}

  bool hasARMOps() const { return !NoARM; }

  bool hasVFP2Base() const { return HasVFPv2SP; }
  bool hasVFP3Base() const { return HasVFPv3D16SP; }
  bool hasVFP4Base() const { return HasVFPv4D16SP; }
  bool hasFPARMv8Base() const { return HasFPARMv8D16SP; }
  bool hasNEON() const { return HasNEON;  }
  bool hasSHA2() const { return HasSHA2; }
  bool hasAES() const { return HasAES; }
  bool hasCrypto() const { return HasCrypto; }
  bool hasDotProd() const { return HasDotProd; }
  bool hasCRC() const { return HasCRC; }
  bool hasRAS() const { return HasRAS; }
  bool hasLOB() const { return HasLOB; }
  bool hasVirtualization() const { return HasVirtualization; }

  bool useNEONForSinglePrecisionFP() const {
    return hasNEON() && UseNEONForSinglePrecisionFP;
  }

  bool hasDivideInThumbMode() const { return HasHardwareDivideInThumb; }
  bool hasDivideInARMMode() const { return HasHardwareDivideInARM; }
  bool hasDataBarrier() const { return HasDataBarrier; }
  bool hasFullDataBarrier() const { return HasFullDataBarrier; }
  bool hasV7Clrex() const { return HasV7Clrex; }
  bool hasAcquireRelease() const { return HasAcquireRelease; }

  bool hasAnyDataBarrier() const {
    return HasDataBarrier || (hasV6Ops() && !isThumb());
  }

  bool useMulOps() const { return UseMulOps; }
  bool useFPVMLx() const { return !SlowFPVMLx; }
  bool useFPVFMx() const {
    return !isTargetDarwin() && hasVFP4Base() && !SlowFPVFMx;
  }
  bool useFPVFMx16() const { return useFPVFMx() && hasFullFP16(); }
  bool useFPVFMx64() const { return useFPVFMx() && hasFP64(); }
  bool hasVMLxForwarding() const { return HasVMLxForwarding; }
  bool isFPBrccSlow() const { return SlowFPBrcc; }
  bool hasFP64() const { return HasFP64; }
  bool hasPerfMon() const { return HasPerfMon; }
  bool hasTrustZone() const { return HasTrustZone; }
  bool has8MSecExt() const { return Has8MSecExt; }
  bool hasZeroCycleZeroing() const { return HasZeroCycleZeroing; }
  bool hasFPAO() const { return HasFPAO; }
  bool isProfitableToUnpredicate() const { return IsProfitableToUnpredicate; }
  bool hasSlowVGETLNi32() const { return HasSlowVGETLNi32; }
  bool hasSlowVDUP32() const { return HasSlowVDUP32; }
  bool preferVMOVSR() const { return PreferVMOVSR; }
  bool preferISHSTBarriers() const { return PreferISHST; }
  bool expandMLx() const { return ExpandMLx; }
  bool hasVMLxHazards() const { return HasVMLxHazards; }
  bool hasSlowOddRegister() const { return SlowOddRegister; }
  bool hasSlowLoadDSubregister() const { return SlowLoadDSubregister; }
  bool useWideStrideVFP() const { return UseWideStrideVFP; }
  bool hasMuxedUnits() const { return HasMuxedUnits; }
  bool dontWidenVMOVS() const { return DontWidenVMOVS; }
  bool useSplatVFPToNeon() const { return SplatVFPToNeon; }
  bool useNEONForFPMovs() const { return UseNEONForFPMovs; }
  bool checkVLDnAccessAlignment() const { return CheckVLDnAlign; }
  bool nonpipelinedVFP() const { return NonpipelinedVFP; }
  bool prefers32BitThumb() const { return Pref32BitThumb; }
  bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; }
  bool cheapPredicableCPSRDef() const { return CheapPredicableCPSRDef; }
  bool avoidMOVsShifterOperand() const { return AvoidMOVsShifterOperand; }
  bool hasRetAddrStack() const { return HasRetAddrStack; }
  bool hasBranchPredictor() const { return HasBranchPredictor; }
  bool hasMPExtension() const { return HasMPExtension; }
  bool hasDSP() const { return HasDSP; }
  bool useNaClTrap() const { return UseNaClTrap; }
  bool useSjLjEH() const { return UseSjLjEH; }
  bool hasSB() const { return HasSB; }
  bool genLongCalls() const { return GenLongCalls; }
  bool genExecuteOnly() const { return GenExecuteOnly; }
  bool hasBaseDSP() const {
    if (isThumb())
      return hasDSP();
    else
      return hasV5TEOps();
  }

  bool hasFP16() const { return HasFP16; }
  bool hasD32() const { return HasD32; }
  bool hasFullFP16() const { return HasFullFP16; }
  bool hasFP16FML() const { return HasFP16FML; }
  bool hasBF16() const { return HasBF16; }

  bool hasFuseAES() const { return HasFuseAES; }
  bool hasFuseLiterals() const { return HasFuseLiterals; }
  /// Return true if the CPU supports any kind of instruction fusion.
  bool hasFusion() const { return hasFuseAES() || hasFuseLiterals(); }

  bool hasMatMulInt8() const { return HasMatMulInt8; }

  const Triple &getTargetTriple() const { return TargetTriple; }

  bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
  bool isTargetIOS() const { return TargetTriple.isiOS(); }
  bool isTargetWatchOS() const { return TargetTriple.isWatchOS(); }
  bool isTargetWatchABI() const { return TargetTriple.isWatchABI(); }
  bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
  bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
  bool isTargetNetBSD() const { return TargetTriple.isOSNetBSD(); }
  bool isTargetWindows() const { return TargetTriple.isOSWindows(); }

  bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
  bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
  bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }

  // ARM EABI is the bare-metal EABI described in ARM ABI documents and
  // can be accessed via -target arm-none-eabi. This is NOT GNUEABI.
  // FIXME: Add a flag for bare-metal for that target and set Triple::EABI
  // even for GNUEABI, so we can make a distinction here and still conform to
  // the EABI on GNU (and Android) mode. This requires change in Clang, too.
  // FIXME: The Darwin exception is temporary, while we move users to
  // "*-*-*-macho" triples as quickly as possible.
  bool isTargetAEABI() const {
    return (TargetTriple.getEnvironment() == Triple::EABI ||
            TargetTriple.getEnvironment() == Triple::EABIHF) &&
           !isTargetDarwin() && !isTargetWindows();
  }
  bool isTargetGNUAEABI() const {
    return (TargetTriple.getEnvironment() == Triple::GNUEABI ||
            TargetTriple.getEnvironment() == Triple::GNUEABIHF) &&
           !isTargetDarwin() && !isTargetWindows();
  }
  bool isTargetMuslAEABI() const {
    return (TargetTriple.getEnvironment() == Triple::MuslEABI ||
            TargetTriple.getEnvironment() == Triple::MuslEABIHF) &&
           !isTargetDarwin() && !isTargetWindows();
  }

  // ARM Targets that support EHABI exception handling standard
  // Darwin uses SjLj. Other targets might need more checks.
  bool isTargetEHABICompatible() const {
    return (TargetTriple.getEnvironment() == Triple::EABI ||
            TargetTriple.getEnvironment() == Triple::GNUEABI ||
            TargetTriple.getEnvironment() == Triple::MuslEABI ||
            TargetTriple.getEnvironment() == Triple::EABIHF ||
            TargetTriple.getEnvironment() == Triple::GNUEABIHF ||
            TargetTriple.getEnvironment() == Triple::MuslEABIHF ||
            isTargetAndroid()) &&
           !isTargetDarwin() && !isTargetWindows();
  }

  bool isTargetHardFloat() const;

  bool isTargetAndroid() const { return TargetTriple.isAndroid(); }

  bool isXRaySupported() const override;

  bool isAPCS_ABI() const;
  bool isAAPCS_ABI() const;
  bool isAAPCS16_ABI() const;

  bool isROPI() const;
  bool isRWPI() const;

  bool useMachineScheduler() const { return UseMISched; }
  bool disablePostRAScheduler() const { return DisablePostRAScheduler; }
  bool useSoftFloat() const { return UseSoftFloat; }
  bool isThumb() const { return InThumbMode; }
  bool hasMinSize() const { return OptMinSize; }
  bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
  bool isThumb2() const { return InThumbMode && HasThumb2; }
  bool hasThumb2() const { return HasThumb2; }
  bool isMClass() const { return ARMProcClass == MClass; }
  bool isRClass() const { return ARMProcClass == RClass; }
  bool isAClass() const { return ARMProcClass == AClass; }
  bool isReadTPHard() const { return ReadTPHard; }

  bool isR9Reserved() const {
    return isTargetMachO() ? (ReserveR9 || !HasV6Ops) : ReserveR9;
  }

  MCPhysReg getFramePointerReg() const {
    if (isTargetDarwin() || (!isTargetWindows() && isThumb()))
      return ARM::R7;
    return ARM::R11;
  }

  /// Returns true if the frame setup is split into two separate pushes (first
  /// r0-r7,lr then r8-r11), principally so that the frame pointer is adjacent
  /// to lr. This is always required on Thumb1-only targets, as the push and
  /// pop instructions can't access the high registers.
  bool splitFramePushPop(const MachineFunction &MF) const {
    return (getFramePointerReg() == ARM::R7 &&
            MF.getTarget().Options.DisableFramePointerElim(MF)) ||
           isThumb1Only();
  }

  bool useStride4VFPs() const;

  bool useMovt() const;

  bool supportsTailCall() const { return SupportsTailCall; }

  bool allowsUnalignedMem() const { return !StrictAlign; }

  bool restrictIT() const { return RestrictIT; }

  const std::string & getCPUString() const { return CPUString; }

  bool isLittle() const { return IsLittle; }

  unsigned getMispredictionPenalty() const;

  /// Returns true if machine scheduler should be enabled.
  bool enableMachineScheduler() const override;

  /// True for some subtargets at > -O0.
  bool enablePostRAScheduler() const override;

  /// True for some subtargets at > -O0.
  bool enablePostRAMachineScheduler() const override;

  /// Check whether this subtarget wants to use subregister liveness.
  bool enableSubRegLiveness() const override;

  /// Enable use of alias analysis during code generation (during MI
  /// scheduling, DAGCombine, etc.).
  bool useAA() const override { return true; }

  /// enableAtomicExpand - True if we need to expand our atomics.
  bool enableAtomicExpand() const override;

  /// getInstrItins - Return the instruction itineraries based on subtarget
  /// selection.
  const InstrItineraryData *getInstrItineraryData() const override {
    return &InstrItins;
  }

  /// getStackAlignment - Returns the minimum alignment known to hold of the
  /// stack frame on entry to the function and which must be maintained by every
  /// function for this subtarget.
  Align getStackAlignment() const { return stackAlignment; }

  unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }

  unsigned getPartialUpdateClearance() const { return PartialUpdateClearance; }

  ARMLdStMultipleTiming getLdStMultipleTiming() const {
    return LdStMultipleTiming;
  }
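
  // Illustrative usage sketch (an assumption about client code, not part of
  // the upstream interface): a pass deciding whether to form LDM/STM could
  // branch on the reported timing, e.g.
  //
  //   if (STI.getLdStMultipleTiming() ==
  //       ARMSubtarget::DoubleIssueCheckUnalignedAccess)
  //     /* prefer 64-bit-aligned base addresses for multi-register accesses */;
  //
  // where `STI` is a hypothetical reference to this ARMSubtarget.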

  int getPreISelOperandLatencyAdjustment() const {
    return PreISelOperandLatencyAdjustment;
  }

  /// True if the GV will be accessed via an indirect symbol.
  bool isGVIndirectSymbol(const GlobalValue *GV) const;

  /// Returns true if the GV will be accessed through the GOT.
  bool isGVInGOT(const GlobalValue *GV) const;

  /// True if fast-isel is used.
  bool useFastISel() const;

  /// Returns the correct return opcode for the current feature set.
  /// Use BX if available to allow mixing thumb/arm code, but fall back
  /// to plain mov pc,lr on ARMv4.
  unsigned getReturnOpcode() const {
    if (isThumb())
      return ARM::tBX_RET;
    if (hasV4TOps())
      return ARM::BX_RET;
    return ARM::MOVPCLR;
  }
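
  // Minimal usage sketch (illustrative only; the real call sites live in the
  // frame-lowering code, not in this header): an epilogue emitter could
  // materialise the return with the opcode reported above, e.g.
  //
  //   BuildMI(MBB, MBB.end(), DebugLoc(),
  //           TII->get(Subtarget.getReturnOpcode()))
  //       .add(predOps(ARMCC::AL)); // returns are predicable; use "always"
  //
  // where `MBB`, `TII` and `Subtarget` stand for the caller's
  // MachineBasicBlock, ARMBaseInstrInfo and ARMSubtarget, named here only for
  // illustration.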

  /// Allow movt+movw for PIC global address calculation.
  /// ELF does not have GOT relocations for movt+movw.
  /// ROPI does not use GOT.
  bool allowPositionIndependentMovt() const {
    return isROPI() || !isTargetELF();
  }

  unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }

  unsigned
  getMVEVectorCostFactor(TargetTransformInfo::TargetCostKind CostKind) const {
    if (CostKind == TargetTransformInfo::TCK_CodeSize)
      return 1;
    return MVEVectorCostFactor;
  }
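
  // Minimal sketch of the intended use (an illustration; names other than the
  // getter are hypothetical): cost-model code scales a per-instruction base
  // cost by this factor, e.g.
  //
  //   unsigned Cost = BaseCost * ST->getMVEVectorCostFactor(
  //                                  TargetTransformInfo::TCK_RecipThroughput);
  //
  // so MVE operations are costed per beat except when optimising for size,
  // where the factor above is 1.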

  bool ignoreCSRForAllocationOrder(const MachineFunction &MF,
                                   unsigned PhysReg) const override;
  unsigned getGPRAllocationOrder(const MachineFunction &MF) const;

  bool hardenSlsRetBr() const { return HardenSlsRetBr; }
  bool hardenSlsBlr() const { return HardenSlsBlr; }
  bool hardenSlsNoComdat() const { return HardenSlsNoComdat; }
};

} // end namespace llvm

#endif  // LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H