//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if, for a particular value,
// there is a path from use to definition that crosses a suspend block.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
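//
// For example (illustrative only; actual field indices and value names are
// chosen by the layout code in this file), a value live across a suspend:
//
//   %x = call i32 @f()                  ; definition
//   ; ... suspend point ...
//   call void @g(i32 %x)                ; use after the suspend
//
// becomes, conceptually:
//
//   %x = call i32 @f()
//   %x.addr = getelementptr %f.Frame, %f.Frame* %frame, i32 0, i32 N
//   store i32 %x, i32* %x.addr          ; spill at the definition
//   ; ... suspend point ...
//   %x.reload = load i32, i32* %x.addr  ; reload at the use
//   call void @g(i32 %x.reload)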
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

static cl::opt<bool> EnableReuseStorageInFrame(
    "reuse-storage-in-coroutine-frame", cl::Hidden,
    cl::desc(
        "Enable the optimization which would reuse the storage in the "
        "coroutine frame for allocas whose lifetime ranges are not "
        "overlapped, for testing purposes"),
    llvm::cl::init(false));

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between the blocks and numbers.
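// The blocks are sorted by address, so blockToIndex can use binary search
// (llvm::lower_bound) for the block-to-index direction.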
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer whether,
// given two BasicBlocks A and B, there is a path from A to B that passes
// through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes:  a bit vector which contains a set of indices of blocks that can
//              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
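//
// For example (illustrative), given a CFG A -> S -> B where S contains a
// suspend point: after propagation B.Consumes = {A, S, B} and
// B.Kills = {A, S}, so a value defined in A and used in B is reported as
// crossing a suspend point.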
//
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }

  bool isDefinitionAcrossSuspend(Value &V, User *U) const {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return isDefinitionAcrossSuspend(*Arg, U);
    if (auto *Inst = dyn_cast<Instruction>(&V))
      return isDefinitionAcrossSuspend(*Inst, U);

    llvm_unreachable(
        "Coroutines can only collect Arguments and Instructions now.");
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
  // the code beyond coro.end is reachable during initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }

  // Iterate propagating consumes and kills until they stop changing.
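  // This is a standard forward dataflow fixed-point computation: the loop
  // below keeps propagating the bitsets along CFG edges until nothing
  // changes.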
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks it
          // consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during initial invocation
          // of the coroutine while all the data are still available on the
          // stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that it is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  uint64_t getAlign(Value *V) const {
    auto Iter = FieldAlignMap.find(V);
    assert(Iter != FieldAlignMap.end());
    return Iter->second;
  }

  void setAlign(Value *V, uint64_t Align) {
    assert(FieldAlignMap.count(V) == 0);
    FieldAlignMap.insert({V, Align});
  }

  uint64_t getOffset(Value *V) const {
    auto Iter = FieldOffsetMap.find(V);
    assert(Iter != FieldOffsetMap.end());
    return Iter->second;
  }

  void setOffset(Value *V, uint64_t Offset) {
    assert(FieldOffsetMap.count(V) == 0);
    FieldOffsetMap.insert({V, Offset});
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They will be first set
  // with their original insertion field index. After the frame is built, their
  // indexes will be updated into the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. They are set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldAlignMap;
  // Map from values to their offset on the frame. They are set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldOffsetMap;
};
} // namespace

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << "--------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  Optional<Align> MaxFrameAlignment;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
                   Optional<Align> MaxFrameAlignment)
      : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
                                               bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
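    // (e.g., an `alloca i32, i32 4` gets a [4 x i32] field.)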
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///         if (cond) {
  ///             big_structure a;
  ///             process(a);
  ///             co_await something();
  ///         } else {
  ///             big_structure b;
  ///             process2(b);
  ///             co_await something();
  ///         }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapping sets, so that allocas in the
  /// same non-overlapping set can share one slot in the coroutine frame. It
  /// then adds a field for each non-overlapping set, using the largest type
  /// in the set as the field type.
  ///
  /// Side effects: Because we sort the allocas, the order of allocas in the
  /// frame may differ from the order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
                                      bool IsHeader = false,
                                      bool IsSpillOfValue = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // For an alloca with size=0, we don't need to add a field; it can
    // just point to any index in the frame. Use index 0.
    if (FieldSize == 0) {
      return 0;
    }

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    // If we are spilling values we don't need to worry about ABI alignment
    // concerns.
    auto ABIAlign = DL.getABITypeAlign(Ty);
    Align TyAlignment =
        (IsSpillOfValue && MaxFrameAlignment)
            ? (*MaxFrameAlignment < ABIAlign ? *MaxFrameAlignment : ABIAlign)
            : ABIAlign;
    if (!FieldAlignment) {
      FieldAlignment = TyAlignment;
    }

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

    // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }

  Field getLayoutField(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id];
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    auto Field = B.getLayoutField(getFieldIndex(I));
    setFieldIndex(I, Field.LayoutFieldIndex);
    setAlign(I, Field.Alignment.value());
    setOffset(I, Field.Offset);
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlapedAllocas;

  // We need to add fields for the allocas at the end of this function.
  auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
    for (auto AllocaList : NonOverlapedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.OptimizeFrame && !EnableReuseStorageInFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the lifetime ranges of all allocas overlap
  // in the blocks that contain coro.end and in their successor blocks.
  // So we choose to skip these blocks when we calculate the lifetime range
  // for each alloca. That should be reasonable, since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, that
  // case seems too complex to handle. And it is harmless to skip these
  // patterns, since doing so merely prevents putting the allocas into the
  // same slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto CoroSuspendInst : Shape.CoroSuspends) {
    for (auto U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedSize();
  };
  // Put larger allocas in the front, so that they have higher priority to
  // merge, which can potentially save more space. This also keeps each
  // AllocaSet ordered, so we can easily get the largest Alloca in an
  // AllocaSet.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Check whether the Alloca does not interfere with any existing
    // NonOverlappedAllocaSet. If so, insert the alloca into that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlapedAllocas) {
      assert(!AllocaSet.empty() &&
             "An alloca set being processed shouldn't be empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, then the
      // address of A satisfies the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies for handling the
      // alignment information during the merging process, but they seem hard
      // to implement and would provide little benefit.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the default target destination for each switch statement
  // we modified above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This debug output tells us which allocas are merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet
                  : NonOverlapedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set\n";
      dbgs() << "\tAllocas are\n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
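  // (A non-packed struct would have LLVM round each element's offset up to
  // the element type's ABI alignment, invalidating the computed layout.)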
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type*, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}

static void cacheDIVar(FrameDataInfo &FrameData,
                       DenseMap<Value *, DILocalVariable *> &DIVarCache) {
  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) != DIVarCache.end())
      continue;

    auto DDIs = FindDbgDeclareUses(V);
    auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
      return DDI->getExpression()->getNumElements() == 0;
    });
    if (I != DDIs.end())
      DIVarCache.insert({V, (*I)->getVariable()});
  }
}

/// Create a name for the given Type. It uses MDString to store the newly
/// created string in order to avoid a memory leak.
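/// For example, an i32 becomes "__int_32" and an i32* becomes
/// "__int_32_Ptr".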
static StringRef solveTypeName(Type *Ty) {
  if (Ty->isIntegerTy()) {
    // The longest name in common may be '__int_128', which has 9 characters.
    SmallString<16> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
    auto *MDName = MDString::get(Ty->getContext(), OS.str());
    return MDName->getString();
  }

  if (Ty->isFloatingPointTy()) {
    if (Ty->isFloatTy())
      return "__float_";
    if (Ty->isDoubleTy())
      return "__double_";
    return "__floating_type_";
  }

  if (Ty->isPointerTy()) {
    auto *PtrTy = cast<PointerType>(Ty);
    Type *PointeeTy = PtrTy->getPointerElementType();
    auto Name = solveTypeName(PointeeTy);
    if (Name == "UnknownType")
      return "PointerType";
    SmallString<16> Buffer;
    Twine(Name + "_Ptr").toStringRef(Buffer);
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  if (Ty->isStructTy()) {
    if (!cast<StructType>(Ty)->hasName())
      return "__LiteralStructType_";

    auto Name = Ty->getStructName();

    SmallString<16> Buffer(Name);
    for_each(Buffer, [](auto &Iter) {
      if (Iter == '.' || Iter == ':')
        Iter = '_';
    });
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  return "UnknownType";
}

static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
                           const DataLayout &Layout, DIScope *Scope,
                           unsigned LineNum,
                           DenseMap<Type *, DIType *> &DITypeCache) {
  if (DIType *DT = DITypeCache.lookup(Ty))
    return DT;

  StringRef Name = solveTypeName(Ty);

  DIType *RetType = nullptr;

  if (Ty->isIntegerTy()) {
    auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
    RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isFloatingPointTy()) {
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_float,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isPointerTy()) {
    // Construct a BasicType instead of a PointerType to avoid an infinite
    // search problem.
    // For example, we would be in trouble if we traversed recursively:
    //
    //  struct Node {
    //      Node* ptr;
    //  };
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isStructTy()) {
    auto *DIStruct = Builder.createStructType(
        Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
        Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
        llvm::DINodeArray());

    auto *StructTy = cast<StructType>(Ty);
    SmallVector<Metadata *, 16> Elements;
    for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
      DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
                                 Scope, LineNum, DITypeCache);
      assert(DITy);
      Elements.push_back(Builder.createMemberType(
          Scope, DITy->getName(), Scope->getFile(), LineNum,
          DITy->getSizeInBits(), DITy->getAlignInBits(),
          Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
          llvm::DINode::FlagArtificial, DITy));
    }

    Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));

    RetType = DIStruct;
  } else {
    LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
    SmallString<32> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
    RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  }

  DITypeCache.insert({Ty, RetType});
  return RetType;
}

/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
/// 1. For each value in the frame, we search the uses of dbg.declare to find
///    the corresponding debug variable for the value. If we can find the
///    debug variable, we can get full and accurate debug information.
/// 2. If we can't get debug information in step 1, we can only try to
///    build the DIType from the Type. We do this in solveDIType. We only
///    handle integer, floating-point, pointer, and struct types for now.
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
                                FrameDataInfo &FrameData) {
  DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function is not
  // compiled with debug info, so we don't need to generate debug info
  // for the frame either.
  if (!DIS || !DIS->getUnit() ||
      !dwarf::isCPlusPlus(
          (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
    return;

  assert(Shape.ABI == coro::ABI::Switch &&
         "We can only build debug information for C++ coroutines now.\n");

  DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  assert(PromiseAlloca &&
         "Coroutine with switch ABI should own Promise alloca");

  TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
  if (DIs.empty())
    return;

  DbgDeclareInst *PromiseDDI = DIs.front();
  DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
  DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
  DIFile *DFile = PromiseDIScope->getFile();
  DILocation *DILoc = PromiseDDI->getDebugLoc().get();
  unsigned LineNum = PromiseDIVariable->getLine();

  DICompositeType *FrameDITy = DBuilder.createStructType(
      DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
      Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
      llvm::DINodeArray());
  StructType *FrameTy = Shape.FrameTy;
  SmallVector<Metadata *, 16> Elements;
  DataLayout Layout = F.getParent()->getDataLayout();

  DenseMap<Value *, DILocalVariable *> DIVarCache;
  cacheDIVar(FrameData, DIVarCache);

  unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
  unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
  unsigned IndexIndex = Shape.SwitchLowering.IndexField;

  DenseMap<unsigned, StringRef> NameCache;
  NameCache.insert({ResumeIndex, "__resume_fn"});
  NameCache.insert({DestroyIndex, "__destroy_fn"});
  NameCache.insert({IndexIndex, "__coro_index"});

  Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
       *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
       *IndexTy = FrameTy->getElementType(IndexIndex);

  DenseMap<unsigned, DIType *> TyCache;
  TyCache.insert({ResumeIndex,
                  DBuilder.createBasicType("__resume_fn",
                                           Layout.getTypeSizeInBits(ResumeFnTy),
                                           dwarf::DW_ATE_address)});
  TyCache.insert(
      {DestroyIndex, DBuilder.createBasicType(
                         "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
                         dwarf::DW_ATE_address)});

  /// FIXME: If we fill the field `SizeInBits` with the actual size of
  /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
  TyCache.insert({IndexIndex, DBuilder.createBasicType(
                                  "__coro_index",
                                  (Layout.getTypeSizeInBits(IndexTy) < 8)
                                      ? 8
                                      : Layout.getTypeSizeInBits(IndexTy),
                                  dwarf::DW_ATE_unsigned_char)});

  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) == DIVarCache.end())
      continue;

    auto Index = FrameData.getFieldIndex(V);

    NameCache.insert({Index, DIVarCache[V]->getName()});
    TyCache.insert({Index, DIVarCache[V]->getType()});
  }

  // Cache from index to an (Align, Offset) pair.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
  // The Align and Offset of Resume function and Destroy function are fixed.
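  // (The values below assume 8-byte pointers for the two leading
  // function-pointer fields.)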
  OffsetCache.insert({ResumeIndex, {8, 0}});
  OffsetCache.insert({DestroyIndex, {8, 8}});
  OffsetCache.insert(
      {IndexIndex,
       {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});

  for (auto *V : FrameData.getAllDefs()) {
    auto Index = FrameData.getFieldIndex(V);

    OffsetCache.insert(
        {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
  }

  DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names; e.g., there may be
  // many i32 and i64 types in one coroutine, so we use names like i32_0 and
  // i32_1 to keep them distinct, since it makes no sense for the names of
  // the fields to conflict with each other.
  unsigned UnknownTypeNum = 0;
  for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
    if (OffsetCache.find(Index) == OffsetCache.end())
      continue;

    std::string Name;
    uint64_t SizeInBits;
    uint32_t AlignInBits;
    uint64_t OffsetInBits;
    DIType *DITy = nullptr;

    Type *Ty = FrameTy->getElementType(Index);
    assert(Ty->isSized() && "We can't handle type which is not sized.\n");
    SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
    AlignInBits = OffsetCache[Index].first * 8;
    OffsetInBits = OffsetCache[Index].second * 8;

    if (NameCache.find(Index) != NameCache.end()) {
      Name = NameCache[Index].str();
      DITy = TyCache[Index];
    } else {
      DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name = DITy->getName().str();
      Name += "_" + std::to_string(UnknownTypeNum);
      UnknownTypeNum++;
    }

    Elements.push_back(DBuilder.createMemberType(
        FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
        llvm::DINode::FlagArtificial, DITy));
  }

  DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));

  auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
                                                 DFile, LineNum, FrameDITy,
                                                 true, DINode::FlagArtificial);
  assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));

  // The subprogram has a RetainedNodes field which records the debug
  // variables it contains. So we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than the more precise
  // `__coro_frame is optimized out`.
  if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
    auto RetainedNodes = SubProgram->getRetainedNodes();
    SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
                                                 RetainedNodes.end());
    RetainedNodesVec.push_back(FrameDIVar);
    SubProgram->replaceOperandWith(
        7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
  }

  DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
                         DBuilder.createExpression(), DILoc,
                         Shape.FramePtr->getNextNode());
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  // We will use this value to cap the alignment of spilled values.
  Optional<Align> MaxFrameAlignment;
  if (Shape.ABI == coro::ABI::Async)
    MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
  FrameTypeBuilder B(C, DL, MaxFrameAlignment);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  Optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, None, /*header*/ true);
    (void)B.addField(FnPtrTy, None, /*header*/ true);

    // PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index.  This doesn't need to
    // be in the header.
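    // ceil(log2(#suspend points)) bits suffice to number the suspend
    // points, with a minimum of one bit.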
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, None);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may share the same field slot,
  // we add allocas to fields here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Add PromiseAlloca to Allocas list so that
  // 1. updateLayoutIndex could update its index after
  // `performOptimizedStructLayout`
  // 2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    Type *FieldType = S.first->getType();
    // For byval arguments, we need to store the pointed value in the frame,
    // instead of the pointer itself.
    if (const Argument *A = dyn_cast<Argument>(S.first))
      if (A->hasByValAttr())
        FieldType = A->getParamByValType();
    FieldIDType Id =
        B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In the switch ABI, remember the switch-index field.
    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
    Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
    Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
    Shape.SwitchLowering.IndexOffset = IndexField.Offset;

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;
  }

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
      = (B.getStructSize() <= Id->getStorageSize() &&
         B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirment of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
// 1. Should this alloca be allocated on the frame instead?
// 2. Could the content of the alloca be modified prior to CoroBegin, which
//    would require copying the data from the alloca to the frame after
//    CoroBegin?
// 3. Are there any aliases created for this alloca prior to CoroBegin but
//    used after CoroBegin? If so, we will need to recreate the aliases after
//    CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all basic blocks that use this alloca or any of its
//      aliases. At the end, we check whether any two of those basic blocks
//      cross suspension points. If so, this alloca must be put on the frame.
//   b. Whether the alloca or any alias of the alloca escapes at some point,
//      either because its address is stored somewhere, or because the address
//      is used in a function call that might capture it. If it ever escapes,
//      this alloca must be put on the frame conservatively.
// To answer question 2, we track the variable MayWriteBeforeCoroBegin.
// Whenever a potential write happens, either through a store instruction, a
// function call or any of the memory intrinsics, we check whether the
// instruction is prior to CoroBegin.
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out; an
// ideal solution would likely require a significant redesign.
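//
// For illustration, a sketch in IR of case 3 (names are hypothetical):
//
//   %x = alloca i64
//   %x.cast = bitcast i64* %x to i8*
//   %alias = getelementptr inbounds i8, i8* %x.cast, i64 4
//   %hdl = call i8* @llvm.coro.begin(...)
//   ...
//   store i8 0, i8* %alias               ; use after coro.begin
//
// Here %alias is created before coro.begin but used after it, so it is
// recorded in AliasOffsetMap with offset 4 and recreated off the frame copy
// of %x after coro.begin.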
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
                   bool ShouldUseLifetimeStartInfo)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
        ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}

  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it would
    // be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca is being written to.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //   %ptr = alloca ..
    //   %addr = alloca ..
    //   store %ptr, %addr
    //   %x = load %addr
    //   ..
    // If %addr is only used by loading from it, we can simply treat %x as
    // another alias of %ptr, and not consider %ptr escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory locations, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())
              continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitIntrinsicInst(IntrinsicInst &II) {
    // When we find that a lifetime marker refers to a subrange of the
    // original alloca, we ignore it to avoid misleading the analysis.
    if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
        !Offset.isZero())
      return Base::visitIntrinsicInst(II);
    LifetimeStarts.insert(&II);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return ShouldLiveOnFrame.getValue();
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const CoroBeginInst &CoroBegin;
  const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst, created before CoroBegin and used
  // after CoroBegin. Each entry contains the instruction and its offset into
  // the original alloca. They need to be recreated after CoroBegin off the
  // frame.
  DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<Instruction *, 4> Users{};
  SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
  bool MayWriteBeforeCoroBegin{false};
  bool ShouldUseLifetimeStartInfo{true};

  mutable llvm::Optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of lifetime.start intrinsic and
    // every basic block that uses the pointer to see if they cross suspension
    // points. The uses cover both direct uses as well as indirect uses.
    if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
      for (auto *I : Users)
        for (auto *S : LifetimeStarts)
          if (Checker.isDefinitionAcrossSuspend(*S, I))
            return true;
      return false;
    }
    // FIXME: Ideally the isEscaped check should come at the beginning.
    // However there are a few loose ends that need to be fixed first before
    // we can do that. We need to make sure we are not over-conservative, so
    // that the data accessed in-between await_suspend and symmetric transfer
    // is always put on the stack, and also data accessed after coro.end is
    // always put on the stack (esp the return object). To fix that, we need
    // to:
    //  1) Potentially treat sret as nocapture in calls
    //  2) Special handle the return object and put it on the stack
    //  3) Utilize lifetime.end intrinsic
    if (PI.isEscaped())
      return true;

    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(&CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(&CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto Itr = AliasOffsetMap.find(&I);
      if (Itr == AliasOffsetMap.end()) {
        AliasOffsetMap[&I] = Offset;
      } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
        // If we have seen two different possible values for this alias, we set
        // it to empty.
        AliasOffsetMap[&I].reset();
      }
    }
  }
};
} // namespace

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI instruction in a block.
//
// Split the catchswitch away into a separate block and insert in its place:
//
//   cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
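//
// For illustration (a sketch; block names are hypothetical):
//
//   bb:
//     %p = phi ...
//     %cs = catchswitch within none [...] unwind ...
//
// becomes
//
//   bb:
//     %p = phi ...
//     %pad = cleanuppad within none []
//     cleanupret from %pad unwind label %bb.split  ; <- spill insert point
//   bb.split:
//     %cs = catchswitch within none [...] unwind ...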
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

static void createFramePtr(coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  Shape.FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//    %hdl = coro.begin(...)
//    whatever
//
// becomes:
//
//    %hdl = coro.begin(...)
//    %FramePtr = bitcast i8* hdl to %f.frame*
//    br label %AllocaSpillBB
//
//  AllocaSpillBB:
//    ; geps corresponding to allocas that were moved to coroutine frame
//    br label PostSpill
//
//  PostSpill:
//    whatever
//
//
static Instruction *insertSpills(const FrameDataInfo &FrameData,
                                 coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(C);
  StructType *FrameTy = Shape.FrameTy;
  Instruction *FramePtr = Shape.FramePtr;
  DominatorTree DT(*CB->getFunction());
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;

  // Create a GEP with the given index into the coroutine frame for the original
  // value Orig. Appends an extra 0 index for array-allocas, preserving the
  // original type.
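  // For example (a sketch): for `%a = alloca i32, i32 4` placed at field
  // index 2, the GEP indices are {0, 2, 0}; the trailing 0 steps from the
  // [4 x i32] field to its first element, so the GEP result has type i32*
  // like the original alloca, rather than [4 x i32]*.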
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst,
      // the frame slot may be shared with another AllocaInst. In that case we
      // cast the GEP to the alloca's type here to reuse the frame storage.
      //
      // Note: If we change the strategy for dealing with alignment, we need
      // to refine this casting.
      if (GEP->getResultElementType() != Orig->getType())
        return Builder.CreateBitCast(GEP, Orig->getType(),
                                     Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    auto SpillAlignment = Align(FrameData.getAlign(Def));
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    bool NeedToCopyArgPtrValue = false;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = FramePtr->getNextNode();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

      if (Arg->hasByValAttr())
        NeedToCopyArgPtrValue = true;

    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after the coroutine frame pointer is computed.
        InsertPt = FramePtr->getNextNode();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of the invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip past PHINodes and EH pad instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    if (NeedToCopyArgPtrValue) {
      // For byval arguments, we need to store the pointed value in the frame,
      // instead of the pointer itself.
      auto *Value =
          Builder.CreateLoad(Def->getType()->getPointerElementType(), Def);
      Builder.CreateAlignedStore(Value, G, SpillAlignment);
    } else {
      Builder.CreateAlignedStore(Def, G, SpillAlignment);
    }

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block, create a load instruction to
      // reload the spilled value from the coroutine frame. For byval
      // arguments, the frame GEP itself serves as the reload.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        if (NeedToCopyArgPtrValue)
          CurrentReload = GEP;
        else
          CurrentReload = Builder.CreateAlignedLoad(
              FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
              SpillAlignment, E.first->getName() + Twine(".reload"));

        TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
        for (DbgDeclareInst *DDI : DIs) {
          bool AllowUnresolved = false;
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by CoroCloner via coro::salvageDebugInfo().
          DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
              .insertDeclare(CurrentReload, DDI->getVariable(),
                             DDI->getExpression(), DDI->getDebugLoc(),
                             &*Builder.GetInsertPoint());
          // This dbg.declare is for the main function entry point.  It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
        }
      }

      // If we have a single-edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi-edge
      // PHINodes by rewriting them in the rewritePHIs function.)
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // reload.
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();

  auto SpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with Geps.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return FramePtr;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To retain debuggability, we replace the uses of allocas in
  // dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    SmallVector<DbgVariableIntrinsic *, 4> DIs;
    findDbgUsers(DIs, Alloca);
    for (auto *DVI : DIs)
      DVI->replaceUsesOfWith(Alloca, G);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  Builder.SetInsertPoint(FramePtr->getNextNode());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // Note: isEscaped is folded into MayWriteBeforeCoroBegin, since an
      // escape really means the alloca is potentially modified before
      // CoroBegin.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto *AliasPtr = Builder.CreateGEP(
          Type::getInt8Ty(C), FramePtrRaw,
          ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
  return FramePtr;
}

// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
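//
// For illustration (a sketch): with InsertedBB placed on the edge
// PredBB -> SuccBB,
//
//   SuccBB:
//     %v = phi i32 [ %a, %InsertedBB ], ...
//
// becomes
//
//   InsertedBB:
//     %a.SuccBB = phi i32 [ %a, %PredBB ]
//     br label %SuccBB
//   SuccBB:
//     %v = phi i32 [ %a.SuccBB, %InsertedBB ], ...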
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}

// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  // cleanuppad:
  //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
  //    %3 = cleanuppad within none []
  //
  // It will create:
  //
  // cleanuppad.corodispatch
  //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
  //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
  //            [i8 0, label %cleanuppad.from.catchswitch
  //             i8 1, label %cleanuppad.from.catch.1]
  // cleanuppad.from.catchswitch:
  //    %4 = phi i32 [%0, %catchswitch]
  //    br label %cleanuppad
  // cleanuppad.from.catch.1:
  //    %6 = phi i32 [%1, %catch.1]
  //    br label %cleanuppad
  // cleanuppad:
  //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                 [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case switching on an invalid value in the dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new block and move the PHI values there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Set up the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}

static void cleanupSinglePredPHIs(Function &F) {
  SmallVector<PHINode *, 32> Worklist;
  for (auto &BB : F) {
    for (auto &Phi : BB.phis()) {
      if (Phi.getNumIncomingValues() == 1) {
        Worklist.push_back(&Phi);
      } else
        break;
    }
  }
  while (!Worklist.empty()) {
    auto *Phi = Worklist.pop_back_val();
    auto *OriginalValue = Phi->getIncomingValue(0);
    Phi->replaceAllUsesWith(OriginalValue);
  }
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
        assert(CS->getUnwindDest() == &BB);
        (void)CS;
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function after
    // ehAwareSplitEdge cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to the ehAwareSplitEdge function cloned the original landing pad.
    // We no longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume, as opposed to
// spilling the result into the coroutine frame.
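//
// For example (a sketch): in
//
//   %gep = getelementptr inbounds i64, i64* %base, i32 1
//   %sus = call i8 @llvm.coro.suspend(...)
//   ...use of %gep...
//
// %gep can be cloned after the suspend point instead of being spilled to and
// reloaded from the coroutine frame (provided %base is itself available).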
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {

        bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
        CurrentBlock = U->getParent();
        auto *InsertBlock = IsInCoroSuspendBlock
                                ? CurrentBlock->getSinglePredecessor()
                                : CurrentBlock;
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            IsInCoroSuspendBlock ? InsertBlock->getTerminator()
                                 : &*InsertBlock->getFirstInsertionPt());
      }
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

static bool isSuspendBlock(BasicBlock *BB) {
  return isa<AnyCoroSuspendInst>(BB->front());
}

typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set.  If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't pass them up.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}

/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (isSuspendBlock(BB)) return true;

  // Recurse into the successors.
  for (auto Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}

static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}

/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
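///
/// For illustration (a sketch): a local
///
///   %id = call token @llvm.coro.alloca.alloc(i64 %size, i32 8)
///   %p  = call i8* @llvm.coro.alloca.get(token %id)
///   ...
///   call void @llvm.coro.alloca.free(token %id)
///
/// lowers to roughly
///
///   %save = call i8* @llvm.stacksave()  ; only if a stack save is needed
///   %p    = alloca i8, i64 %size, align 8
///   ...
///   call void @llvm.stackrestore(i8* %save)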
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
                              SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    IRBuilder<> Builder(AI);

    // Save the stack depth.  Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
                            Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(Align(AI->getAlignment()));

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores.  This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
                    Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
                             StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}

/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                   SmallVectorImpl<Instruction*> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}

/// Get the current swifterror value.
static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, { V });
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
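///
/// For illustration (a sketch): around `call void @f(i8** swifterror %s)`
/// this emits roughly
///
///   %v    = load i8*, i8** %alloca
///   %slot = call i8** @set(i8* %v)  ; fake "set" call, see helper above
///   call void @f(i8** swifterror %slot)
///   %v2   = call i8* @get()         ; fake "get" call, see helper above
///   store i8* %v2, i8** %alloca
///
/// where @set/@get stand in for the null-function-pointer calls created by
/// the helpers above; they are resolved during splitting.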
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call.  Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}

/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}

/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
///
/// The argument keeps the swifterror flag.
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                             SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = ArgTy->getPointerElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca.  swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}

/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function.  We'll fix them up later when splitting the function.
static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst*, 4> AllocasToPromote;

  // Look for a swifterror argument.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr()) continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError()) continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}

/// retcon and retcon.once conventions assume that all spill uses can be sunk
/// after the coro.begin intrinsic.
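///
/// For example (a sketch): if `%c = bitcast i64* %x to i8*` sits before
/// coro.begin in the same block and %x is a spilled definition, %c (and,
/// transitively, any of its users that also precede coro.begin) is moved to
/// just after coro.begin, in an order consistent with dominance.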
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.pop_back_val();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
    return Dom.dominates(A, B);
  });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}

/// For each local variable whose users all live inside a single region
/// between suspend points, we sink its lifetime.start marker to right after
/// the suspend block. Doing so minimizes the lifetime of each variable,
/// hence minimizing the amount of data we end up putting on the frame.
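///
/// For illustration (a sketch):
///
///   entry:
///     %x = alloca i32
///     call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.cast)
///     ...
///   await.ready:                ; single successor of the suspend block
///     ; ...all uses of %x...
///
/// The lifetime.start is re-emitted just before the terminator of
/// %await.ready and the original marker is erased, so %x no longer spans the
/// suspend point and need not be placed on the frame.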
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas.
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst* AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction* I) {
        if (auto* II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers: if they are all
        // dominated by one of the basic blocks and do not cross suspend
        // points, then there is no need to spill the instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start, GEP and bitcast used by lifetime.start
          // markers.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink lifetime.start markers to the dominating block when they are
      // the only users outside the region.
      if (Valid && Lifetimes.size() != 0) {
        // This may be AI itself, when the type of AI is already i8*.
        auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // The original lifetime.start markers outside the region are no
        // longer necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}

static void collectFrameAllocas(Function &F, coro::Shape &Shape,
                                const SuspendCrossingInfo &Checker,
                                SmallVectorImpl<AllocaInfo> &Allocas) {
  for (Instruction &I : instructions(F)) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;
    // The PromiseAlloca will be specially handled since it needs to be in a
    // fixed position in the frame.
    if (AI == Shape.SwitchLowering.PromiseAlloca) {
      continue;
    }
    DominatorTree DT(F);
    // The code that uses the lifetime.start intrinsic does not work for
    // functions with loops without an exit. Disable it on ABIs known to
    // generate such code.
    bool ShouldUseLifetimeStartInfo =
        (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
         Shape.ABI != coro::ABI::RetconOnce);
    AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
                             *Shape.CoroBegin, Checker,
                             ShouldUseLifetimeStartInfo};
    Visitor.visitPtr(*AI);
    if (!Visitor.getShouldLiveOnFrame())
      continue;
    Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                         Visitor.getMayWriteBeforeCoroBegin());
  }
}

void coro::salvageDebugInfo(
    SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
    DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
  Function *F = DVI->getFunction();
  IRBuilder<> Builder(F->getContext());
  auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
  while (isa<IntrinsicInst>(InsertPt))
    ++InsertPt;
  Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
  DIExpression *Expr = DVI->getExpression();
  // Follow the pointer arithmetic all the way to the incoming
  // function argument and convert into a DIExpression.
  bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
  Value *Storage = DVI->getVariableLocationOp(0);
  Value *OriginalStorage = Storage;
  while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
    if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
      Storage = LdInst->getOperand(0);
      // FIXME: This is a heuristic that works around the fact that
      // LLVM IR debug intrinsics cannot yet distinguish between
      // memory and value locations: Because a dbg.declare(alloca) is
      // implicitly a memory location no DW_OP_deref operation for the
      // last direct load from an alloca is necessary.  This condition
      // effectively drops the *last* DW_OP_deref in the expression.
      if (!SkipOutermostLoad)
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
      Storage = StInst->getOperand(0);
    } else {
      SmallVector<uint64_t, 16> Ops;
      SmallVector<Value *, 0> AdditionalValues;
      Value *Op = llvm::salvageDebugInfoImpl(
          *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
          AdditionalValues);
      if (!Op || !AdditionalValues.empty()) {
        // If salvaging failed or salvaging produced more than one location
        // operand, give up.
        break;
      }
      Storage = Op;
      Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
    }
    SkipOutermostLoad = false;
  }
  if (!Storage)
    return;

  // Store a pointer to the coroutine frame object in an alloca so it
  // is available throughout the function when producing unoptimized
  // code. Extending the lifetime this way is correct because the
  // variable has been declared by a dbg.declare intrinsic.
  //
  // Avoid creating the alloca when it would be eliminated by optimization
  // passes, leaving the corresponding dbg.declares invalid.
  if (!OptimizeFrame && !EnableReuseStorageInFrame)
    if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
      auto &Cached = DbgPtrAllocaCache[Storage];
      if (!Cached) {
        Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
                                      Arg->getName() + ".debug");
        Builder.CreateStore(Storage, Cached);
      }
      Storage = Cached;
      // FIXME: LLVM lacks nuanced semantics to differentiate between
      // memory and direct locations at the IR level. The backend will
      // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
      // location. Thus, if there are deref and offset operations in the
      // expression, we need to add a DW_OP_deref at the *start* of the
      // expression to first load the contents of the alloca before
      // adjusting it with the expression.
      if (Expr && Expr->isComplex())
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    }

  DVI->replaceVariableLocationOp(OriginalStorage, Storage);
  DVI->setExpression(Expr);
  // It makes no sense to move the dbg.value intrinsic.
  if (!isa<DbgValueInst>(DVI)) {
    if (auto *II = dyn_cast<InvokeInst>(Storage))
      DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
    else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
      DVI->moveBefore(CBI->getDefaultDest()->getFirstNonPHI());
    else if (auto *InsertPt = dyn_cast<Instruction>(Storage)) {
      assert(!InsertPt->isTerminator() &&
             "Unimaged terminator that could return a storage.");
      DVI->moveAfter(InsertPt);
    } else if (isa<Argument>(Storage))
      DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
  }
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Don't eliminate swifterror in async functions that won't be split.
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    splitAround(CE, "CoroEnd");

    // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the uses of the musttail call. (Arguments to the coro.end
    // instruction would be ignored.)
    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
      auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
      if (!MustTailCallFn)
        continue;
      IRBuilder<> Builder(AsyncEnd);
      SmallVector<Value *, 8> Args(AsyncEnd->args());
      auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
      auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
                                      Arguments, Builder);
      splitAround(Call, "MustTailCall.Before.CoroEnd");
    }
  }

  // Later code makes structural assumptions about single-predecessor PHIs,
  // e.g. that they are not live across a suspend point.
  cleanupSinglePredPHIs(F);

  // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
  SmallVector<Instruction*, 4> DeadInstructions;

  {
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I)) {
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));

          // Manually add dbg.value metadata uses of I.
          SmallVector<DbgValueInst *, 16> DVIs;
          findDbgValues(DVIs, &I);
          for (auto *DVI : DVIs)
            if (Checker.isDefinitionAcrossSuspend(I, DVI))
              Spills[&I].push_back(DVI);
        }

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
      Shape.ABI != coro::ABI::RetconOnce)
    sinkLifetimeStartMarkers(F, Shape, Checker);

  // Async functions with no suspends won't be split, so their allocas can
  // stay on the stack.
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other non-materializable values.
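  // Unlike instructions, arguments cannot be rematerialized: any use on the
  // far side of a suspend forces a copy of the argument into the frame.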
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The coroutine promise is always included in the coroutine frame, so
    // there is no need to check for suspend crossing.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle coro.alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value.  The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore coro.alloca.get; we process it as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    // Plain allocas were already collected by collectFrameAllocas above.
    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
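        // Token values cannot be stored to memory, so a token that is live
        // across a suspend can never be preserved in the frame.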
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }

  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only salvage dbg.value intrinsics whose values are
  // already in the frame. The dbg.values for allocas are handled specially.
  for (auto &Iter : FrameData.Spills) {
    auto *V = Iter.first;
    SmallVector<DbgValueInst *, 16> DVIs;
    findDbgValues(DVIs, V);
    llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
      if (Checker.isDefinitionAcrossSuspend(*V, DVI))
        FrameData.Spills[V].push_back(DVI);
    });
  }

  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
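  // In the retcon, retcon.once, and async ABIs the frame only exists once
  // coro.begin has returned, so any use of a spilled value must be sunk
  // past coro.begin before the reload can be inserted.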
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  createFramePtr(Shape);
  // For now, this works for C++ programs only.
  buildFrameDebugInfo(F, Shape, FrameData);
  insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto *I : DeadInstructions)
    I->eraseFromParent();
}