10048
10049
10050
10051
10052
10053
10054
10055
10056
10057
10058
10059
10060
10061
10062
10063
10064
10065
10066
10067
10068
10069
10070
10071
10072
10073
10074
10075
10076
10077
10078
10079
10080
10081
10082
10083
10084
10085
10086
10087
10088
10089
10090
10091
10092
10093
10094
10095
10096
10097
10098
10099
10100
10101
10102
10103
10104
10105
10106
10107
10108
10109
10110
10111
10112
10113
10114
10115
10116
10117
10118
10119
10120
10121
10122
10123
10124
10125
10126
10127
10128
10129
10130
10131
10132
10133
10134
10135
10136
10137
10138
10139
10140
10141
10142
10143
10144
10145
10146
10147
10148
10149
10150
10151
10152
10153
10154
10155
10156
10157
10158
10159
10160
10161
10162
10163
10164
10165
10166
10167
10168
10169
10170
10171
10172
10173
10174
10175
10176
10177
10178
10179
10180
10181
10182
10183
10184
10185
10186
10187
10188
10189
10190
10191
10192
10193
10194
10195
10196
10197
10198
10199
10200
10201
10202
10203
10204
10205
10206
10207
10208
10209
10210
10211
10212
10213
10214
10215
10216
10217
10218
10219
10220
10221
10222
10223
10224
10225
10226
10227
10228
10229
10230
10231
10232
10233
10234
10235
10236
10237
10238
10239
10240
10241
10242
10243
10244
10245
10246
10247
10248
10249
10250
10251
10252
10253
10254
10255
10256
10257
10258
10259
10260
10261
10262
10263
10264
10265
10266
10267
10268
10269
10270
10271
10272
10273
10274
10275
10276
10277
10278
10279
10280
10281
10282
10283
10284
10285
10286
10287
10288
10289
10290
10291
10292
10293
10294
10295
10296
10297
10298
10299
10300
10301
10302
10303
10304
10305
10306
10307
10308
10309
10310
10311
10312
10313
10314
10315
10316
10317
10318
10319
10320
10321
10322
10323
10324
10325
10326
10327
10328
10329
10330
10331
10332
10333
10334
10335
10336
10337
10338
10339
10340
10341
10342
10343
10344
10345
10346
10347
10348
10349
10350
10351
10352
10353
10354
10355
10356
10357
10358
10359
10360
10361
10362
10363
10364
10365
10366
10367
10368
10369
10370
10371
10372
10373
10374
10375
10376
10377
10378
10379
10380
10381
10382
10383
10384
10385
10386
10387
10388
10389
10390
10391
10392
10393
10394
10395
10396
10397
10398
10399
10400
10401
10402
10403
10404
10405
10406
10407
10408
10409
10410
10411
10412
10413
10414
10415
10416
10417
10418
10419
10420
10421
10422
10423
10424
10425
10426
10427
10428
10429
10430
10431
10432
10433
10434
10435
10436
10437
10438
10439
10440
10441
10442
10443
10444
10445
10446
10447
10448
10449
10450
10451
10452
10453
10454
10455
10456
10457
10458
10459
10460
10461
10462
10463
10464
10465
10466
10467
10468
10469
10470
10471
10472
10473
10474
10475
10476
10477
10478
10479
10480
10481
10482
10483
10484
10485
10486
10487
10488
10489
10490
10491
10492
10493
10494
10495
10496
10497
10498
10499
10500
10501
10502
10503
10504
10505
10506
10507
10508
10509
10510
10511
10512
10513
10514
10515
10516
10517
10518
10519
10520
10521
10522
10523
10524
10525
10526
10527
10528
10529
10530
10531
10532
10533
10534
10535
10536
10537
10538
10539
10540
10541
10542
10543
10544
10545
10546
10547
10548
10549
10550
10551
10552
10553
10554
10555
10556
10557
10558
10559
10560
10561
10562
10563
10564
10565
10566
10567
10568
10569
10570
10571
10572
10573
10574
10575
10576
10577
10578
10579
10580
10581
10582
10583
10584
10585
10586
10587
10588
10589
10590
10591
10592
10593
10594
10595
10596
10597
10598
10599
10600
10601
10602
10603
10604
10605
10606
10607
10608
10609
10610
10611
10612
10613
10614
10615
10616
10617
10618
10619
10620
10621
10622
10623
10624
10625
10626
10627
10628
10629
10630
10631
10632
10633
10634
10635
10636
10637
10638
10639
10640
10641
10642
10643
10644
10645
10646
10647
10648
10649
10650
10651
10652
10653
10654
10655
10656
10657
10658
10659
10660
10661
10662
10663
10664
10665
10666
10667
10668
10669
10670
10671
10672
10673
10674
10675
10676
10677
10678
10679
10680
10681
10682
10683
10684
10685
10686
10687
10688
10689
10690
10691
10692
10693
10694
10695
10696
10697
10698
10699
10700
10701
10702
10703
10704
10705
10706
10707
10708
10709
10710
10711
10712
10713
10714
10715
10716
10717
10718
10719
10720
10721
10722
10723
10724
10725
10726
10727
10728
10729
10730
10731
10732
10733
10734
10735
10736
10737
10738
10739
10740
10741
10742
10743
10744
10745
10746
10747
10748
10749
10750
10751
10752
10753
10754
10755
10756
10757
10758
10759
10760
10761
10762
10763
10764
10765
10766
10767
10768
10769
10770
10771
10772
10773
10774
10775
10776
10777
10778
10779
10780
10781
10782
10783
10784
10785
10786
10787
10788
10789
10790
10791
10792
10793
10794
10795
10796
10797
10798
10799
10800
10801
10802
10803
10804
10805
10806
10807
10808
10809
10810
10811
10812
10813
10814
10815
10816
10817
10818
10819
10820
10821
10822
10823
10824
10825
10826
10827
10828
10829
10830
10831
10832
10833
10834
10835
10836
10837
10838
10839
10840
10841
10842
10843
10844
10845
10846
10847
10848
10849
10850
10851
10852
10853
10854
10855
10856
10857
10858
10859
10860
10861
10862
10863
10864
10865
10866
10867
10868
10869
10870
10871
10872
10873
10874
10875
10876
10877
10878
10879
10880
10881
10882
10883
10884
10885
10886
10887
10888
10889
10890
10891
10892
10893
10894
10895
10896
10897
10898
10899
10900
10901
10902
10903
10904
10905
10906
10907
10908
10909
10910
10911
10912
10913
10914
10915
10916
10917
10918
10919
10920
10921
10922
10923
10924
10925
10926
10927
10928
10929
10930
10931
10932
10933
10934
10935
10936
10937
10938
10939
10940
10941
10942
10943
10944
10945
10946
10947
10948
10949
10950
10951
10952
10953
10954
10955
10956
10957
10958
10959
10960
10961
10962
10963
10964
10965
10966
10967
10968
10969
10970
10971
10972
10973
10974
10975
10976
10977
10978
10979
10980
10981
10982
10983
10984
10985
10986
10987
10988
10989
10990
10991
10992
10993
10994
10995
10996
10997
10998
10999
11000
11001
11002
11003
11004
11005
11006
11007
11008
11009
11010
11011
11012
11013
11014
11015
11016
11017
11018
11019
11020
11021
11022
11023
11024
11025
11026
11027
11028
11029
11030
11031
11032
11033
11034
11035
11036
11037
11038
11039
11040
11041
11042
11043
11044
11045
11046
11047
11048
11049
11050
11051
11052
11053
11054
11055
11056
11057
11058
11059
11060
11061
11062
11063
11064
11065
11066
11067
11068
11069
11070
11071
11072
11073
11074
11075
11076
11077
11078
11079
11080
11081
11082
11083
11084
11085
11086
11087
11088
11089
11090
11091
11092
11093
11094
11095
11096
11097
11098
11099
11100
11101
11102
11103
11104
11105
11106
11107
11108
11109
11110
11111
11112
11113
11114
11115
11116
11117
11118
11119
11120
11121
11122
11123
11124
11125
11126
11127
11128
11129
11130
11131
11132
11133
11134
11135
11136
11137
11138
11139
11140
11141
11142
11143
11144
11145
11146
11147
11148
11149
11150
11151
11152
11153
11154
11155
11156
11157
11158
11159
11160
11161
11162
11163
11164
11165
11166
11167
11168
11169
11170
11171
11172
11173
11174
11175
11176
11177
11178
11179
11180
11181
11182
11183
11184
11185
11186
11187
11188
11189
11190
11191
11192
11193
11194
11195
11196
11197
11198
11199
11200
11201
11202
11203
11204
11205
11206
11207
11208
11209
11210
11211
11212
11213
11214
11215
11216
11217
11218
11219
11220
11221
11222
11223
11224
11225
11226
11227
11228
11229
11230
11231
11232
11233
11234
11235
11236
11237
11238
11239
11240
11241
11242
11243
11244
11245
11246
11247
11248
11249
11250
11251
11252
11253
11254
11255
11256
11257
11258
11259
11260
11261
11262
11263
11264
11265
11266
11267
11268
11269
11270
11271
11272
11273
11274
11275
11276
11277
11278
11279
11280
11281
11282
11283
11284
11285
11286
11287
11288
11289
11290
11291
11292
11293
11294
11295
11296
11297
11298
11299
11300
11301
11302
11303
11304
11305
11306
11307
11308
11309
11310
11311
11312
11313
11314
11315
11316
11317
11318
11319
11320
11321
11322
11323
11324
11325
11326
11327
11328
11329
11330
11331
11332
11333
11334
11335
11336
11337
11338
11339
11340
11341
11342
11343
11344
11345
11346
11347
11348
11349
11350
11351
11352
11353
11354
11355
11356
11357
11358
11359
11360
11361
11362
11363
11364
11365
11366
11367
11368
11369
11370
11371
11372
11373
11374
11375
11376
11377
11378
11379
11380
11381
11382
11383
11384
11385
11386
11387
11388
11389
11390
11391
11392
11393
11394
11395
11396
11397
11398
11399
11400
11401
11402
11403
11404
11405
11406
11407
11408
11409
11410
11411
11412
11413
11414
11415
11416
11417
11418
11419
11420
11421
11422
11423
11424
11425
11426
11427
11428
11429
11430
11431
11432
11433
11434
11435
11436
11437
11438
11439
11440
11441
11442
11443
11444
11445
11446
11447
11448
11449
11450
11451
11452
11453
11454
11455
11456
11457
11458
11459
11460
11461
11462
11463
11464
11465
11466
11467
11468
11469
11470
11471
11472
11473
11474
11475
11476
11477
11478
11479
11480
11481
11482
11483
11484
11485
11486
11487
11488
11489
11490
11491
11492
11493
11494
11495
11496
11497
11498
11499
11500
11501
11502
11503
11504
11505
11506
11507
11508
11509
11510
11511
11512
11513
11514
11515
11516
11517
11518
11519
11520
11521
11522
11523
11524
11525
11526
11527
11528
11529
11530
11531
11532
11533
11534
11535
11536
11537
11538
11539
11540
11541
11542
11543
11544
11545
11546
11547
11548
11549
11550
11551
11552
11553
11554
11555
11556
11557
11558
11559
11560
11561
11562
11563
11564
11565
11566
11567
11568
11569
11570
11571
11572
11573
11574
11575
11576
11577
11578
11579
11580
11581
11582
11583
11584
11585
11586
11587
11588
11589
11590
11591
11592
11593
11594
11595
11596
11597
11598
11599
11600
11601
11602
11603
11604
11605
11606
11607
11608
11609
11610
11611
11612
11613
11614
11615
11616
11617
11618
11619
11620
11621
11622
11623
11624
11625
11626
11627
11628
11629
11630
11631
11632
11633
11634
11635
11636
11637
11638
11639
11640
11641
11642
11643
11644
11645
11646
11647
11648
11649
11650
11651
11652
11653
11654
11655
11656
11657
11658
11659
11660
11661
11662
11663
11664
11665
11666
11667
11668
11669
11670
11671
11672
11673
11674
11675
11676
11677
11678
11679
11680
11681
11682
11683
11684
11685
11686
11687
11688
11689
11690
11691
11692
11693
11694
11695
11696
11697
11698
11699
11700
11701
11702
11703
11704
11705
11706
11707
11708
11709
11710
11711
11712
11713
11714
11715
11716
11717
11718
11719
11720
11721
11722
11723
11724
11725
11726
11727
11728
11729
11730
11731
11732
11733
11734
11735
11736
11737
11738
11739
11740
11741
11742
11743
11744
11745
11746
11747
11748
11749
11750
11751
11752
11753
11754
11755
11756
11757
11758
11759
11760
11761
11762
11763
11764
11765
11766
11767
11768
11769
11770
11771
11772
11773
11774
11775
11776
11777
11778
11779
11780
11781
11782
11783
11784
11785
11786
11787
11788
11789
11790
11791
11792
11793
11794
11795
11796
11797
11798
11799
11800
11801
11802
11803
11804
11805
11806
11807
11808
11809
11810
11811
11812
11813
11814
11815
11816
11817
11818
11819
11820
11821
11822
11823
11824
11825
11826
11827
11828
11829
11830
11831
11832
11833
11834
11835
11836
11837
11838
11839
11840
11841
11842
11843
11844
11845
11846
11847
11848
11849
11850
11851
11852
11853
11854
11855
11856
11857
11858
11859
11860
11861
11862
11863
11864
11865
11866
11867
11868
11869
11870
11871
11872
11873
11874
11875
11876
11877
11878
11879
11880
11881
11882
11883
11884
11885
11886
11887
11888
11889
11890
11891
11892
11893
11894
11895
11896
11897
11898
11899
11900
11901
11902
11903
11904
11905
11906
11907
11908
11909
11910
11911
11912
11913
11914
11915
11916
11917
11918
11919
11920
11921
11922
11923
11924
11925
11926
11927
11928
11929
11930
11931
11932
11933
11934
11935
11936
11937
11938
11939
11940
11941
11942
11943
11944
11945
11946
11947
11948
11949
11950
11951
11952
11953
11954
11955
11956
11957
11958
11959
11960
11961
11962
11963
11964
11965
11966
11967
11968
11969
11970
11971
11972
11973
11974
11975
11976
11977
11978
11979
11980
11981
11982
11983
11984
11985
11986
11987
11988
11989
11990
11991
11992
11993
11994
11995
11996
11997
11998
11999
12000
12001
12002
12003
12004
12005
12006
12007
12008
12009
12010
12011
12012
12013
12014
12015
12016
12017
12018
12019
12020
12021
12022
12023
12024
12025
12026
12027
12028
12029
12030
12031
12032
12033
12034
12035
12036
12037
12038
12039
12040
12041
12042
12043
12044
12045
12046
12047
12048
12049
12050
12051
12052
12053
12054
12055
12056
12057
12058
12059
12060
12061
12062
12063
12064
12065
12066
12067
12068
12069
12070
12071
12072
12073
12074
12075
12076
12077
12078
12079
12080
12081
12082
12083
12084
12085
12086
12087
12088
12089
12090
12091
12092
12093
12094
12095
12096
12097
12098
12099
12100
12101
12102
12103
12104
12105
12106
12107
12108
12109
12110
12111
12112
12113
12114
12115
12116
12117
12118
12119
12120
12121
12122
12123
12124
12125
12126
12127
12128
12129
12130
12131
12132
12133
12134
12135
12136
12137
12138
12139
12140
12141
12142
12143
12144
12145
12146
12147
12148
12149
12150
12151
12152
12153
12154
12155
12156
12157
12158
12159
12160
12161
12162
12163
12164
12165
12166
12167
12168
12169
12170
12171
12172
12173
12174
12175
12176
12177
12178
12179
12180
12181
12182
12183
12184
12185
12186
12187
12188
12189
12190
12191
12192
12193
12194
12195
12196
12197
12198
12199
12200
12201
12202
12203
12204
12205
12206
12207
12208
12209
12210
12211
12212
12213
12214
12215
12216
12217
12218
12219
12220
12221
12222
12223
12224
12225
12226
12227
12228
12229
12230
12231
12232
12233
12234
12235
12236
12237
12238
12239
12240
12241
12242
12243
12244
12245
12246
12247
12248
12249
12250
12251
12252
12253
12254
12255
12256
12257
12258
12259
12260
12261
12262
12263
12264
12265
12266
12267
12268
12269
12270
12271
12272
12273
12274
12275
12276
12277
12278
12279
12280
12281
12282
12283
12284
12285
12286
12287
12288
12289
12290
12291
12292
12293
12294
12295
12296
12297
12298
12299
12300
12301
12302
12303
12304
12305
12306
12307
12308
12309
12310
12311
12312
12313
12314
12315
12316
12317
12318
12319
12320
12321
12322
12323
12324
12325
12326
12327
12328
12329
12330
12331
12332
12333
12334
12335
12336
12337
12338
12339
12340
12341
12342
12343
12344
12345
12346
12347
12348
12349
12350
12351
12352
12353
12354
12355
12356
12357
12358
12359
12360
12361
12362
12363
12364
12365
12366
12367
12368
12369
12370
12371
12372
12373
12374
12375
12376
12377
12378
12379
12380
12381
12382
12383
12384
12385
12386
12387
12388
12389
12390
12391
12392
12393
12394
12395
12396
12397
12398
12399
12400
12401
12402
12403
12404
12405
12406
12407
12408
12409
12410
12411
12412
12413
12414
12415
12416
12417
12418
12419
12420
12421
12422
12423
12424
12425
12426
12427
12428
12429
12430
12431
12432
12433
12434
12435
12436
12437
12438
12439
12440
12441
12442
12443
12444
12445
12446
12447
12448
12449
12450
12451
12452
12453
12454
12455
12456
12457
12458
12459
12460
12461
12462
12463
12464
12465
12466
12467
12468
12469
12470
12471
12472
12473
12474
12475
12476
12477
12478
12479
12480
12481
12482
12483
12484
12485
12486
12487
12488
12489
12490
12491
12492
12493
12494
12495
12496
12497
12498
12499
12500
12501
12502
12503
12504
12505
12506
12507
12508
12509
12510
12511
12512
12513
12514
12515
12516
12517
12518
12519
12520
12521
12522
12523
12524
12525
12526
12527
12528
12529
12530
12531
12532
12533
12534
12535
12536
12537
12538
12539
12540
12541
12542
12543
12544
12545
12546
12547
//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
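//
// For example, the canonical induction variable of a simple counted loop,
//
//   for (unsigned i = 0; i != n; ++i) { ... }
//
// is represented as the affine recurrence {0,+,1}<loop>: it starts at 0 and
// steps by 1 on every iteration of 'loop'.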
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::ZeroOrMore,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification with -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                  cl::desc("Size of the expression which is considered huge"),
                  cl::init(4096));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

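/// Print this SCEV in the flat textual form used throughout the analysis'
/// debug output.  For example, an add of a constant and a multiply prints as
/// "(4 + (4 * %n))", and an affine addrec carrying the no-unsigned-wrap flag
/// over the loop headed by %for.body prints as "{0,+,4}<nuw><%for.body>".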
void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

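// The isSizeOf/isAlignOf/isOffsetOf predicates below recognize the classic
// target-independent constant-expression encodings of these quantities as
// GEPs off a null pointer; schematically:
//
//   sizeof(T)          ~ ptrtoint(getelementptr(T* null, 1))
//   offsetof(T, Field) ~ ptrtoint(getelementptr(T* null, 0, Field))
//
// and alignof(T) as the offset of the T member of an unpacked {i1, T}
// struct.  The patterns are matched purely structurally; no target data is
// consulted.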
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
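///
/// For example, given the operand list (%x + 2 + %y + 2 + %x), one valid
/// grouped ordering is (2, 2, %x, %x, %y): constants compare as less complex
/// than unknowns, and the duplicate operands end up adjacent so callers can
/// fold them with a single linear scan.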
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

/// Returns true if the subtree of \p S contains at least HugeExprThreshold
/// nodes.
static bool isHugeExpression(const SCEV *S) {
  return S->getExpressionSize() >= HugeExprThreshold;
}

/// Returns true if \p Ops contains a huge SCEV (see definition above).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, isHugeExpression);
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
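  //
  // For example, dividing (4 * %x * %y) by %x yields Quotient = (4 * %y)
  // with Remainder = 0, and dividing (%x + 3) by %x yields Quotient = 1
  // with Remainder = 3.  When no rule below applies, the division
  // conservatively fails with Quotient = 0 and Remainder = Numerator.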
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case: N/1.  The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator, so the following visitors are intentionally empty.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitSMinExpr(const SCEVSMinExpr *Numerator) {}
  void visitUMinExpr(const SCEVUMinExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
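    // For example, with Numerator = ((%d + 1) * %x) and Denominator = %d,
    // the substitution %d -> 0 folds the Numerator down to %x, giving
    // Remainder = %x.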

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires fewer than W + K bits. Also, the first formula
  // requires a division step, whereas this formula needs only multiplies and
  // shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying to avoid overflow when computing K! / 2^T.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }
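  // For example, for K == 4: 4! == 24 == 2^3 * 3, so the loop above leaves
  // T == 3 and OddFactorial == 3 (the initial T == 1 accounts for the factor
  // of two contributed by i == 2, which the loop skips).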

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
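  // For instance, with W == 8 and OddFactorial == 3: the inverse of 3 modulo
  // 2^8 is 171, since 3 * 171 == 513 == 2 * 256 + 1.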
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
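///
/// For example, {0,+,1,+,1} evaluates at iteration It to
/// 0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2) == It + It*(It-1)/2.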
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
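  // For example, trunc(C + zext(x)) distributes (the constant folds and the
  // zext becomes a single cast), whereas trunc(x + y) would create two new
  // truncates and is left alone.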
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked at the beginning that ID was not in the cache, the
    // recursive calls above may have inserted an entry for ID into the cache.
    // So if we find it now, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
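// For example, with 8-bit values and a Step known to lie in [1, 4], the
// returned limit is SINT8_MIN - 4 == 124 (mod 2^8): any recurrence value
// that is s< 124 can be incremented by at most 4 without exceeding
// SINT8_MAX == 127.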
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
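// For example, with 8-bit values and a Step whose unsigned maximum is 4, the
// limit is 0 - 4 == 252 (mod 2^8): any recurrence value u< 252 can be
// incremented by at most 4 without wrapping past UINT8_MAX == 255.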
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                           ICmpInst::Predicate *Pred,
  //                                           ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}.
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
// If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
// If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
// If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
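// For example, if C == 5 and x, y, ... are all known to be multiples of 4
// (TZ == 2), then D == 5 mod 4 == 1: since (C - D + x + y + ...) ==
// (4 + x + y + ...) has its two low bits clear, adding D < 4 only fills those
// bits and cannot carry, so the top-level addition cannot wrap.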
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
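// For example, if ConstantStart == 7 and Step == 8 * %x (TZ == 3), then
// D == 7 mod 8 == 7: every value of {C-D,+,Step} == {0,+,8 * %x} keeps its
// three low bits clear, so adding D back cannot carry into higher bits.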
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
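    // For example, if X is an i32 known to lie in [0, 200], truncating to i8
    // loses no information, so zext(trunc(X to i8)) to i16 becomes simply
    // trunc(X to i16).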
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty, Depth);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
            getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getZeroExtendExpr(Step, WideTy, Depth + 1),
                                  SCEV::FlagAnyWrap, Depth + 1),
                       SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getSignExtendExpr(Step, WideTy, Depth + 1),
                                  SCEV::FlagAnyWrap, Depth + 1),
                       SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRangeMax(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
            // Cache knowledge of AR NUW, which is propagated to this
            // AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec.  Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsign overflow then we can, by definition,
      // commute the zero extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Address arithmetic often contains expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SZExtD, SZExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
    if (SM->hasNoUnsignedWrap()) {
      // If the multiply does not unsign overflow then we can, by definition,
      // commute the zero extension with the multiply operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SM->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(2^K * (trunc X to iN)) to iM ->
    // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
    //
    // Proof:
    //
    //     zext(2^K * (trunc X to iN)) to iM
    //   = zext((trunc X to iN) << K) to iM
    //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
    //     (because shl removes the top K bits)
    //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
    //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
    //
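    // For example, with K == 3, N == 8, M == 32:
    //   zext(8 * (trunc X to i8)) to i32
    //     == (8 * (zext (trunc X to i5) to i32))<nuw>.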
    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
                               MulLHS->getAPInt().logBase2();
            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
            return getMulExpr(
                getZeroExtendExpr(MulLHS, Ty),
                getZeroExtendExpr(
                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
                SCEV::FlagNUW, Depth + 1);
          }
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty, Depth);
  }

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not sign overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }

    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not signed wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // For instance, this will bring two seemingly different expressions:
    //     1 + sext(5 + 20 * %x + 24 * %y)  and
    //         sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //     2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
            getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getSignExtendExpr(Step, WideTy, Depth + 1),
                                  SCEV::FlagAnyWrap, Depth + 1),
                       SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getZeroExtendExpr(Step, WideTy, Depth + 1),
                                  SCEV::FlagAnyWrap, Depth + 1),
                       SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.

      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit =
            getSignedOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(
              getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
              getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
        }
      }

      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably non-negative and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, update the given map. This is a helper function for
/// getAddRecExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddRecExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       Add->op_begin(), Add->getNumOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `Flags' as can't-wrap behavior.  Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const ArrayRef<const SCEV *> Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
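  // (An expression over non-negative operands that cannot wrap in the signed
  // sense stays within [0, SINT_MAX], so it cannot wrap in the unsigned sense
  // either.)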
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
      isa<SCEVConstant>(Ops[0])) {

    auto Opcode = [&] {
      switch (Type) {
      case scAddExpr:
        return Instruction::Add;
      case scMulExpr:
        return Instruction::Mul;
      default:
        llvm_unreachable("Unexpected SCEV op.");
      }
    }();

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();

    // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }

    // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}

bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}

/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Limit the recursion depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateAddExpr(Ops, Flags);

  // Okay, check to see if the same value occurs in the operand list more than
  // once.  If so, merge them together into a multiply expression.  Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, Flags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an add of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the add then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands, they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // Skip over add expressions until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // regenerate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_begin()+MulOp);
              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
              InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_begin()+OMulOp);
              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
              InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
            const SCEV *InnerMulSum =
                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                              SCEV::FlagAnyWrap, Depth + 1);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      // This follows from the fact that the no-wrap flags on the outer add
      // expression are applicable on the 0th iteration, when the add recurrence
      // will be equal to its start value.
      AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRecs with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExprs to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
           cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
           AddRec->getLoop()->getHeader()) &&
        "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                               AddRec->op_end());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, Flags);
}
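
// A minimal usage sketch of the duplicate-operand fold above, assuming SE is
// a ScalarEvolution instance and X, Y are SCEVs for two distinct values
// (hypothetical names, shown for illustration only):
//   SmallVector<const SCEV *, 3> Ops = {X, X, Y};
//   const SCEV *S = SE.getAddExpr(Ops, SCEV::FlagAnyWrap);
//   // X + X + Y is canonicalized to (2 * X) + Y.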

const SCEV *
ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}
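
// The uniquing idiom above is shared by all of the getOrCreate* helpers:
// profile the node into a FoldingSetNodeID, probe UniqueSCEVs, and allocate
// a new node only on a miss, so structurally identical SCEVs are equal by
// pointer. Note that the no-wrap flags are (re)applied even on a cache hit,
// so a repeated query can update the flags stored on an existing node.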

const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}
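
// For illustration: umul_ov(1ULL << 63, 4, Ov) wraps to 0 in 64 bits, and
// 0 / 4 != (1ULL << 63), so Ov is set. The j > 1 guard skips the division
// when the multiplier is 0 or 1, where wrapping is impossible.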

/// Compute the result of "n choose k", the binomial coefficient.  If an
/// intermediate computation overflows, Overflow will be set and the return will
/// be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At iteration i, we multiply the running result by the i-th term of the
  // numerator and then divide by i. The division is always exact, because a
  // product of i consecutive integers is divisible by i!. Dividing eagerly
  // helps reduce the chance of overflow in the intermediate computations.
  // However, we can still overflow even when the final result would fit.

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}
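
// Worked example: Choose(6, 2). Since k <= n/2, no reflection is needed;
// i = 1 gives r = 6 / 1 = 6, and i = 2 gives r = (6 * 5) / 2 = 15 = C(6, 2).
// Each division is exact because a product of i consecutive integers is
// divisible by i!.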

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}
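
// For illustration: (3 + %x) * %y answers true, because the constant 3 sits
// inside the add/mul chain; ((%x /u 3) + %y) answers false, because follow()
// declines to descend into the udiv, so the constant is never visited.
// (%x and %y are hypothetical SCEVUnknowns.)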

/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);

  // Limit the recursion depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateMulExpr(Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    if (Ops.size() == 2)
      // C1*(C2+V) -> C1*C2 + C1*V
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant, apply this
        // transformation as well.
        //
        // TODO: There are some cases where this transformation is not
        // profitable; for example, Add = (C0 + X) * Y + Z.  Maybe the scope of
        // this transformation should be narrowed down.
        if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            getMulExpr(LHSC, Add->getOperand(1),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            SCEV::FlagAnyWrap, Depth + 1);

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold =
          ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2) {
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over add expressions until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // multiplied values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // The no-self-wrap property cannot be guaranteed after changing the
      // step size, but it will be inferred if either NUW or NSW is true.
      Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRecs with the same loop induction variable
    // being multiplied together.  If so, we can fold them.

    // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //       choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
    //   ]]],+,...up to x=2n}.
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrecs are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
        dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize || isHugeExpression(AddRec) ||
          isHugeExpression(OtherAddRec))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV*, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        SmallVector <const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, Flags);
}
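
// A minimal usage sketch of the C1*(C2+V) distribution above, assuming SE is
// a ScalarEvolution instance, Ty an integer type, and X a SCEV for some
// value (hypothetical names, for illustration only):
//   const SCEV *S = SE.getMulExpr(SE.getConstant(Ty, 2),
//                                 SE.getAddExpr(SE.getConstant(Ty, 3), X));
//   // 2 * (3 + X) distributes to 6 + (2 * X).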

/// Get an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If constant is one, the result is trivial
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to the definition: %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}
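
// For illustration: with a power-of-two divisor the fold above keeps just
// the low bits, e.g. an i32 `%x urem 8` becomes zext i3 -> i32 of trunc(%x),
// since log2(8) = 3. Any other divisor takes the fallback
// %x - ((%x udiv %y) * %y).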

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS;                               // X udiv 1 --> X
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of the LHS.
      // TODO: Generalize this to non-constants by using known-bits information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
        IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence.
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0)
              LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                  AR->getLoop(), SCEV::FlagNW);
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : M->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
                                                      M->op_end());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }

      // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
      if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
        if (auto *DivisorConstant =
                dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
          bool Overflow = false;
          APInt NewRHS =
              DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
          if (Overflow) {
            return getConstant(RHSC->getType(), 0, false);
          }
          return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
        }
      }

      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : A->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
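
// For illustration: `(4 * %x) /u 2` can become `2 * %x` via the (A*B)/C rule
// once the zero-extension check proves the multiply cannot spill into the
// extra bits, and `(%x /u 2) /u 3` becomes `%x /u 6` via the (A/B)/C rule --
// unless 2 * 3 would overflow the type, in which case the quotient is known
// to be zero.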

static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}
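
// For illustration: for the constants -12 and 8 this computes the GCD of
// |-12| = 12 and 8 (zero-extending to a common bit width first if the two
// widths differ), giving 4.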

/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS.  We can't do this when
/// it's not exact because the udiv may be clearing bits.
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now
  // we just deal with udiv-exact of (multiply, constant). See SCEVDivision
  // towards the end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands;
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly, it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}
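
// For illustration: an exact `(6 * %x)<nuw> /u 3` first cancels the common
// factor gcd(6, 3) = 3, leaving (2 * %x) /u 1, which the plain getUDivExpr
// call then simplifies to 2 * %x.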

/// Get an add recurrence expression for the specified loop.  Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop.  Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
  }

  // It's tempting to call getConstantMaxBackedgeTakenCount here and use that
  // information to infer NUW and NSW flags. However, computing a BE count
  // requires calling getAddRecExpr, so we may not yet have a meaningful BE
  // count at this point (and if we don't, we'd be stuck with a
  // SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
          maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
            maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddRecExpr(Operands, L, Flags);
}
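
// For illustration: requesting {{0,+,1}<Inner>,+,4}<Outer>, where Outer
// contains Inner, is rewritten by the canonicalization above into
// {{0,+,4}<Outer>,+,1}<Inner>, so the addrec for the deeper loop always ends
// up as the top-level expression.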

const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  // instruction to its SCEV, because the Instruction may be guarded by control
  // flow and the no-overflow bits may not be valid for the expression in any
  // context. This can be fixed similarly to how these flags are handled for
  // adds.
  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                             : SCEV::FlagAnyWrap;

  const SCEV *TotalOffset = getZero(IntPtrTy);
  // The array size is unimportant. The first thing we do with CurTy is get
  // its element type.
  Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      CurTy = cast<SequentialType>(CurTy)->getElementType();
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseExpr, TotalOffset, Wrap);
}
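
// For illustration (assuming a typical data layout with 4-byte i32 and a
// 64-bit index type): `getelementptr [10 x i32], [10 x i32]* %p, i64 0,
// i64 %i` yields TotalOffset = 0 * 40 + 4 * %i, i.e. the SCEV
// ((4 * %i) + %p).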

std::tuple<const SCEV *, FoldingSetNodeID, void *>
ScalarEvolution::findExistingSCEVInCache(int SCEVType,
                                         ArrayRef<const SCEV *> Ops) {
  FoldingSetNodeID ID;
  void *IP = nullptr;
  ID.AddInteger(SCEVType);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  return std::tuple<const SCEV *, FoldingSetNodeID, void *>(
      UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP);
}

const SCEV *ScalarEvolution::getMinMaxExpr(unsigned Kind,
                                           SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "Operand types don't match!");
#endif

  bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
  bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // Check if we have created the same expression before.
  if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) {
    return S;
  }

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
      if (Kind == scSMaxExpr)
        return APIntOps::smax(LHS, RHS);
      else if (Kind == scSMinExpr)
        return APIntOps::smin(LHS, RHS);
      else if (Kind == scUMaxExpr)
        return APIntOps::umax(LHS, RHS);
      else if (Kind == scUMinExpr)
        return APIntOps::umin(LHS, RHS);
      llvm_unreachable("Unknown SCEV min/max opcode");
    };

    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
    bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);

    if (IsMax ? IsMinV : IsMaxV) {
      // If we are left with a constant minimum(/maximum)-int, strip it off.
      Ops.erase(Ops.begin());
      --Idx;
    } else if (IsMax ? IsMaxV : IsMinV) {
      // If we have a max(/min) with a constant maximum(/minimum)-int,
      // it will always be the extremum.
      return LHSC;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first operation of the same kind.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
    ++Idx;

  // Check to see if one of the operands is of the same kind. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedAny = false;
    while (Ops[Idx]->getSCEVType() == Kind) {
      const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMME->op_begin(), SMME->op_end());
      DeletedAny = true;
    }

    if (DeletedAny)
      return getMinMaxExpr(Kind, Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  llvm::CmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  llvm::CmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
  llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
    if (Ops[i] == Ops[i + 1] ||
        isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
      //  X op Y op Y  -->  X op Y
      //  X op Y       -->  X, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
      --i;
      --e;
    } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
                                               Ops[i + 1])) {
      //  X op Y       -->  Y, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
      --i;
      --e;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced min/max down to nothing!");

  // Okay, it looks like we really DO need an expr.  Check to see if we
  // already have one, otherwise create a new one.
  const SCEV *ExistingSCEV;
  FoldingSetNodeID ID;
  void *IP;
  std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
  if (ExistingSCEV)
    return ExistingSCEV;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVMinMaxExpr(
      ID.Intern(SCEVAllocator), static_cast<SCEVTypes>(Kind), O, Ops.size());

  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
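
// For illustration: getSMaxExpr on {smax(%x, 5), 3} first inlines the nested
// smax, then folds the constants 3 and 5 down to 5, returning smax(%x, 5);
// and duplicate operands such as smax(%x, %y, %y) collapse to smax(%x, %y).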

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMinExpr, Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinExpr(Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMinExpr, Ops);
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here.  createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}
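
// Example (illustrative): for S = (5 + %x), splitAddExpr returns {%x, 5};
// for S = %x or a three-operand add it returns {S, nullptr}, since only a
// two-operand add whose first operand is a constant is split.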

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used on its own; eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S]
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped]
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}

/// Check whether the value has nuw/nsw/exact set but the SCEV does not.
/// TODO: Ideally we would check for poison recursively, but this is better
/// than nothing.
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}
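
// Example (illustrative): for the IR instruction "%a = add nsw i32 %x, %y"
// whose SCEV was built as (%x + %y) without the <nsw> flag, this returns
// true; getSCEV below then declines to cache %a in ExprValueMap, so the
// expander will not reuse %a where the flag might not hold.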

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check that V->S was actually inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify anything and sometimes
      // even increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, offset},
      // because it may generate add/sub instead of GEP in SCEV expansion.
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}
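
// Illustrative sketch of the caching above: if V computes S = (4 + %x),
// then ExprValueMap[S] gains {V, nullptr}, and because splitAddExpr(S)
// yields {%x, 4}, ExprValueMap[%x] also gains {V, 4}; the expander can
// later materialize S as "V" and %x as "V - 4".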

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}

/// Return a SCEV corresponding to -V = -1*V
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(
      V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
}

/// If Expr computes ~A, return A else return nullptr
static const SCEV *MatchNotExpr(const SCEV *Expr) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())
    return nullptr;

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
      !AddRHS->getOperand(0)->isAllOnesValue())
    return nullptr;

  return AddRHS->getOperand(1);
}
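
// Example (illustrative): SCEV canonicalizes ~A as (-1 + (-1 * A)), so for
// Expr = (-1 + (-1 * %x)) MatchNotExpr returns %x; any expression not of
// that exact shape yields nullptr.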

/// Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
  if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
    auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
      SmallVector<const SCEV *, 2> MatchedOperands;
      for (const SCEV *Operand : MME->operands()) {
        const SCEV *Matched = MatchNotExpr(Operand);
        if (!Matched)
          return (const SCEV *)nullptr;
        MatchedOperands.push_back(Matched);
      }
      return getMinMaxExpr(
          SCEVMinMaxExpr::negate(static_cast<SCEVTypes>(MME->getSCEVType())),
          MatchedOperands);
    };
    if (const SCEV *Replaced = MatchMinMaxNegation(MME))
      return Replaced;
  }

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}
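
// Example (illustrative): for V = smin(~%a, ~%b), MatchNotExpr strips the
// negation from each operand and the fold above returns smax(%a, %b),
// avoiding the generic (-1 - V) fallback.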

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}
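
// Worked example (illustrative, i8): let M = -128. The NSW subtraction
// (-1) - M = 127 does not signed-wrap, yet the rewritten (-1) + (-1)*M
// does, because (-1)*(-128) wraps back to -128. NSW therefore transfers
// only when RHS's signed range excludes M, or when LHS is known
// non-negative (a non-wrapping "LHS - M" with LHS >= 0 is impossible, so
// RHS != M follows).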

const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMinFromMismatchedTypes(Ops);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
    SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be provided!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps);
}
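
// Example (illustrative): for Ops = {%a : i32, %b : i64}, the widest type
// is i64, so the result is umin((zext i32 %a to i64), %b).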

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (const SCEV *NAryOp : NAry->operands()) {
      if (NAryOp->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer operands.
        if (PtrOp)
          return V;
        PtrOp = NAryOp;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}
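
// Example (illustrative): for V = ((4 * %i) + %base) with %base the only
// pointer-typed operand, getPointerBase recurses into %base and returns
// that SCEVUnknown; an expression adding two pointers returns V itself.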

/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression if its loop is L. If its loop is not L, use the AddRec itself
/// if IgnoreOtherLoops is true; otherwise the rewrite cannot be done.
/// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done either.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
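
// Example (illustrative): rewriting S = (%n + {0,+,4}<%L>) with respect to
// %L replaces the addrec by its start value, giving (%n + 0) = %n, provided
// %n is invariant in %L; an addrec of a different loop survives only when
// IgnoreOtherLoops is true.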

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
/// post-increment expression if its loop is L; otherwise use the AddRec
/// itself. If the SCEV contains a loop-variant SCEVUnknown, the rewrite
/// cannot be done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
        ? SE.getCouldNotCompute()
        : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
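
// Example (illustrative): rewriting S = {0,+,4}<%L> yields the
// post-increment value {4,+,4}<%L>; together with SCEVInitRewriter this
// gives the first-iteration and next-iteration views of the same addrec.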

/// This class evaluates the compare condition by matching it against the
/// condition of the loop latch. If there is a match, we assume a true value
/// for the condition while building SCEV nodes.
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target the same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop backedge condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if the backedge is taken on the positive branch condition.
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If the value matches the backedge condition of the loop latch, return a
  // constant evolution node based on which branch takes the backedge.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Any SCEVUnknown must be invariant in this loop for the rewrite to hold.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};
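
// Example (illustrative): SCEVShiftRewriter maps {1,+,1}<%L> to
// {1,+,1} - 1 = {0,+,1}<%L>, i.e. the expression's value one iteration
// earlier; createAddRecFromPHI uses this below to recognize PHIs of the
// form PHI(f(0), f({1,+,1})) as f({0,+,1}).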

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}
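
// Worked example (illustrative): for an i8 addrec {0,+,1}<%L> whose signed
// range is known to be, say, [0, 50), the guaranteed no-signed-wrap region
// for adding the step range [1, 2) is [-128, 127); since [0, 50) lies
// inside it, FlagNSW can be set even though the addrec was not built with
// the flag.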

namespace {

/// Represents an abstract binary operation.  This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn a logical shift right by a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // WO are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  return None;
}
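
// Example (illustrative): MatchBinaryOp maps "xor i32 %x, -2147483648"
// (xor with the sign mask) to an add of the same operands, and
// "lshr i32 %x, 3" to a udiv of %x by 8, so later SCEV construction only
// has to handle a small canonical set of opcodes.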

/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}
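
// Example (illustrative): with %X an i64 symbolic phi, the operand
// Op = (sext i32 (trunc i64 %X to i32) to i64) matches the pattern;
// isSimpleCastedPHI returns the truncation type i32 and sets Signed.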

static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}

// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which correspond to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)}
//
// TODO's:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which correspond to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which correspond to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
  SmallVector<const SCEVPredicate *, 3> Predicates;

  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
  // return an AddRec expression under some predicate.

  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  assert(L && "Expecting an integer loop header phi");

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return None;

  const SCEV *BEValue = getSCEV(BEValueV);

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, possibly with casts that we can ignore under
  // an appropriate runtime guard, then we found a simple induction variable!
  const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
  if (!Add)
    return None;

  // If there is a single occurrence of the symbolic value, possibly
  // casted, replace it with a recurrence.
  unsigned FoundIndex = Add->getNumOperands();
  Type *TruncTy = nullptr;
  bool Signed;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if ((TruncTy =
             isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
      if (FoundIndex == e) {
        FoundIndex = i;
        break;
      }

  if (FoundIndex == Add->getNumOperands())
    return None;

  // Create an add with everything but the specified operand.
  SmallVector<const SCEV *, 8> Ops;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if (i != FoundIndex)
      Ops.push_back(Add->getOperand(i));
  const SCEV *Accum = getAddExpr(Ops);

  // The runtime checks will not be valid if the step amount is
  // varying inside the loop.
  if (!isLoopInvariant(Accum, L))
    return None;

  // *** Part2: Create the predicates

  // Analysis was successful: we have a phi-with-cast pattern for which we
  // can return an AddRec expression under the following predicates:
  //
  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
  //     fits within the truncated type (does not overflow) for i = 0 to n-1.
  // P2: An Equal predicate that guarantees that
  //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
  // P3: An Equal predicate that guarantees that
  //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
  //
  // As we next prove, the above predicates guarantee that:
  //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
  //
  //
  // More formally, we want to prove that:
  //     Expr(i+1) = Start + (i+1) * Accum
  //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // Given that:
  // 1) Expr(0) = Start
  // 2) Expr(1) = Start + Accum
  //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
  // 3) Induction hypothesis (step i):
  //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
  //
  // Proof:
  //  Expr(i+1) =
  //   = Start + (i+1)*Accum
  //   = (Start + i*Accum) + Accum
  //   = Expr(i) + Accum
  //   = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
  //                                                             :: from step i
  //
  //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
  //
  //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //     + (Ext ix (Trunc iy (Accum) to ix) to iy)
  //     + Accum                                                     :: from P3
  //
  //   = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
  //     + Accum                            :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
  //
  //   = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
  //   = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // By induction, the same applies to all iterations 1<=i<n:
  //

  // Create a truncated addrec for which we will add a no overflow check (P1).
  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV =
      getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
                    getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);

  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
  // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
  // will be constant.
  //
  //  If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
  // add P1.
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
        Signed ? SCEVWrapPredicate::IncrementNSSW
               : SCEVWrapPredicate::IncrementNUSW;
    const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
    Predicates.push_back(AddRecPred);
  }

  // Create the Equal Predicates P2,P3:

  // It is possible that the predicates P2 and/or P3 are computable at
  // compile time due to StartVal and/or Accum being constants.
  // If either one is, then we can check that now and escape if either P2
  // or P3 is false.

  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
  // for each of StartVal and Accum
  auto getExtendedExpr = [&](const SCEV *Expr,
                             bool CreateSignExtend) -> const SCEV * {
    assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
    const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
    const SCEV *ExtendedExpr =
        CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //  ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //               = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time.
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW)
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
    Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}
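
// Example (illustrative): for the canonical induction
//   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add nsw i64 %iv, 1
// MatchBinaryOp recognizes the add, Accum becomes 1, and the resulting
// SCEV is the addrec {0,+,1}<nsw><%loop>.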

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic.  We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1}  and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of
    // BEValue by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic.  We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB.  S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
      : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
      case scUMinExpr:
      case scSMinExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences whose loop is the one BB is in, or some
        // outer loop.  This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable.  We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("switch should be fully covered!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern.  Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}

const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    const Loop *L = LI.getLoopFor(PN->getParent());

    // We don't want to break LCSSA, even in a SCEV expression tree.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
        return nullptr;

    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
        IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}

const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
    if (LI.replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
                                                      Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  // Try to match some simple smax or umax patterns.
  auto *ICI = dyn_cast<ICmpInst>(Cond);
  if (!ICI)
    return getUnknown(I);

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    // a >s b ? a+x : b+x  ->  smax(a, b)+x
    // a >s b ? b+x : a+x  ->  smin(a, b)+x
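    // For example, with a = %n, b = 0 and x = 1,
    //   %n >s 0 ? %n + 1 : 1
    // becomes smax(%n, 0) + 1.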
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getSMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getSMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a >u b ? a+x : b+x  ->  umax(a, b)+x
    // a >u b ? b+x : a+x  ->  umin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
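    // For example, with x = 0 this folds "n != 0 ? n : 1" to umax(n, 1).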
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, One);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  case ICmpInst::ICMP_EQ:
    // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, One);
      const SCEV *RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  default:
    break;
  }

  return getUnknown(I);
}

/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // Don't attempt to analyze GEPs over unsized objects.
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(getSCEV(*Index));
  return getGEPExpr(GEP, IndexExprs);
}

uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
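    // If the operand is known to be zero (all of its bits are trailing
    // zeros), the zero-extended value is zero too, so every bit of the wider
    // type is a trailing zero.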
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all the operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all the operands' results.
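    // For example, multiplying a value divisible by 4 (two trailing zeros)
    // by one divisible by 2 (one trailing zero) yields a value divisible by
    // 8 (2 + 1 = 3 trailing zeros), capped at the bit width.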
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes =
          std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all the operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all the operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all the operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    KnownBits Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
    return Known.countMinTrailingZeros();
  }

  // SCEVUDivExpr
  return 0;
}

uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  auto I = MinTrailingZerosCache.find(S);
  if (I != MinTrailingZerosCache.end())
    return I->second;

  uint32_t Result = GetMinTrailingZerosImpl(S);
  auto InsertPair = MinTrailingZerosCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}

/// Helper method to assign a range to V from metadata present in the IR.
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}

/// Determine the range for a particular SCEV.  If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &
ScalarEvolution::getRangeRef(const SCEV *S,
                             ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  ConstantRange::PreferredRangeType RangeType =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
          ? ConstantRange::Unsigned : ConstantRange::Signed;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum value will have those known zeros
  // as well.
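  // For example, with 8-bit values and two known trailing zeros, the largest
  // possible unsigned value is 0b11111100 = 252, giving the unsigned range
  // [0, 253).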
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getRangeRef(Add->getOperand(i), SignHint));
    return setRange(Add, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) {
    ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i)
      X = X.smin(getRangeRef(SMin->getOperand(i), SignHint));
    return setRange(SMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) {
    ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i)
      X = X.umin(getRangeRef(UMin->getOperand(i), SignHint));
    return setRange(UMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
    return setRange(ZExt, SignHint,
                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
    return setRange(SExt, SignHint,
                    ConservativeResult.intersectWith(X.signExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth),
                                                     RangeType));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap())
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
        if (!C->getValue()->isZero())
          ConservativeResult = ConservativeResult.intersectWith(
              ConstantRange(C->getAPInt(), APInt(BitWidth, 0)), RangeType);

    // If there's no signed wrap, and all the operands have the same sign or
    // zero, the value won't ever change sign.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt(BitWidth, 0),
                        APInt::getSignedMinValue(BitWidth)), RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth),
                        APInt(BitWidth, 1)), RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromAffine.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffine, RangeType);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromFactoring.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
                                                            RangeType);

    // Split here to avoid paying the compile-time cost of calling both
    // computeKnownBits and ComputeNumSignBits.  This restriction can be lifted
    // if needed.
    const DataLayout &DL = getDataLayout();
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
      // For a SCEVUnknown, ask ValueTracking.
      KnownBits Known =
          computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
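      // Known bits constrain the value to the range [Known.One,
      // ~Known.Zero + 1); skip the intersection when the endpoints coincide
      // (e.g. when nothing is known, both are zero), since equal endpoints
      // would denote the full set.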
      if (Known.One != ~Known.Zero + 1)
        ConservativeResult =
            ConservativeResult.intersectWith(
                ConstantRange(Known.One, ~Known.Zero + 1), RangeType);
    } else {
      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
             "generalize as needed!");
      unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (NS > 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
            RangeType);
    }

    // A range of Phi is a subset of union of all ranges of its input.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not run over cycled Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point to continue if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void) Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression compute a range of
// values that the expression can take. Initially, the expression has a value
// from StartRange and then is changed by Step up to MaxBECount times. The
// Signed argument defines whether we treat Step as signed or unsigned.
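//
// For example, with StartRange = [0, 5), Step = 2 and MaxBECount = 10, the
// value can change by at most 2 * 10 = 20, giving the final range [0, 25).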
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and we
  // just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we also
  // note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold true due to the well-defined wrap-around behavior
    // of APInt.
    Step = Step.abs();

  // Check if Offset (Step * MaxBECount, computed below) would exceed the full
  // span of BitWidth. If it would, the expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. Checks above guarantee no
  // overflow here.
  APInt Offset = Step * MaxBECount;

  // Minimum value of the final range will match the minimal value of StartRange
  // if the expression is increasing and will be decreased by Offset otherwise.
  // Maximum value of the final range will match the maximal value of StartRange
  // if the expression is decreasing and will be increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the initial
  // range (due to wrap around). This means that the expression can take any
  // value in this bitwidth, and we have to return full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected; return the [NewLower, NewUpper) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  //    RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  // == RangeOf({A,+,P}) union RangeOf({B,+,Q})
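  //
  // For example, the range of {C ? 0 : 10,+,C ? 1 : 2} is contained in the
  // union of the ranges of {0,+,1} and {10,+,2}.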

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp.hasValue())
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases where
    // that will help over what getRange already does, though.
    return ConstantRange::getFull(BitWidth);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here.  This function is called
  // from deep in the call stack, and calling getSCEV (on a sext instruction,
  // say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);

  return TrueRange.unionWith(FalseRange);
}

SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}

bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Here we check that I is in the header of the innermost loop containing I,
  // since we only deal with instructions in the loop header. The actual loop we
  // need to check later will come from an add recurrence, but getting that
  // requires computing the SCEV of the operands, which can be expensive. We
  // can do this check cheaply to rule out some cases early.
  Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
  if (InnermostContainingLoop == nullptr ||
      InnermostContainingLoop->getHeader() != I->getParent())
    return false;

  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfFullPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find the
  // loop that I is considered in relation to and prove that I is executed for
  // every iteration of that loop. That implies that the value that I
  // calculates does not wrap anywhere in the loop, so then we can apply the
  // flags to the SCEV.
  //
  // We check isLoopInvariant to disambiguate in case we are adding recurrences
  // from different loops, so that we know which loop to prove that I is
  // executed in.
  for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }
  return false;
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison, period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  //   poison.  There are two possibilities (call the iteration in which \p I
  //   first became poison K):
  //
  //  1. In the set of iterations including and after K, the loop body executes
  //     no side effects.  In this case executing the backedge an infinite
  //     number of times will yield undefined behavior.
  //
  //  2. In the set of iterations including and after K, the loop body executes
  //     at least one side effect.  In this case, that specific instance of side
  //     effect is control dependent on poison, which also yields undefined
  //     behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison.  Only
  // things that are known to be fully poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayHaveSideEffects();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /* HasNoSideEffects */ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

const SCEV *ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT.isReachableFromEntry(I->getParent()))
      return getUnknown(UndefValue::get(V->getType()));
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (isa<ConstantPointerNull>(V))
    return getZero(V->getType());
  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
  else if (!isa<ConstantExpr>(V))
    return getUnknown(V);

  Operator *U = cast<Operator>(V);
  if (auto BO = MatchBinaryOp(U, DT)) {
    switch (BO->Opcode) {
    case Instruction::Add: {
      // The simple thing to do would be to just call getSCEV on both operands
      // and call getAddExpr with the result. However if we're looking at a
      // bunch of things all added together, this can be quite inefficient,
      // because it leads to N-1 getAddExpr calls for N ultimate operands.
      // Instead, gather up all the operands and make a single getAddExpr call.
      // LLVM IR canonical form means we need only traverse the left operands.
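      // For example, ((%a + %b) + %c) + %d is gathered into a single
      // getAddExpr(%a, %b, %c, %d) call instead of three nested ones.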
      SmallVector<const SCEV *, 4> AddOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            AddOps.push_back(OpSCEV);
            break;
          }

          // If a NUW or NSW flag can be applied to the SCEV for this
          // addition, then compute the SCEV for this addition by itself
          // with a separate call to getAddExpr. We need to do that
          // instead of pushing the operands of the addition onto AddOps,
          // since the flags are only known to apply to this particular
          // addition - they may not apply to other additions that can be
          // formed with operands from AddOps.
          const SCEV *RHS = getSCEV(BO->RHS);
          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            const SCEV *LHS = getSCEV(BO->LHS);
            if (BO->Opcode == Instruction::Sub)
              AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
            else
              AddOps.push_back(getAddExpr(LHS, RHS, Flags));
            break;
          }
        }

        if (BO->Opcode == Instruction::Sub)
          AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
        else
          AddOps.push_back(getSCEV(BO->RHS));

        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || (NewBO->Opcode != Instruction::Add &&
                       NewBO->Opcode != Instruction::Sub)) {
          AddOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getAddExpr(AddOps);
    }

    case Instruction::Mul: {
      SmallVector<const SCEV *, 4> MulOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            MulOps.push_back(OpSCEV);
            break;
          }

          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            MulOps.push_back(
                getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags));
            break;
          }
        }

        MulOps.push_back(getSCEV(BO->RHS));
        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || NewBO->Opcode != Instruction::Mul) {
          MulOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getMulExpr(MulOps);
    }
    case Instruction::UDiv:
      return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::URem:
      return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::Sub: {
      SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
      if (BO->Op)
        Flags = getNoWrapFlagsFromUB(BO->Op);
      return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
    }
    case Instruction::And:
      // For an expression like x&255 that merely masks off the high bits,
      // use zext(trunc(x)) as the SCEV expression.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        if (CI->isZero())
          return getSCEV(BO->RHS);
        if (CI->isMinusOne())
          return getSCEV(BO->LHS);
        const APInt &A = CI->getValue();

        // Instcombine's ShrinkDemandedConstant may strip bits out of
        // constants, obscuring what would otherwise be a low-bits mask.
        // Use computeKnownBits to compute what ShrinkDemandedConstant
        // knew about to reconstruct a low-bits mask value.
        unsigned LZ = A.countLeadingZeros();
        unsigned TZ = A.countTrailingZeros();
        unsigned BitWidth = A.getBitWidth();
        KnownBits Known(BitWidth);
        computeKnownBits(BO->LHS, Known, getDataLayout(),
                         0, &AC, nullptr, &DT);

        APInt EffectiveMask =
            APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
        if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
          const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
          const SCEV *LHS = getSCEV(BO->LHS);
          const SCEV *ShiftedLHS = nullptr;
          if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
            if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
              // For an expression like (x * 8) & 8, simplify the multiply.
              unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
              unsigned GCD = std::min(MulZeros, TZ);
              APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
              SmallVector<const SCEV*, 4> MulOps;
              MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
              MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end());
              auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
              ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
            }
          }
          if (!ShiftedLHS)
            ShiftedLHS = getUDivExpr(LHS, MulCount);
          return getMulExpr(
              getZeroExtendExpr(
                  getTruncateExpr(ShiftedLHS,
                      IntegerType::get(getContext(), BitWidth - LZ - TZ)),
                  BO->LHS->getType()),
              MulCount);
        }
      }
      break;

    case Instruction::Or:
      // If the RHS of the Or is a constant, we may have something like:
      // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
      // optimizations will transparently handle this case.
      //
      // In order for this transformation to be safe, the LHS must be of the
      // form X*(2^n) and the Or constant must be less than 2^n.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        const SCEV *LHS = getSCEV(BO->LHS);
        const APInt &CIVal = CI->getValue();
        if (GetMinTrailingZeros(LHS) >=
            (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
          // Build a plain add SCEV.
          const SCEV *S = getAddExpr(LHS, getSCEV(CI));
          // If the LHS of the add was an addrec and it has no-wrap flags,
          // transfer the no-wrap flags, since an or won't introduce a wrap.
          if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
            const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
            const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
                OldAR->getNoWrapFlags());
          }
          return S;
        }
      }
      break;

    case Instruction::Xor:
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        // If the RHS of xor is -1, then this is a not operation.
        if (CI->isMinusOne())
          return getNotSCEV(getSCEV(BO->LHS));

        // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
        // This is a variant of the check for xor with -1, and it handles
        // the case where instcombine has trimmed non-demanded bits out
        // of an xor with -1.
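        // For example, with C = 255, and(x, 255) is modeled as
        // zext(trunc(x) to i8), so xor(and(x, 255), 255) flips exactly the
        // low 8 bits and becomes zext(~trunc(x)).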
        if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
          if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
            if (LBO->getOpcode() == Instruction::And &&
                LCI->getValue() == CI->getValue())
              if (const SCEVZeroExtendExpr *Z =
                      dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
                Type *UTy = BO->LHS->getType();
                const SCEV *Z0 = Z->getOperand();
                Type *Z0Ty = Z0->getType();
                unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

                // If C is a low-bits mask, the zero extend is serving to
                // mask off the high bits. Complement the operand and
                // re-apply the zext.
                if (CI->getValue().isMask(Z0TySize))
                  return getZeroExtendExpr(getNotSCEV(Z0), UTy);

                // If C is a single bit, it may be in the sign-bit position
                // before the zero-extend. In this case, represent the xor
                // using an add, which is equivalent, and re-apply the zext.
                APInt Trunc = CI->getValue().trunc(Z0TySize);
                if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                    Trunc.isSignMask())
                  return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                           UTy);
              }
      }
      break;

    case Instruction::Shl:
      // Turn shift left of a constant amount into a multiply.
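      // For example, X << 3 is modeled as X * 8.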
      if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
        uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

        // If the shift count is not less than the bitwidth, the result of
        // the shift is undefined. Don't try to analyze it, because the
        // resolution chosen here may differ from the resolution chosen in
        // other parts of the compiler.
        if (SA->getValue().uge(BitWidth))
          break;

        // It is currently not resolved how to interpret NSW for left
        // shift by BitWidth - 1, so we avoid applying flags in that
        // case. Remove this check (or this comment) once the situation
        // is resolved. See
        // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html
        // and http://reviews.llvm.org/D8890 .
        auto Flags = SCEV::FlagAnyWrap;
        if (BO->Op && SA->getValue().ult(BitWidth - 1))
          Flags = getNoWrapFlagsFromUB(BO->Op);

        Constant *X = ConstantInt::get(
            getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
      }
      break;

    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.

        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        if (L->getOperand(1) == BO->RHS)
          // For a two-shift sext-inreg, i.e. n = m,
          // use sext(trunc(x)) as the SCEV expression.
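          // For example, on i32, (X << 24) >>s 24 becomes
          // sext(trunc(X to i8) to i32).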
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
            // expression. We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
            // ShlAmt - AShrAmt < BitWidth - AShrAmt.
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                getConstant(Mul)), OuterTy);
          }
        }
      }
      break;
    }
    }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B.  By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  // lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through.  Since
    // createNodeForSelectOrPHI only works for a condition that is an
    // `ICmpInst`, and constant expressions cannot have instructions as
    // operands, we'd have returned getUnknown for a select constant
    // expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = CallSite(U).getReturnedArgOperand())
      return getSCEV(RV);
    break;
  }

  return getUnknown(V);
}

//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
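  // The trip count is the backedge-taken count plus one; e.g. a loop whose
  // backedge is taken 9 times executes its body 10 times.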
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                                    BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count is
/// always a multiple of the returned value (don't forget the trip count could
/// very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a
/// multiple of a constant (which is also the case if the trip count is
/// simply constant; use getSmallConstantTripCount for that case). It will
/// also return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power of
    // two divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power of 2 divisor returned.
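    // For example, a trip count expression known to be a multiple of 8 has
    // at least three trailing zero bits, so we return 8 here.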
    return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          BasicBlock *ExitingBlock,
                                          ExitCountKind Kind) {
  switch (Kind) {
  case Exact: 
    return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getMax(ExitingBlock, this);
  }
  llvm_unreachable("Invalid ExitCountKind!");
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
                                                   ExitCountKind Kind) {
  switch (Kind) {
  case Exact: 
    return getBackedgeTakenInfo(L).getExact(L, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getMax(this);
  }
  llvm_unreachable("Invalid ExitCountKind!");
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isMaxOrZero(this);
}

/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    Worklist.push_back(&PN);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});

  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting it
  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
  // must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // In a product build, the statistics are unused; the (void) casts below
  // suppress unused-variable warnings.
  (void)NumTripCountsComputed;
  (void)NumTripCountsNotComputed;
#if LLVM_ENABLE_STATS || !defined(NDEBUG)
  const SCEV *BEExact = Result.getExact(L, this);
  if (BEExact != getCouldNotCompute()) {
    assert(isLoopInvariant(BEExact, L) &&
           isLoopInvariant(Result.getMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  }
  else if (Result.getMax(this) == getCouldNotCompute() &&
           isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }
#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Discovered;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI.  In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      // Since we don't need to invalidate anything for correctness and we're
      // only invalidating to make SCEV's results more precise, we get to stop
      // early to avoid invalidating too much.  This is especially important in
      // cases like:
      //
      //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
      // loop0:
      //   %pn0 = phi
      //   ...
      // loop1:
      //   %pn1 = phi
      //   ...
      //
      // where both loop0's and loop1's backedge-taken counts use the SCEV
      // expression for %v.  If we don't have the early stop below then in cases
      // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
      // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
      // count for loop1, effectively nullifying SCEV's trip count cache.
      for (auto *U : I->users())
        if (auto *I = dyn_cast<Instruction>(U)) {
          auto *LoopForUser = LI.getLoopFor(I->getParent());
          if (LoopForUser && L->contains(LoopForUser) &&
              Discovered.insert(I).second)
            Worklist.push_back(I);
        }
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = std::move(Result);
}

void ScalarEvolution::forgetAllLoops() {
  // This method is intended to forget all info about loops. It should
  // invalidate caches as if the following happened:
  // - The trip counts of all loops have changed arbitrarily
  // - Every llvm::Value has been updated in place to produce a different
  // result.
  BackedgeTakenCounts.clear();
  PredicatedBackedgeTakenCounts.clear();
  LoopPropertiesCache.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValueExprMap.clear();
  ValuesAtScopes.clear();
  LoopDispositions.clear();
  BlockDispositions.clear();
  UnsignedRanges.clear();
  SignedRanges.clear();
  ExprValueMap.clear();
  HasRecMap.clear();
  MinTrailingZerosCache.clear();
  PredicatedSCEVRewrites.clear();
}

void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value.
  auto RemoveLoopFromBackedgeMap =
      [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
        auto BTCPos = Map.find(L);
        if (BTCPos != Map.end()) {
          BTCPos->second.clear();
          Map.erase(BTCPos);
        }
      };

  SmallVector<const Loop *, 16> LoopWorklist(1, L);
  SmallVector<Instruction *, 32> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;

  // Iterate over all the loops and sub-loops to drop SCEV information.
  while (!LoopWorklist.empty()) {
    auto *CurrL = LoopWorklist.pop_back_val();

    RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
    RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);

    // Drop information about predicated SCEV rewrites for this loop.
    for (auto I = PredicatedSCEVRewrites.begin();
         I != PredicatedSCEVRewrites.end();) {
      std::pair<const SCEV *, const Loop *> Entry = I->first;
      if (Entry.second == CurrL)
        PredicatedSCEVRewrites.erase(I++);
      else
        ++I;
    }

    auto LoopUsersItr = LoopUsers.find(CurrL);
    if (LoopUsersItr != LoopUsers.end()) {
      for (auto *S : LoopUsersItr->second)
        forgetMemoizedResults(S);
      LoopUsers.erase(LoopUsersItr);
    }

    // Drop information about expressions based on loop-header PHIs.
    PushLoopPHIs(CurrL, Worklist);

    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second)
        continue;

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(It->second);
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }

    LoopPropertiesCache.erase(CurrL);
    // Forget all contained loops too, to avoid dangling entries in the
    // ValuesAtScopes map.
    LoopWorklist.append(CurrL->begin(), CurrL->end());
  }
}

void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
  while (Loop *Parent = L->getParentLoop())
    L = Parent;
  forgetLoop(L);
}

void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
      ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}

/// Get the exact loop backedge taken count considering all loop exits. A
/// computable result can only be returned for loops with all exiting blocks
/// dominating the latch. howFarToZero assumes that the limit of each loop test
/// is never skipped. This is a valid assumption as long as the loop exits via
/// that test. For precise results, it is the caller's responsibility to specify
/// the relevant loop exiting block using getExact(ExitingBlock, SE).
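///
/// For example (an illustrative sketch, not drawn from any particular test):
/// in a loop with two exiting blocks, one testing "i == n" and the other
/// "i == m", where both dominate the latch, the exact backedge-taken count
/// is umin(n, m) once the operand types have been unified.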
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
                                             SCEVUnionPredicate *Preds) const {
  // If any exits were not computable, the loop is not computable.
  if (!isComplete() || ExitNotTaken.empty())
    return SE->getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  // All exiting blocks we have collected must dominate the only backedge.
  if (!Latch)
    return SE->getCouldNotCompute();

  // All exiting blocks we have gathered dominate the loop's latch, so the
  // exact trip count is simply the minimum of all the computed exit counts.
  SmallVector<const SCEV *, 2> Ops;
  for (auto &ENT : ExitNotTaken) {
    const SCEV *BECount = ENT.ExactNotTaken;
    assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
    assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
           "We should only have known counts for exiting blocks that dominate "
           "latch!");

    Ops.push_back(BECount);

    if (Preds && !ENT.hasAlwaysTruePredicate())
      Preds->add(ENT.Predicate.get());

    assert((Preds || ENT.hasAlwaysTruePredicate()) &&
           "Predicate should always be true!");
  }

  return SE->getUMinFromMismatchedTypes(Ops);
}

/// Get the exact not taken count for this loop exit.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
                                             ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.ExactNotTaken;

  return SE->getCouldNotCompute();
}

const SCEV *
ScalarEvolution::BackedgeTakenInfo::getMax(BasicBlock *ExitingBlock,
                                           ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.MaxNotTaken;

  return SE->getCouldNotCompute();
}

/// getMax - Get the max backedge taken count for the loop.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };

  if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax())
    return SE->getCouldNotCompute();

  assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) &&
         "No point in having a non-constant max backedge taken count!");
  return getMax();
}

bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(
    ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };
  return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
}

bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
                                                    ScalarEvolution *SE) const {
  if (getMax() && getMax() != SE->getCouldNotCompute() &&
      SE->hasOperand(getMax(), S))
    return true;

  for (auto &ENT : ExitNotTaken)
    if (ENT.ExactNotTaken != SE->getCouldNotCompute() &&
        SE->hasOperand(ENT.ExactNotTaken, S))
      return true;

  return false;
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
    : ExactNotTaken(E), MaxNotTaken(E) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
    : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
  assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
          !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
         "Exact is not allowed to be less precise than Max");
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
  for (auto *PredSet : PredSetList)
    for (auto *P : *PredSet)
      addPredicate(P);
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
    : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
                                      bool MaxOrZero)
    : ExitLimit(E, M, MaxOrZero, None) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
/// computable exit into a persistent ExitNotTakenInfo array.
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo>
        ExitCounts,
    bool Complete, const SCEV *MaxCount, bool MaxOrZero)
    : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                  nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
  ExitNotTaken.clear();
}

/// Compute the number of times the backedge of the specified loop will execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];
    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit; otherwise
    // it is a LoopMayExit.  If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
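    //
    // As an illustration (a sketch only):
    //   for (i = 0; i != n; ++i)
    //     if (cond) break;   // break in a conditionally-executed block
    // Here the latch test "i != n" dominates the latch and is a
    // LoopMustExit, while the guarded break does not and is a LoopMayExit,
    // so MaxBECount can still be derived from n.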
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection
  // with the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  Instruction *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(
        L, BI->getCondition(), ExitIfTrue,
        /*ControlsExit=*/IsOnlyExit, AllowPredicates);
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For a switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
          return getCouldNotCompute();
        Exit = SBB;
      }
    assert(Exit && "Exiting block must have at least one exit");
    return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
                                                /*ControlsExit=*/IsOnlyExit);
  }

  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
    const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
  return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
                                        ControlsExit, AllowPredicates);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
                                      bool ExitIfTrue, bool ControlsExit,
                                      bool AllowPredicates) {
  (void)this->L;
  (void)this->ExitIfTrue;
  (void)this->AllowPredicates;

  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");
  auto Itr = TripCountMap.find({ExitCond, ControlsExit});
  if (Itr == TripCountMap.end())
    return None;
  return Itr->second;
}

void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
                                             bool ExitIfTrue,
                                             bool ControlsExit,
                                             bool AllowPredicates,
                                             const ExitLimit &EL) {
  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");

  auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
  assert(InsertResult.second && "Expected successful insertion!");
  (void)InsertResult;
  (void)ExitIfTrue;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {

  if (auto MaybeEL =
          Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
    return *MaybeEL;

  ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
                                              ControlsExit, AllowPredicates);
  Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
  return EL;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
    if (BO->getOpcode() == Instruction::And) {
      // Recurse on the operands of the and.
      bool EitherMayExit = !ExitIfTrue;
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be true for the loop to continue executing.
        // Choose the less conservative count.
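        // For instance (sketch): with "br i1 %and, label %body, label %exit"
        // the loop keeps running only while both operands of the 'and' hold,
        // so the backedge-taken count is the umin of the two exit counts.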
        if (EL0.ExactNotTaken == getCouldNotCompute() ||
            EL1.ExactNotTaken == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount =
              getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
        if (EL0.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL1.MaxNotTaken;
        else if (EL1.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL0.MaxNotTaken;
        else
          MaxBECount =
              getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
      } else {
        // Both conditions must be true at the same time for the loop to exit.
        // For now, be conservative.
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }

      // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
      // to be more aggressive when computing BECount than when computing
      // MaxBECount.  In these cases it is possible for EL0.ExactNotTaken and
      // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
      // to not.
      if (isa<SCEVCouldNotCompute>(MaxBECount) &&
          !isa<SCEVCouldNotCompute>(BECount))
        MaxBECount = getConstant(getUnsignedRangeMax(BECount));

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      bool EitherMayExit = ExitIfTrue;
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be false for the loop to continue executing.
        // Choose the less conservative count.
        if (EL0.ExactNotTaken == getCouldNotCompute() ||
            EL1.ExactNotTaken == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount =
              getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
        if (EL0.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL1.MaxNotTaken;
        else if (EL1.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL0.MaxNotTaken;
        else
          MaxBECount =
              getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
      } else {
        // Both conditions must be false at the same time for the loop to exit.
        // For now, be conservative.
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }
      // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
      // to be more aggressive when computing BECount than when computing
      // MaxBECount.  In these cases it is possible for EL0.ExactNotTaken and
      // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
      // to not.
      if (isa<SCEVCouldNotCompute>(MaxBECount) &&
          !isa<SCEVCouldNotCompute>(BECount))
        MaxBECount = getConstant(getUnsignedRangeMax(BECount));

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
  }

  // With an icmp, it may be feasible to compute an exact backedge-taken count.
  // Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
    ExitLimit EL =
        computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
    if (EL.hasFullInfo() || !AllowPredicates)
      return EL;

    // Try again, but use SCEV predicates this time.
    return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
                                    /*AllowPredicates=*/true);
  }

  // Check for a constant condition. These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          bool ExitIfTrue,
                                          bool ControlsExit,
                                          bool AllowPredicates) {
  // If the condition was "exit on true", convert it to "exit on false".
  ICmpInst::Predicate Pred;
  if (!ExitIfTrue)
    Pred = ExitCond->getPredicate();
  else
    Pred = ExitCond->getInversePredicate();
  const ICmpInst::Predicate OriginalPred = Pred;

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      ExitLimit ItCnt =
        computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If only the LHS is loop invariant, move it to the RHS.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
        ConstantRange CompRange =
            ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  switch (Pred) {
  case ICmpInst::ICMP_NE: {                     // while (X != Y)
    // Convert to: while (X-Y != 0)
    ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
                                AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
    // Convert to: while (X-Y == 0)
    ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT: {                    // while (X < Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SLT;
    ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
                                    AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: {                    // while (X > Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SGT;
    ExitLimit EL =
        howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
                            AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  default:
    break;
  }

  auto *ExhaustiveCount =
      computeExitCountExhaustively(L, ExitCond, ExitIfTrue);

  if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
    return ExhaustiveCount;

  return computeShiftCompareExitLimit(ExitCond->getOperand(0),
                                      ExitCond->getOperand(1), L, OriginalPred);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
                                                      SwitchInst *Switch,
                                                      BasicBlock *ExitingBlock,
                                                      bool ControlsExit) {
  assert(!L->contains(ExitingBlock) && "Exit block must be outside the loop!");

  // Give up if the exit is the default dest of a switch.
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}

/// Given an exit condition of 'icmp op load X, cst', try to see if we can
/// compute the backedge execution count.
ScalarEvolution::ExitLimit
ScalarEvolution::computeLoadConstantCompareExitLimit(
    LoadInst *LI, Constant *RHS, const Loop *L,
    ICmpInst::Predicate predicate) {
  if (LI->isVolatile()) return getCouldNotCompute();

  // Check to see if the loaded pointer is a getelementptr of a global.
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute();  // Multiple variable indices.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop-variant value.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst);   // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
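  // E.g. (an illustrative sketch): for "%x = lshr i32 %a, 2" this sets
  // OutLHS = %a and OutOpCode = Instruction::LShr and returns true; a
  // shift by zero matches but is rejected as not strictly positive.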
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence", either of the form %iv or %iv.shifted, in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match.  Return the corresponding PHI node (%iv
  // above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so.  Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value.  We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations.  If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is true)
  // is false for the value the shift recurrence stabilizes to, then we know
  // that the backedge is taken only a finite number of times.
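  //
  // Sketch of the idea: given "%iv.next = lshr i32 %iv, 1", the recurrence
  // reaches 0 within at most 32 iterations, so a backedge guarded by
  // "icmp ne i32 %iv, 0" can be taken at most bitwidth-many times.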

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}

/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I) || isa<ExtractValueInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr;  // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr;  // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI.  If this expression does not fit with these
/// constraints, return null.
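///
/// For example (illustrative only): given
///   %phi  = phi i32 [ 0, %entry ], [ %next, %loop ]
///   %next = add i32 %phi, 3
/// getConstantEvolvingPHI(%next, L) returns %phi, since %next is derived
/// from %phi using only constant operands.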
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal.  If we can't fold this expression for some
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}


// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
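// E.g. (a sketch): for "%p = phi i32 [ 7, %a ], [ 7, %b ], [ %x, %latch ]"
// with BB == %latch this returns the i32 constant 7; had %a and %b carried
// different constants, it would return nullptr.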
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
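///
/// For instance (a sketch): with a backedge-taken count of 3 and
///   %p = phi i32 [ 1, %preheader ], [ %m, %latch ]
///   %m = mul i32 %p, 2
/// the PHI takes the values 1, 2, 4, 8 on successive header executions, so
/// the computed exit value is 8.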
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  auto I = ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  // Not going to evaluate it.
  if (BEs.ugt(MaxBruteForceIterations))
    return ConstantEvolutionLoopExitValue[PN] = nullptr;

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
         "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN];  // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr;        // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {   // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this before
    // calling EvaluateExpression on them because that may invalidate iterators
    // into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;    // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);
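  // The null entry just added marks this (V, L) query as in progress, so a
  // recursive evaluation that reaches it again folds back to V (see the
  // lookup above) rather than recursing forever.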

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      break;
    }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
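///
/// For example (sketch): a SCEVAddExpr over the SCEVConstants 3 and 20
/// folds to the ConstantInt 23, whereas any scAddRecExpr yields null.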
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (static_cast<SCEVTypes>(V->getSCEVType())) {
    case scCouldNotCompute:
    case scAddRecExpr:
      break;
    case scConstant:
      return cast<SCEVConstant>(V)->getValue();
    case scUnknown:
      return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
    case scSignExtend: {
      const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
        return ConstantExpr::getSExt(CastOp, SS->getType());
      break;
    }
    case scZeroExtend: {
      const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
        return ConstantExpr::getZExt(CastOp, SZ->getType());
      break;
    }
    case scTruncate: {
      const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
        return ConstantExpr::getTrunc(CastOp, ST->getType());
      break;
    }
    case scAddExpr: {
      const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          unsigned AS = PTy->getAddressSpace();
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }
        for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
          if (!C2) return nullptr;

          // First pointer!
          if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
            unsigned AS = C2->getType()->getPointerAddressSpace();
            std::swap(C, C2);
            Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
            // The offsets have been converted to bytes.  We can add bytes to an
            // i8* by GEP with the byte count in the first index.
            C = ConstantExpr::getBitCast(C, DestPtrTy);
          }

          // Don't bother trying to sum two pointers. We probably can't
          // statically compute a load that results from it anyway.
          if (C2->getType()->isPointerTy())
            return nullptr;

          if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
            if (PTy->getElementType()->isStructTy())
              C2 = ConstantExpr::getIntegerCast(
                  C2, Type::getInt32Ty(C->getContext()), true);
            C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
          } else
            C = ConstantExpr::getAdd(C, C2);
        }
        return C;
      }
      break;
    }
    case scMulExpr: {
      const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
        // Don't bother with pointers at all.
        if (C->getType()->isPointerTy()) return nullptr;
        for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
          if (!C2 || C2->getType()->isPointerTy()) return nullptr;
          C = ConstantExpr::getMul(C, C2);
        }
        return C;
      }
      break;
    }
    case scUDivExpr: {
      const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
      if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
        if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
          if (LHS->getType() == RHS->getType())
            return ConstantExpr::getUDiv(LHS, RHS);
      break;
    }
    case scSMaxExpr:
    case scUMaxExpr:
    case scSMinExpr:
    case scUMinExpr:
      break; // TODO: smax, umax, smin, umin.
  }
  return nullptr;
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      if (PHINode *PN = dyn_cast<PHINode>(I)) {
        const Loop *LI = this->LI[I->getParent()];
        // Looking for loop exit value.
        if (LI && LI->getParentLoop() == L &&
            PN->getParent() == LI->getHeader()) {
          // Okay, there is no closed form solution for the PHI node.  Check
          // to see if the loop that contains it has a known backedge-taken
          // count.  If so, we may be able to force computation of the exit
          // value.
          const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
          // This trivial case can show up in some degenerate cases where
          // the incoming IR has not yet been fully simplified.
          if (BackedgeTakenCount->isZero()) {
            Value *InitValue = nullptr;
            bool MultipleInitValues = false;
            for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
              if (!LI->contains(PN->getIncomingBlock(i))) {
                if (!InitValue)
                  InitValue = PN->getIncomingValue(i);
                else if (InitValue != PN->getIncomingValue(i)) {
                  MultipleInitValues = true;
                  break;
                }
              }
            }
            if (!MultipleInitValues && InitValue)
              return getSCEV(InitValue);
          }
          // Do we have a loop invariant value flowing around the backedge
          // for a loop which must execute the backedge?
          if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
              isKnownPositive(BackedgeTakenCount) &&
              PN->getNumIncomingValues() == 2) {
            unsigned InLoopPred = LI->contains(PN->getIncomingBlock(0)) ? 0 : 1;
            const SCEV *OnBackedge = getSCEV(PN->getIncomingValue(InLoopPred));
            if (IsAvailableOnEntry(LI, DT, OnBackedge, PN->getParent()))
              return OnBackedge;
          }
          if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
            // Okay, we know how many times the containing loop executes.  If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV =
                getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
            if (RV) return getSCEV(RV);
          }
        }

        // If there is a single-input PHI, evaluate it at our scope. If we can
        // prove that this replacement does not break LCSSA form, use the new
        // value.
        if (PN->getNumOperands() == 1) {
          const SCEV *Input = getSCEV(PN->getOperand(0));
          const SCEV *InputAtScope = getSCEVAtScope(Input, L);
          // TODO: We can generalize this using LI.replacementPreservesLCSSAForm;
          // for now, in the simplest case, just support constants.
          if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
        }
      }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If an operand is non-constant and its type is not SCEVable
          // (neither integer nor pointer), don't even try to analyze it with
          // SCEV techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                                Operands[1], DL, &TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMinMaxExpr>(Comm))
          return getMinMaxExpr(Comm->getSCEVType(), NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div;   // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded add recurrence.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
        getAddRecExpr(NewOps, AddRec->getLoop(),
                      AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

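/// Strip off any zero- or sign-extensions wrapping \p S. Both extensions are
/// injective and map zero to zero, so the extended expression is zero exactly
/// when its operand is; howFarToZero below uses this to solve for a root of
/// the underlying, narrower expression instead.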
const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
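///
/// Illustrative worked example (values chosen here for exposition): with
/// BW = 8, A = 4 and B = 8, we have N = 256, D = gcd(4, 256) = 4, and B is
/// divisible by D. The multiplicative inverse of A/D = 1 modulo N/D = 64 is
/// I = 1, so the minimum unsigned root is X = (I * B mod N) / D = 2; indeed
/// 4 * 2 = 8 (mod 256), and the remaining roots 66, 130 and 194 are larger.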
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                               ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N can have at most one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2
  // for B is not less than the multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  // I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  // (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns None if the addrec coefficients are not compile-
/// time constants.
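///
/// Illustrative example (values chosen for exposition): the addrec
/// {3,+,1,+,2} accumulates to n^2 + 3 after n iterations, so this function
/// would return A = 2, B = 0, C = 6 and the multiplier M = 2, since
/// 2*n^2 + 6 = 2 * (n^2 + 3).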
static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return None;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isNullValue() && "This is not a quadratic addrec");

  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
  N = N.sext(NewWidth);
  M = M.sext(NewWidth);
  L = L.sext(NewWidth);

  // The increments are M, M+N, M+2N, ..., so the accumulated values are
  //   L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
  //   L+M, L+2M+N, L+3M+3N, ...
  // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
  //
  // The equation Acc = 0 is then
  //   L + nM + n(n-1)/2 N = 0,  or  2L + 2M n + n(n-1) N = 0.
  // In a quadratic form it becomes:
  //   N n^2 + (2M-N) n + 2L = 0.

  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}

/// Helper function to compare optional APInts:
/// (a) if X and Y both exist, return min(X, Y),
/// (b) if neither X nor Y exist, return None,
/// (c) if exactly one of X and Y exists, return that value.
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
  if (X.hasValue() && Y.hasValue()) {
    unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
    APInt XW = X->sextOrSelf(W);
    APInt YW = Y->sextOrSelf(W);
    return XW.slt(YW) ? *X : *Y;
  }
  if (!X.hasValue() && !Y.hasValue())
    return None;
  return X.hasValue() ? *X : *Y;
}

/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients. If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients. The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
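///
/// For instance (illustrative values), with BitWidth = 8 a 9-bit solution
/// holding 5 fits in 8 unsigned bits and is truncated to that width, while a
/// 9-bit solution holding 300 does not (300 >= 2^8) and keeps its full width.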
static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
  if (!X.hasValue())
    return None;
  unsigned W = X->getBitWidth();
  if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
    return X->trunc(BitWidth);
  return X;
}

/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations. The values L, M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
///     like x^2 = 5, no integer solutions exist; in other cases an integer
///     solution may exist, but SolveQuadraticEquationWrap may fail to find it.
static Optional<APInt>
SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
  if (!X.hasValue())
    return None;

  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return None;

  return TruncIfPossible(X, BitWidth);
}

/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
/// iterations. The values M and N are assumed to be signed, and they
/// should both have the same bit width.
/// Find the least n such that c(n) does not belong to the given range,
/// while c(n-1) does.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
///     bounds of the range.
static Optional<APInt>
SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
                          const ConstantRange &Range, ScalarEvolution &SE) {
  assert(AddRec->getOperand(0)->isZero() &&
         "Starting value of addrec should be 0");
  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
                    << Range << ", addrec " << *AddRec << '\n');
  // This case is handled in getNumIterationsInRange. Here we can assume that
  // we start in the range.
  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
         "Addrec's initial value should be in range");

  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  // Be careful about the return value: there can be two reasons for not
  // returning an actual number. First, if no solutions to the equations
  // were found, and second, if the solutions don't leave the given range.
  // The first case means that the actual solution is "unknown", the second
  // means that it's known, but not valid. If the solution is unknown, we
  // cannot make any conclusions.
  // Return a pair: the optional solution and a flag indicating if the
  // solution was found.
  auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
    // Solve for signed overflow and unsigned overflow, pick the lower
    // solution.
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
                      << Bound << " (before multiplying by " << M << ")\n");
    Bound *= M; // The quadratic equation multiplier.

    Optional<APInt> SO = None;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
                                                              BitWidth+1);

    auto LeavesRange = [&] (const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns None, it means that there can
    // be a solution, but the function failed to find it. We cannot treat it
    // as "no solution".
    if (!SO.hasValue() || !UO.hasValue())
      return { None, false };

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    Optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return { Min, true };
    Optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return { Max, true };

    // Solutions were found, but were eliminated, hence the "true".
    return { None, true };
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // Lower bound is inclusive, subtract 1 to represent the exiting value.
  APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If either of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it. Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B. It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow. If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A. But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with an "x != y" exit test. The exit
  // condition is now expressed as a single expression, V = x-y. So the exit
  // test is effectively V != 0.  We know and take advantage of the fact that
  // this expression is only used in a comparison-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant, handle it directly.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index.  When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
      return ExitLimit(R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.
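  //
  // Illustrative example: for the i8 addrec {6,+,-2}, solving
  // -2*N = -6 (mod 256) gives the minimum unsigned root N = 3, matching the
  // value sequence 6, 4, 2, 0.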

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  // We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wrap around.
  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(Distance);

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the expression
  // is true) and the addition is no-wrap we can use unsigned divide to
  // compute the backedge count.  In this case, the step may not divide the
  // distance, but we don't care because if the condition is "missed" the loop
  // will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max =
        Exact == getCouldNotCompute()
            ? Exact
            : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed.  We don't
  // handle them yet except for the trivial case.  This could be expanded in the
  // future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.  If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal, however for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value.  For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory, but compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;
  // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
  // '0 != 0'.
  auto TrivialCase = [&](bool TriviallyTrue) {
    LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
    Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
    return true;
  };
  // If we hit the max recursion limit bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        return TrivialCase(false);
      else
        return TrivialCase(true);
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        return TrivialCase(true);
      else if (ExactCR.isEmptySet())
        return TrivialCase(false);

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;

        // The "Should have been caught earlier!" messages refer to the fact
        // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
        // should have fired on the corresponding cases, and canonicalized the
        // check to trivial case.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      return TrivialCase(true);
    if (ICmpInst::isFalseWhenEqual(Pred))
      return TrivialCase(false);
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);

  return Changed;
}

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}

std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}

bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
         return DT.properlyDominates(L1->getHeader(), L2->getHeader());
       });

  // Get init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV then bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV then bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that the init SCEV contains an invariant load that does
  // not dominate MDL and is not available at MDL's loop entry, so we must
  // check that here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) &&
         isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}

bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                              const SCEVAddRecExpr *LHS,
                                              const SCEV *RHS) {
  const Loop *L = LHS->getLoop();
  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
}

bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred,
                                           bool &Increasing) {
  bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change to a monotonically decreasing one, and vice versa.
  bool IncreasingSwapped;
  bool ResultSwapped = isMonotonicPredicateImpl(
      LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);

  assert(Result == ResultSwapped && "should be able to analyze both!");
  if (ResultSwapped)
    assert(Increasing == !IncreasingSwapped &&
           "monotonicity should flip as we flip the predicate");
#endif

  return Result;
}

bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred,
                                               bool &Increasing) {

  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other way
  // around for decreasing predicates), all we care about is that *if* the
  // predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.

  switch (Pred) {
  default:
    return false; // Conservative answer

  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (!LHS->hasNoUnsignedWrap())
      return false;

    Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE;
    return true;

  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE: {
    if (!LHS->hasNoSignedWrap())
      return false;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE;
      return true;
    }

    if (isKnownNonPositive(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE;
      return true;
    }

    return false;
  }

  }

  llvm_unreachable("switch has default clause!");
}

bool ScalarEvolution::isLoopInvariantPredicate(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {

  // If there is a loop-invariant operand, force it into the RHS; otherwise
  // bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return false;

  bool Increasing;
  if (!isMonotonicPredicate(ArLHS, Pred, Increasing))
    return false;

  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.

  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return false;

  InvariantPred = Pred;
  InvariantLHS = ArLHS->getStart();
  InvariantRHS = RHS;
  return true;
}

bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
    return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
        .contains(RangeLHS);
  };
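
  // For instance (illustrative values): with Pred == ICMP_ULT and
  // RangeRHS == [10, 20), makeSatisfyingICmpRegion yields [0, 10), since
  // every X in [0, 10) is u< every Y in [10, 20).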

  // The check at the top of the function catches the case where the values are
  // known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}

bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
    const SCEV *NonConstOp, *ConstOp;
    SCEV::NoWrapFlags FlagsPresent;

    if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
        !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
      return false;

    OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
    return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
  };

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;
  }

  return false;
}

bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing an arbitrary number of activations of
  // isKnownPredicateViaSplitting on the stack can result in exponential time
  // complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate.  isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice.  We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
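  //
  // Concrete illustration (hypothetical values): for i8 with L == 100, any
  // negative I has unsigned value >= 128 > 100 and so fails I `ult` 100;
  // hence I `ult` 100 forces I >= 0, and for non-negative I the unsigned
  // and signed comparisons with 100 coincide.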
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");


  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
    dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times.  This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
      getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree.  These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
  if (!DT.isReachableFromEntry(L->getHeader()))
    return false;

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {
    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge.  This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within the
      // loop body that dominate the latch.  The dominator tree had better
      // agree with us on this:
      assert(DT.dominates(DominatingEdge, Latch) && "should be!");

      if (isImpliedCond(Pred, LHS, RHS, Condition,
                        BB != ContinuePredicate->getSuccessor(0)))
        return true;
    }
  }

  return false;
}

bool
ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  // Both LHS and RHS must be available at loop entry.
  assert(isAvailableAtLoopEntry(LHS, L) &&
         "LHS is not available at Loop Entry");
  assert(isAvailableAtLoopEntry(RHS, L) &&
         "RHS is not available at Loop Entry");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
  // the facts (a >= b && a != b) separately. A typical situation is when the
  // non-strict comparison is known from ranges and non-equality is known from
  // dominating predicates. If we are proving strict comparison, we always try
  // to prove non-equality and non-strict comparison separately.
  auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
  const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
  bool ProvedNonStrictComparison = false;
  bool ProvedNonEquality = false;

  if (ProvingStrictComparison) {
    ProvedNonStrictComparison =
        isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS);
    ProvedNonEquality =
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS);
    if (ProvedNonStrictComparison && ProvedNonEquality)
      return true;
  }

  // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
  auto ProveViaGuard = [&](BasicBlock *Block) {
    if (isImpliedViaGuard(Block, Pred, LHS, RHS))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Try to prove (Pred, LHS, RHS) using isImpliedCond.
  auto ProveViaCond = [&](Value *Condition, bool Inverse) {
    if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Starting at the loop predecessor, climb up the predecessor chain as long
  // as we can find predecessors that have a unique successor leading towards
  // the original header.
  for (std::pair<BasicBlock *, BasicBlock *>
         Pair(L->getLoopPredecessor(), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    if (ProveViaGuard(Pair.first))
      return true;

    BranchInst *LoopEntryPredicate =
      dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(LoopEntryPredicate->getCondition(),
                     LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, L->getHeader()))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  return false;
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    Value *FoundCondValue,
                                    bool Inverse) {
  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
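  // For example, if the branch condition "and i1 %c1, %c2" is known to be
  // true, then both %c1 and %c2 hold, so it is enough for either operand to
  // imply the predicate; dually, an "or" known to be false means both of its
  // operands are false.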
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Now that we have found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both operands
  // are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t).  The
    // range we consider has to correspond to same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
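      // For example, for i8 with an unsigned predicate: if Min is 255 then
      // SharperMin wraps to 0, and V u>= 255 does imply V u>= 0, which holds
      // vacuously for all V.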

      APInt SharperMin = Min + 1;

      switch (Pred) {
        case ICmpInst::ICMP_SGE:
        case ICmpInst::ICMP_UGE:
          // We know V `Pred` SharperMin.  If this implies LHS `Pred`
          // RHS, we're done.
          if (isImpliedCondOperands(Pred, LHS, RHS, V,
                                    getConstant(SharperMin)))
            return true;
          LLVM_FALLTHROUGH;

        case ICmpInst::ICMP_SGT:
        case ICmpInst::ICMP_UGT:
          // We know from the range information that (V `Pred` Min ||
          // V == Min).  We know from the guarding condition that !(V
          // == Min).  This gives us
          //
          //       V `Pred` Min || V == Min && !(V == Min)
          //   =>  V `Pred` Min
          //
          // If V `Pred` Min implies LHS `Pred` RHS, we're done.

          if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
            return true;
          LLVM_FALLTHROUGH;

        default:
          // No change
          break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // called from fairly deep in the call stack (i.e. it is called many times),
  // so building a new subtraction SCEV on each call would be costly.

  // X - X = 0.
  if (More == Less)
    return APInt(getTypeSizeInBits(More->getType()), 0);

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return None;

    // We look at affine expressions only, not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return None;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return None;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  SCEV::NoWrapFlags Flags;
  const SCEV *LLess = nullptr, *RLess = nullptr;
  const SCEV *LMore = nullptr, *RMore = nullptr;
  const SCEVConstant *C1 = nullptr, *C2 = nullptr;
  // Compare (X + C1) vs X.
  if (splitBinaryAdd(Less, LLess, RLess, Flags))
    if ((C1 = dyn_cast<SCEVConstant>(LLess)))
      if (RLess == More)
        return -(C1->getAPInt());

  // Compare X vs (X + C2).
  if (splitBinaryAdd(More, LMore, RMore, Flags))
    if ((C2 = dyn_cast<SCEVConstant>(LMore)))
      if (RMore == Less)
        return C2->getAPInt();

  // Compare (X + C1) vs (X + C2).
  if (C1 && C2 && RLess == RMore)
    return C2->getAPInt() - C1->getAPInt();

  return None;
}

bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both inequalities to be about add recurrences on the same loop.  This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  //  FoundLHS u< FoundRHS u< -C =>  (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  //  FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  //                                                                  ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //       FoundLHS s< FoundRHS s< INT_MIN - C
  // <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  // <=>  (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  // <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
  //                        (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ]
  // <=>  FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow.  For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100).  Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C).  Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not already pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so we return the
      // conservative answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If neither LHS nor RHS is a Phi, there is nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it the LHS.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and for each predecessor
    // block the predicate is true for the incoming values from that block,
    // then the predicate is also true for the Phis.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is an AddRec whose loop's header is LBB, so it also
    // corresponds to a Phi in this block. This means the loop has both AddRec
    // and Unknown PHIs, and we can compare the AddRec's incoming values from
    // above the loop and from the latch with LPhi's respective incoming
    // values.
    // TODO: Generalize to handle loops with many inputs in a header.
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over inputs of LHS and compare each of them to RHS,
    // the predicate is true for (LHS, RHS) if it is true for all such pairs.
    // At this point RHS is either a non-Phi, or it is a Phi from some block
    // different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}

/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
                                 const SCEV *Candidate) {
  const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
  if (!MinMaxExpr)
    return false;

  return find(MinMaxExpr->operands(), Candidate) != MinMaxExpr->op_end();
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.
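  //
  // For example, {A,+,S}<nsw> s< {B,+,S}<nsw> over the same loop reduces to
  // A s< B: both recurrences advance by S on every iteration without
  // wrapping, so their difference stays invariant.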

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
                         SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;
  // We only want to work with the ICMP_SGT comparison so far.
  // TODO: Extend to ICMP_UGT?
  if (Pred == ICmpInst::ICMP_SLT) {
    Pred = ICmpInst::ICMP_SGT;
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }
  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Check whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases where the
    // sizes of the types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request a trip count recalculation for the same loop, which
      // would be cached as SCEVCouldNotCompute to avoid infinite recursion.
      // To avoid this, we only want to create SCEVs that are constants in
      // this section, so we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of types is a pointer and another one is not. We cannot extend
        // them properly to a wider type, so let us just reject this case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given FoundLHS > 2, FoundLHS is at least 3; dividing it
      // by a Denominator < 4 then yields at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given FoundLHS > -3, FoundLHS is at least -2. If we
      // divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, the result is 0.
      // 2. If FoundLHS is non-negative, the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getNegativeSCEV(getOne(WTy));
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for every
  // possible incoming value of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}

static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // zext x u<= sext x, sext x s<= zext x
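  //
  // For example, widening i8 -1 to i16: zext gives 255 and sext gives 65535
  // as unsigned (i.e. -1 as signed), so zext x u<= sext x holds, and as
  // signed values -1 s<= 255, so sext x s<= zext x holds as well.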
  switch (Pred) {
  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE: {
    // If operand >=s 0 then ZExt == SExt.  If operand <s 0 then SExt <s ZExt.
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE: {
    // If operand >=s 0 then ZExt == SExt.  If operand <s 0 then ZExt <u SExt.
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  };
  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
         isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` could be lifted easily -- it exists only
    // to reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
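  // For instance, from FoundLHS u< 8 we get FoundLHSRange = [0, 8); with
  // Addend = 2, LHSRange becomes [2, 10), which below suffices to prove
  // e.g. LHS u< 10.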

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  ConstantRange SatisfyingLHSRange =
      ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);

  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return SatisfyingLHSRange.contains(LHSRange);
}

bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
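    // E.g. for i8: if MaxRHS = 126 and MaxStrideMinusOne = 2, then
    // 127 - 2 = 125 s< 126, so we must conservatively report overflow.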
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}

bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}

const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
                                            bool Equality) {
  const SCEV *One = getOne(Step->getType());
  Delta = Equality ? getAddExpr(Delta, Step)
                   : getAddExpr(Delta, getMinusSCEV(Step, One));
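  // E.g. Delta = 10, Step = 3, Equality = false: (10 + (3 - 1)) /u 3 = 4,
  // the number of strides of 3 needed to cover a distance of 10.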
  return getUDivExpr(Delta, Step);
}

const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {

  assert(!isKnownNonPositive(Stride) &&
         "Stride is expected strictly positive!");
  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  const SCEV *MaxBECount;
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least one.
  // In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there
  // is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression, we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum backedge
  // taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
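    //
    // For example, with start = 0, end = 10 and stride = 3, i visits 0, 3, 6
    // and 9, taking the backedge 3 times, and indeed
    // (max(10, 3) - 0 - 1) /u 3 = 9 /u 3 = 3.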
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is a single-exit loop with no side effects.
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this case.
    //
    // Precondition b) implies that the unknown stride cannot be zero otherwise
    // we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride) returning
    // true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement operation
    // itself is wrapping. The computed backedge taken count may be wrong in
    // such cases. This is prevented by checking that the stride is not known to
    // be either positive or non-positive. For example, no wrap flags are
    // propagated to the post-increment IV of this loop with a trip count of 2 -
    //
    // unsigned char i;
    // for(i=127; i<128; i+=129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing us to optimize in the presence
    // of undefined behavior, as in the C language.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop and
  // cannot calculate the ExactBECount needed by ExitLimit. However, we can
  // calculate the MaxBECount, given the start, stride and max value for the end
  // bound of the loop (RHS), and the fact that IV does not overflow (which is
  // checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // ((End - Start) rounded up to a multiple of Stride) / Stride times, where
  // Start is the LHS value of the less-than comparison the first time it is
  // evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
    computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so the
  // result is as above, and if not max(End,Start) is Start so we get a backedge
  // count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing us to optimize in the presence
  // of undefined behavior, as in the C language.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
    End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression, we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
    IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
             : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
                               ? BECount
                               : computeBECount(getConstant(MaxStart - MinEnd),
                                                getConstant(MinStride), false);

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants and
  // that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
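    //
    // For instance, solving {0,+,3} in [0, 10): A = 3 and End = 9, so
    // ExitVal = (9 + 3) /u 3 = 4; evaluating the chrec at 4 gives 12, the
    // first value outside the range.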
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach the arithmetic depth limit while simplifying. So
  // we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+,...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//  8 * (100 +  %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that are
// spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec AddRecChecker(ContainsAddRec);
          visitAll(Op, AddRecChecker);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
/// two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
    SmallVectorImpl<const SCEV *> &Terms) {
  SmallVector<const SCEV *, 4> Strides;
  SCEVCollectStrides StrideCollector(*this, Strides);
  visitAll(Expr, StrideCollector);

  LLVM_DEBUG({
    dbgs() << "Strides:\n";
    for (const SCEV *S : Strides)
      dbgs() << *S << "\n";
  });

  for (const SCEV *S : Strides) {
    SCEVCollectTerms TermCollector(Terms);
    visitAll(S, TermCollector);
  }

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
  visitAll(Expr, MulCollector);
}

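// Helper for findArrayDimensions: repeatedly divides all terms by the
// smallest (last) term and recurses on the quotients.
//
// A hypothetical run, assuming every division is exact: with
// Terms = {%m * %o, %o}, Step is %o; dividing yields {%m, 1}, the constant 1
// is erased, the recursion pushes %m, and finally %o is pushed, giving
// Sizes = {%m, %o}.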
static bool findArrayDimensionsRec(ScalarEvolution &SE,
                                   SmallVectorImpl<const SCEV *> &Terms,
                                   SmallVectorImpl<const SCEV *> &Sizes) {
  int Last = Terms.size() - 1;
  const SCEV *Step = Terms[Last];

  // End of recursion.
  if (Last == 0) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
      SmallVector<const SCEV *, 2> Qs;
      for (const SCEV *Op : M->operands())
        if (!isa<SCEVConstant>(Op))
          Qs.push_back(Op);

      Step = SE.getMulExpr(Qs);
    }

    Sizes.push_back(Step);
    return true;
  }

  for (const SCEV *&Term : Terms) {
    // Normalize the terms before the next call to findArrayDimensionsRec.
    const SCEV *Q, *R;
    SCEVDivision::divide(SE, Term, Step, &Q, &R);

    // Bail out when GCD does not evenly divide one of the terms.
    if (!R->isZero())
      return false;

    Term = Q;
  }

  // Remove all SCEVConstants.
  Terms.erase(
      remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }),
      Terms.end());

  if (!Terms.empty())
    if (!findArrayDimensionsRec(SE, Terms, Sizes))
      return false;

  Sizes.push_back(Step);
  return true;
}

// Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  for (const SCEV *T : Terms)
    if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>))
      return true;
  return false;
}

// Return the number of product terms in S.
static inline int numberOfTerms(const SCEV *S) {
  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
    return Expr->getNumOperands();
  return 1;
}

static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  if (isa<SCEVConstant>(T))
    return nullptr;

  if (isa<SCEVUnknown>(T))
    return T;

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
    SmallVector<const SCEV *, 2> Factors;
    for (const SCEV *Op : M->operands())
      if (!isa<SCEVConstant>(Op))
        Factors.push_back(Op);

    return SE.getMulExpr(Factors);
  }

  return T;
}

/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

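// A sketch of the flow on the running example documented at delinearize
// below, assuming an element size of 8: the collected Terms
// {8 * %m * %o, 8 * %o} are deduplicated, sorted by their number of factors,
// and divided by the element size into {%m * %o, %o}; findArrayDimensionsRec
// then produces {%m, %o}, and the element size is appended, giving
// Sizes = {%m, %o, 8}.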
void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.empty() || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non-parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size.  When the quotient would be
  // zero, keep the original term; otherwise continue with the quotient even
  // if the division leaves a remainder.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  LLVM_DEBUG({
    dbgs() << "Terms after division and removal of constant factors:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  LLVM_DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}

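// Continuing the example documented at delinearize below: with
// Sizes = {%m, %o, 8}, the expression is divided by the element size 8 first
// (that remainder is only sanity-checked, not recorded), then by %o and %m;
// each of those remainders is one subscript, and the final quotient becomes
// the outermost subscript.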
void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    LLVM_DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements in
    // the array.
    if (i == Last) {

      // Bail out if the remainder is too complex.
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access; the remainder of the delinearization is the
/// offset at which the array starts.  The SCEV->delinearize algorithm computes
/// the multiples of SCEV coefficients: that is a pattern matching of
/// sub-expressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride.  When
/// SCEV->delinearize fails, Subscripts and Sizes are left empty.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides are happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identifying the size of the last dimension: when
/// the array has been statically allocated, one could compute the size of that
/// dimension by dividing the overall size of the array by the size of the known
/// dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that does correspond to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases are checking the output of a function pass:
/// DelinearizationPass that walks through all loads and stores of a function
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
  computeAccessFunctions(Expr, Subscripts, Sizes);

  if (Subscripts.empty())
    return;

  LLVM_DEBUG({
    dbgs() << "succeeded to delinearize " << *Expr << "\n";
    dbgs() << "ArrayDecl[UnknownSize]";
    for (const SCEV *S : Sizes)
      dbgs() << "[" << *S << "]";

    dbgs() << "\nArrayRef";
    for (const SCEV *S : Subscripts)
      dbgs() << "[" << *S << "]";
    dbgs() << "\n";
  });
}
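
// A rough usage sketch, not code from this file: a client such as the
// Delinearization pass strips the base pointer off the access function and
// then runs the three steps above.  Here BasePtr and getPointerOperand() are
// stand-ins for however the client obtains them.
//
//   SmallVector<const SCEV *, 3> Subscripts, Sizes;
//   const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(&Inst), L);
//   AccessFn = SE->getMinusSCEV(AccessFn, SE->getSCEV(BasePtr));
//   SE->delinearize(AccessFn, Subscripts, Sizes, SE->getElementSize(&Inst));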

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
  SmallPtrSet<User *, 8> Visited;
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U).second)
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->eraseValueFromMap(U);
    Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction in
  // relevant basic blocks, and not just terminators.  Doing this is a waste of
  // time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution wants
  // to _add_ guards to the module when there weren't any before, and wants
  // ScalarEvolution to optimize based on those guards.  For now we prefer to be
  // efficient in lieu of being smart in that rather obscure case.

  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}

ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
      PendingMerges(std::move(Arg.PendingMerges)),
      MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}

ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
  for (auto &BTCI : BackedgeTakenCounts)
    BTCI.second.clear();
  for (auto &BTCI : PredicatedBackedgeTakenCounts)
    BTCI.second.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(PendingPhiRanges.empty() && "getRangeRef garbage");
  assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  if (ExitingBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L))
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n";
  else
    OS << "Unpredictable backedge-taken count.\n";

  if (ExitingBlocks.size() > 1)
    for (BasicBlock *ExitingBlock : ExitingBlocks) {
      OS << "  exit count for " << ExitingBlock->getName() << ": "
         << *SE->getExitCount(L, ExitingBlock) << "\n";
    }

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is "
       << *SE->getConstantMaxBackedgeTakenCount(L);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SCEVUnionPredicate Pred;
  auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
  if (!isa<SCEVCouldNotCompute>(PBT)) {
    OS << "Predicated backedge-taken count is " << *PBT << "\n";
    OS << " Predicates:\n";
    Pred.print(OS, 4);
  } else {
    OS << "Unpredictable predicated backedge-taken count. ";
  }
  OS << "\n";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}

static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
  switch (LD) {
  case ScalarEvolution::LoopVariant:
    return "Variant";
  case ScalarEvolution::LoopInvariant:
    return "Invariant";
  case ScalarEvolution::LoopComputable:
    return "Computable";
  }
  llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
}

void ScalarEvolution::print(raw_ostream &OS) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Instruction &I : instructions(F))
    if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
      OS << I << '\n';
      OS << "  -->  ";
      const SCEV *SV = SE.getSCEV(&I);
      SV->print(OS);
      if (!isa<SCEVCouldNotCompute>(SV)) {
        OS << " U: ";
        SE.getUnsignedRange(SV).print(OS);
        OS << " S: ";
        SE.getSignedRange(SV).print(OS);
      }

      const Loop *L = LI.getLoopFor(I.getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << "  -->  ";
        AtUse->print(OS);
        if (!isa<SCEVCouldNotCompute>(AtUse)) {
          OS << " U: ";
          SE.getUnsignedRange(AtUse).print(OS);
          OS << " S: ";
          SE.getSignedRange(AtUse).print(OS);
        }
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }

        bool First = true;
        for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
        }

        for (auto *InnerL : depth_first(L)) {
          if (InnerL == L)
            continue;
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
        }

        OS << " }";
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}

ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  // Record a conservative default before computing the real disposition, so
  // that any reentrant query sees a safe answer.
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  // The recursive computation may have grown LoopDispositions, invalidating
  // the reference obtained above, so look the entry up again.
  auto &Values2 = LoopDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

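// For instance, an AddRec {0,+,1}<%outer> is LoopComputable with respect to
// %outer itself, LoopInvariant with respect to a loop nested inside %outer
// (its value is fixed during any single iteration of such an inner loop),
// and LoopVariant with respect to the function body (the null loop).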
ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // Everything that is not defined at loop entry is variant.
    if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
      return LoopVariant;
    assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
           " dominate the contained loop's header?");

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    bool HasVarying = false;
    for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant.  All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  // As in getLoopDisposition: record a conservative default first, then
  // re-find the entry, since the recursive computation may have grown
  // BlockDispositions and invalidated the reference obtained above.
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
      ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
          dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}

void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV.  However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively).  This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say).  The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);

    // Unless VerifySCEVStrict is set, we only compare constant deltas.
    if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
      dbgs() << "Trip Count for " << *L << " Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *Delta << "\n";
      std::abort();
    }
  }
}

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:

  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
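  ///
  /// For instance, (zext i32 {0,+,1}<%L> to i64) can be rewritten to the
  /// i64 AddRec {0,+,1}<%L>, provided a <nusw> wrap predicate on the
  /// original recurrence is added to \p NewPreds.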
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                        SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                        SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
    PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

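// For example, for an AddRec carrying <nuw><nsw> with a constant step of 1,
// NSW transfers as IncrementNSSW, and since the step is non-negative NUW
// also implies IncrementNUSW, so both increment flags are returned.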
SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}

/// Union predicates don't get cached, so create a dummy set ID for them.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an"
                " associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}

// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions.
// It's not always easy, as A and B can be folded (imagine A is X / 2, and B is
// 4, A / B becomes X / 8).
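// For instance, ((-4 * (%x /u 4)) + %x) matches with LHS = %x and RHS = 4:
// rebuilding %x urem 4 via getURemExpr folds back to the same expression.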
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}