reference, declaration → definition
definition → references, declarations, derived classes, virtual overrides
reference to multiple definitions → definitions
unreferenced
10048
10049
10050
10051
10052
10053
10054
10055
10056
10057
10058
10059
10060
10061
10062
10063
10064
10065
10066
10067
10068
10069
10070
10071
10072
10073
10074
10075
10076
10077
10078
10079
10080
10081
10082
10083
10084
10085
10086
10087
10088
10089
10090
10091
10092
10093
10094
10095
10096
10097
10098
10099
10100
10101
10102
10103
10104
10105
10106
10107
10108
10109
10110
10111
10112
10113
10114
10115
10116
10117
10118
10119
10120
10121
10122
10123
10124
10125
10126
10127
10128
10129
10130
10131
10132
10133
10134
10135
10136
10137
10138
10139
10140
10141
10142
10143
10144
10145
10146
10147
10148
10149
10150
10151
10152
10153
10154
10155
10156
10157
10158
10159
10160
10161
10162
10163
10164
10165
10166
10167
10168
10169
10170
10171
10172
10173
10174
10175
10176
10177
10178
10179
10180
10181
10182
10183
10184
10185
10186
10187
10188
10189
10190
10191
10192
10193
10194
10195
10196
10197
10198
10199
10200
10201
10202
10203
10204
10205
10206
10207
10208
10209
10210
10211
10212
10213
10214
10215
10216
10217
10218
10219
10220
10221
10222
10223
10224
10225
10226
10227
10228
10229
10230
10231
10232
10233
10234
10235
10236
10237
10238
10239
10240
10241
10242
10243
10244
10245
10246
10247
10248
10249
10250
10251
10252
10253
10254
10255
10256
10257
10258
10259
10260
10261
10262
10263
10264
10265
10266
10267
10268
10269
10270
10271
10272
10273
10274
10275
10276
10277
10278
10279
10280
10281
10282
10283
10284
10285
10286
10287
10288
10289
10290
10291
10292
10293
10294
10295
10296
10297
10298
10299
10300
10301
10302
10303
10304
10305
10306
10307
10308
10309
10310
10311
10312
10313
10314
10315
10316
10317
10318
10319
10320
10321
10322
10323
10324
10325
10326
10327
10328
10329
10330
10331
10332
10333
10334
10335
10336
10337
10338
10339
10340
10341
10342
10343
10344
10345
10346
10347
10348
10349
10350
10351
10352
10353
10354
10355
10356
10357
10358
10359
10360
10361
10362
10363
10364
10365
10366
10367
10368
10369
10370
10371
10372
10373
10374
10375
10376
10377
10378
10379
10380
10381
10382
10383
10384
10385
10386
10387
10388
10389
10390
10391
10392
10393
10394
10395
10396
10397
10398
10399
10400
10401
10402
10403
10404
10405
10406
10407
10408
10409
10410
10411
10412
10413
10414
10415
10416
10417
10418
10419
10420
10421
10422
10423
10424
10425
10426
10427
10428
10429
10430
10431
10432
10433
10434
10435
10436
10437
10438
10439
10440
10441
10442
10443
10444
10445
10446
10447
10448
10449
10450
10451
10452
10453
10454
10455
10456
10457
10458
10459
10460
10461
10462
10463
10464
10465
10466
10467
10468
10469
10470
10471
10472
10473
10474
10475
10476
10477
10478
10479
10480
10481
10482
10483
10484
10485
10486
10487
10488
10489
10490
10491
10492
10493
10494
10495
10496
10497
10498
10499
10500
10501
10502
10503
10504
10505
10506
10507
10508
10509
10510
10511
10512
10513
10514
10515
10516
10517
10518
10519
10520
10521
10522
10523
10524
10525
10526
10527
10528
10529
10530
10531
10532
10533
10534
10535
10536
10537
10538
10539
10540
10541
10542
10543
10544
10545
10546
10547
10548
10549
10550
10551
10552
10553
10554
10555
10556
10557
10558
10559
10560
10561
10562
10563
10564
10565
10566
10567
10568
10569
10570
10571
10572
10573
10574
10575
10576
10577
10578
10579
10580
10581
10582
10583
10584
10585
10586
10587
10588
10589
10590
10591
10592
10593
10594
10595
10596
10597
10598
10599
10600
10601
10602
10603
10604
10605
10606
10607
10608
10609
10610
10611
10612
10613
10614
10615
10616
10617
10618
10619
10620
10621
10622
10623
10624
10625
10626
10627
10628
10629
10630
10631
10632
10633
10634
10635
10636
10637
10638
10639
10640
10641
10642
10643
10644
10645
10646
10647
10648
10649
10650
10651
10652
10653
10654
10655
10656
10657
10658
10659
10660
10661
10662
10663
10664
10665
10666
10667
10668
10669
10670
10671
10672
10673
10674
10675
10676
10677
10678
10679
10680
10681
10682
10683
10684
10685
10686
10687
10688
10689
10690
10691
10692
10693
10694
10695
10696
10697
10698
10699
10700
10701
10702
10703
10704
10705
10706
10707
10708
10709
10710
10711
10712
10713
10714
10715
10716
10717
10718
10719
10720
10721
10722
10723
10724
10725
10726
10727
10728
10729
10730
10731
10732
10733
10734
10735
10736
10737
10738
10739
10740
10741
10742
10743
10744
10745
10746
10747
10748
10749
10750
10751
10752
10753
10754
10755
10756
10757
10758
10759
10760
10761
10762
10763
10764
10765
10766
10767
10768
10769
10770
10771
10772
10773
10774
10775
10776
10777
10778
10779
10780
10781
10782
10783
10784
10785
10786
10787
10788
10789
10790
10791
10792
10793
10794
10795
10796
10797
10798
10799
10800
10801
10802
10803
10804
10805
10806
10807
10808
10809
10810
10811
10812
10813
10814
10815
10816
10817
10818
10819
10820
10821
10822
10823
10824
10825
10826
10827
10828
10829
10830
10831
10832
10833
10834
10835
10836
10837
10838
10839
10840
10841
10842
10843
10844
10845
10846
10847
10848
10849
10850
10851
10852
10853
10854
10855
10856
10857
10858
10859
10860
10861
10862
10863
10864
10865
10866
10867
10868
10869
10870
10871
10872
10873
10874
10875
10876
10877
10878
10879
10880
10881
10882
10883
10884
10885
10886
10887
10888
10889
10890
10891
10892
10893
10894
10895
10896
10897
10898
10899
10900
10901
10902
10903
10904
10905
10906
10907
10908
10909
10910
10911
10912
10913
10914
10915
10916
10917
10918
10919
10920
10921
10922
10923
10924
10925
10926
10927
10928
10929
10930
10931
10932
10933
10934
10935
10936
10937
10938
10939
10940
10941
10942
10943
10944
10945
10946
10947
10948
10949
10950
10951
10952
10953
10954
10955
10956
10957
10958
10959
10960
10961
10962
10963
10964
10965
10966
10967
10968
10969
10970
10971
10972
10973
10974
10975
10976
10977
10978
10979
10980
10981
10982
10983
10984
10985
10986
10987
10988
10989
10990
10991
10992
10993
10994
10995
10996
10997
10998
10999
11000
11001
11002
11003
11004
11005
11006
11007
11008
11009
11010
11011
11012
11013
11014
11015
11016
11017
11018
11019
11020
11021
11022
11023
11024
11025
11026
11027
11028
11029
11030
11031
11032
11033
11034
11035
11036
11037
11038
11039
11040
11041
11042
11043
11044
11045
11046
11047
11048
11049
11050
11051
11052
11053
11054
11055
11056
11057
11058
11059
11060
11061
11062
11063
11064
11065
11066
11067
11068
11069
11070
11071
11072
11073
11074
11075
11076
11077
11078
11079
11080
11081
11082
11083
11084
11085
11086
11087
11088
11089
11090
11091
11092
11093
11094
11095
11096
11097
11098
11099
11100
11101
11102
11103
11104
11105
11106
11107
11108
11109
11110
11111
11112
11113
11114
11115
11116
11117
11118
11119
11120
11121
11122
11123
11124
11125
11126
11127
11128
11129
11130
11131
11132
11133
11134
11135
11136
11137
11138
11139
11140
11141
11142
11143
11144
11145
11146
11147
11148
11149
11150
11151
11152
11153
11154
11155
11156
11157
11158
11159
11160
11161
11162
11163
11164
11165
11166
11167
11168
11169
11170
11171
11172
11173
11174
11175
11176
11177
11178
11179
11180
11181
11182
11183
11184
11185
11186
11187
11188
11189
11190
11191
11192
11193
11194
11195
11196
11197
11198
11199
11200
11201
11202
11203
11204
11205
11206
11207
11208
11209
11210
11211
11212
11213
11214
11215
11216
11217
11218
11219
11220
11221
11222
11223
11224
11225
11226
11227
11228
11229
11230
11231
11232
11233
11234
11235
11236
11237
11238
11239
11240
11241
11242
11243
11244
11245
11246
11247
11248
11249
11250
11251
11252
11253
11254
11255
11256
11257
11258
11259
11260
11261
11262
11263
11264
11265
11266
11267
11268
11269
11270
11271
11272
11273
11274
11275
11276
11277
11278
11279
11280
11281
11282
11283
11284
11285
11286
11287
11288
11289
11290
11291
11292
11293
11294
11295
11296
11297
11298
11299
11300
11301
11302
11303
11304
11305
11306
11307
11308
11309
11310
11311
11312
11313
11314
11315
11316
11317
11318
11319
11320
11321
11322
11323
11324
11325
11326
11327
11328
11329
11330
11331
11332
11333
11334
11335
11336
11337
11338
11339
11340
11341
11342
11343
11344
11345
11346
11347
11348
11349
11350
11351
11352
11353
11354
11355
11356
11357
11358
11359
11360
11361
11362
11363
11364
11365
11366
11367
11368
11369
11370
11371
11372
11373
11374
11375
11376
11377
11378
11379
11380
11381
11382
11383
11384
11385
11386
11387
11388
11389
11390
11391
11392
11393
11394
11395
11396
11397
11398
11399
11400
11401
11402
11403
11404
11405
11406
11407
11408
11409
11410
11411
11412
11413
11414
11415
11416
11417
11418
11419
11420
11421
11422
11423
11424
11425
11426
11427
11428
11429
11430
11431
11432
11433
11434
11435
11436
11437
11438
11439
11440
11441
11442
11443
11444
11445
11446
11447
11448
11449
11450
11451
11452
11453
11454
11455
11456
11457
11458
11459
11460
11461
11462
11463
11464
11465
11466
11467
11468
11469
11470
11471
11472
11473
11474
11475
11476
11477
11478
11479
11480
11481
11482
11483
11484
11485
11486
11487
11488
11489
11490
11491
11492
11493
11494
11495
11496
11497
11498
11499
11500
11501
11502
11503
11504
11505
11506
11507
11508
11509
11510
11511
11512
11513
11514
11515
11516
11517
11518
11519
11520
11521
11522
11523
11524
11525
11526
11527
11528
11529
11530
11531
11532
11533
11534
11535
11536
11537
11538
11539
11540
11541
11542
11543
11544
11545
11546
11547
11548
11549
11550
11551
11552
11553
11554
11555
11556
11557
11558
11559
11560
11561
11562
11563
11564
11565
11566
11567
11568
11569
11570
11571
11572
11573
11574
11575
11576
11577
11578
11579
11580
11581
11582
11583
11584
11585
11586
11587
11588
11589
11590
11591
11592
11593
11594
11595
11596
11597
11598
11599
11600
11601
11602
11603
11604
11605
11606
11607
11608
11609
11610
11611
11612
11613
11614
11615
11616
11617
11618
11619
11620
11621
11622
11623
11624
11625
11626
11627
11628
11629
11630
11631
11632
11633
11634
11635
11636
11637
11638
11639
11640
11641
11642
11643
11644
11645
11646
11647
11648
11649
11650
11651
11652
11653
11654
11655
11656
11657
11658
11659
11660
11661
11662
11663
11664
11665
11666
11667
11668
11669
11670
11671
11672
11673
11674
11675
11676
11677
11678
11679
11680
11681
11682
11683
11684
11685
11686
11687
11688
11689
11690
11691
11692
11693
11694
11695
11696
11697
11698
11699
11700
11701
11702
11703
11704
11705
11706
11707
11708
11709
11710
11711
11712
11713
11714
11715
11716
11717
11718
11719
11720
11721
11722
11723
11724
11725
11726
11727
11728
11729
11730
11731
11732
11733
11734
11735
11736
11737
11738
11739
11740
11741
11742
11743
11744
11745
11746
11747
11748
11749
11750
11751
11752
11753
11754
11755
11756
11757
11758
11759
11760
11761
11762
11763
11764
11765
11766
11767
11768
11769
11770
11771
11772
11773
11774
11775
11776
11777
11778
11779
11780
11781
11782
11783
11784
11785
11786
11787
11788
11789
11790
11791
11792
11793
11794
11795
11796
11797
11798
11799
11800
11801
11802
11803
11804
11805
11806
11807
11808
11809
11810
11811
11812
11813
11814
11815
11816
11817
11818
11819
11820
11821
11822
11823
11824
11825
11826
11827
11828
11829
11830
11831
11832
11833
11834
11835
11836
11837
11838
11839
11840
11841
11842
11843
11844
11845
11846
11847
11848
11849
11850
11851
11852
11853
11854
11855
11856
11857
11858
11859
11860
11861
11862
11863
11864
11865
11866
11867
11868
11869
11870
11871
11872
11873
11874
11875
11876
11877
11878
11879
11880
11881
11882
11883
11884
11885
11886
11887
11888
11889
11890
11891
11892
11893
11894
11895
11896
11897
11898
11899
11900
11901
11902
11903
11904
11905
11906
11907
11908
11909
11910
11911
11912
11913
11914
11915
11916
11917
11918
11919
11920
11921
11922
11923
11924
11925
11926
11927
11928
11929
11930
11931
11932
11933
11934
11935
11936
11937
11938
11939
11940
11941
11942
11943
11944
11945
11946
11947
11948
11949
11950
11951
11952
11953
11954
11955
11956
11957
11958
11959
11960
11961
11962
11963
11964
11965
11966
11967
11968
11969
11970
11971
11972
11973
11974
11975
11976
11977
11978
11979
11980
11981
11982
11983
11984
11985
11986
11987
11988
11989
11990
11991
11992
11993
11994
11995
11996
11997
11998
11999
12000
12001
12002
12003
12004
12005
12006
12007
12008
12009
12010
12011
12012
12013
12014
12015
12016
12017
12018
12019
12020
12021
12022
12023
12024
12025
12026
12027
12028
12029
12030
12031
12032
12033
12034
12035
12036
12037
12038
12039
12040
12041
12042
12043
12044
12045
12046
12047
12048
12049
12050
12051
12052
12053
12054
12055
12056
12057
12058
12059
12060
12061
12062
12063
12064
12065
12066
12067
12068
12069
12070
12071
12072
12073
12074
12075
12076
12077
12078
12079
12080
12081
12082
12083
12084
12085
12086
12087
12088
12089
12090
12091
12092
12093
12094
12095
12096
12097
12098
12099
12100
12101
12102
12103
12104
12105
12106
12107
12108
12109
12110
12111
12112
12113
12114
12115
12116
12117
12118
12119
12120
12121
12122
12123
12124
12125
12126
12127
12128
12129
12130
12131
12132
12133
12134
12135
12136
12137
12138
12139
12140
12141
12142
12143
12144
12145
12146
12147
12148
12149
12150
12151
12152
12153
12154
12155
12156
12157
12158
12159
12160
12161
12162
12163
12164
12165
12166
12167
12168
12169
12170
12171
12172
12173
12174
12175
12176
12177
12178
12179
12180
12181
12182
12183
12184
12185
12186
12187
12188
12189
12190
12191
12192
12193
12194
12195
12196
12197
12198
12199
12200
12201
12202
12203
12204
12205
12206
12207
12208
12209
12210
12211
12212
12213
12214
12215
12216
12217
12218
12219
12220
12221
12222
12223
12224
12225
12226
12227
12228
12229
12230
12231
12232
12233
12234
12235
12236
12237
12238
12239
12240
12241
12242
12243
12244
12245
12246
12247
12248
12249
12250
12251
12252
12253
12254
12255
12256
12257
12258
12259
12260
12261
12262
12263
12264
12265
12266
12267
12268
12269
12270
12271
12272
12273
12274
12275
12276
12277
12278
12279
12280
12281
12282
12283
12284
12285
12286
12287
12288
12289
12290
12291
12292
12293
12294
12295
12296
12297
12298
12299
12300
12301
12302
12303
12304
12305
12306
12307
12308
12309
12310
12311
12312
12313
12314
12315
12316
12317
12318
12319
12320
12321
12322
12323
12324
12325
12326
12327
12328
12329
12330
12331
12332
12333
12334
12335
12336
12337
12338
12339
12340
12341
12342
12343
12344
12345
12346
12347
12348
12349
12350
12351
12352
12353
12354
12355
12356
12357
12358
12359
12360
12361
12362
12363
12364
12365
12366
12367
12368
12369
12370
12371
12372
12373
12374
12375
12376
12377
12378
12379
12380
12381
12382
12383
12384
12385
12386
12387
12388
12389
12390
12391
12392
12393
12394
12395
12396
12397
12398
12399
12400
12401
12402
12403
12404
12405
12406
12407
12408
12409
12410
12411
12412
12413
12414
12415
12416
12417
12418
12419
12420
12421
12422
12423
12424
12425
12426
12427
12428
12429
12430
12431
12432
12433
12434
12435
12436
12437
12438
12439
12440
12441
12442
12443
12444
12445
12446
12447
12448
12449
12450
12451
12452
12453
12454
12455
12456
12457
12458
12459
12460
12461
12462
12463
12464
12465
12466
12467
12468
12469
12470
12471
12472
12473
12474
12475
12476
12477
12478
12479
12480
12481
12482
12483
12484
12485
12486
12487
12488
12489
12490
12491
12492
12493
12494
12495
12496
12497
12498
12499
12500
12501
12502
12503
12504
12505
12506
12507
12508
12509
12510
12511
12512
12513
12514
12515
12516
12517
12518
12519
12520
12521
12522
12523
12524
12525
12526
12527
12528
12529
12530
12531
12532
12533
12534
12535
12536
12537
12538
12539
12540
12541
12542
12543
12544
12545
12546
12547
12548
12549
12550
12551
12552
12553
12554
12555
12556
12557
12558
12559
12560
12561
12562
12563
12564
12565
12566
12567
12568
12569
12570
12571
12572
12573
12574
12575
12576
12577
12578
12579
12580
12581
12582
12583
12584
12585
12586
12587
12588
12589
12590
12591
12592
12593
12594
12595
12596
12597
12598
12599
12600
12601
12602
12603
12604
12605
12606
12607
12608
12609
12610
12611
12612
12613
12614
12615
12616
12617
12618
12619
12620
12621
12622
12623
12624
12625
12626
12627
12628
12629
12630
12631
12632
12633
12634
12635
12636
12637
12638
12639
12640
12641
12642
12643
12644
12645
12646
12647
12648
12649
12650
12651
12652
12653
12654
12655
12656
12657
12658
12659
12660
12661
12662
12663
12664
12665
12666
12667
12668
12669
12670
12671
12672
12673
12674
12675
12676
12677
12678
12679
12680
12681
12682
12683
12684
12685
12686
12687
12688
12689
12690
12691
12692
12693
12694
12695
12696
12697
12698
12699
12700
12701
12702
12703
12704
12705
12706
12707
12708
12709
12710
12711
12712
12713
12714
12715
12716
12717
12718
12719
12720
12721
12722
12723
12724
12725
12726
12727
12728
12729
12730
12731
12732
12733
12734
12735
12736
12737
12738
12739
12740
12741
12742
12743
12744
12745
12746
12747
12748
12749
12750
12751
12752
12753
12754
12755
12756
12757
12758
12759
12760
12761
12762
12763
12764
12765
12766
12767
12768
12769
12770
12771
12772
12773
12774
12775
12776
12777
12778
12779
12780
12781
12782
12783
12784
12785
12786
12787
12788
12789
12790
12791
12792
12793
12794
12795
12796
12797
12798
12799
12800
12801
12802
12803
12804
12805
12806
12807
12808
12809
12810
12811
12812
12813
12814
12815
12816
12817
12818
12819
12820
12821
12822
12823
12824
12825
12826
12827
12828
12829
12830
12831
12832
12833
12834
12835
12836
12837
12838
12839
12840
12841
12842
12843
12844
12845
12846
12847
12848
12849
12850
12851
12852
12853
12854
12855
12856
12857
12858
12859
12860
12861
12862
12863
12864
12865
12866
12867
12868
12869
12870
12871
12872
12873
12874
12875
12876
12877
12878
12879
12880
12881
12882
12883
12884
12885
12886
12887
12888
12889
12890
12891
12892
12893
12894
12895
12896
12897
12898
12899
12900
12901
12902
12903
12904
12905
12906
12907
12908
12909
12910
12911
12912
12913
12914
12915
12916
12917
12918
12919
12920
12921
12922
12923
12924
12925
12926
12927
12928
12929
12930
12931
12932
12933
12934
12935
12936
12937
12938
12939
12940
12941
12942
12943
12944
12945
12946
12947
12948
12949
12950
12951
12952
12953
12954
12955
12956
12957
12958
12959
12960
12961
12962
12963
12964
12965
12966
12967
12968
12969
12970
12971
12972
12973
12974
12975
12976
12977
12978
12979
12980
12981
12982
12983
12984
12985
12986
12987
12988
12989
12990
12991
12992
12993
12994
12995
12996
12997
12998
12999
13000
13001
13002
13003
13004
13005
13006
13007
13008
13009
13010
13011
13012
13013
13014
13015
13016
13017
13018
13019
13020
13021
13022
13023
13024
13025
13026
13027
13028
13029
13030
13031
13032
13033
13034
13035
13036
13037
13038
13039
13040
13041
13042
13043
13044
13045
13046
13047
13048
13049
13050
13051
13052
13053
13054
13055
13056
13057
13058
13059
13060
13061
13062
13063
13064
13065
13066
13067
13068
13069
13070
13071
13072
13073
13074
13075
13076
13077
13078
13079
13080
13081
13082
13083
13084
13085
13086
13087
13088
13089
13090
13091
13092
13093
13094
13095
13096
13097
13098
13099
13100
13101
13102
13103
13104
13105
13106
13107
13108
13109
13110
13111
13112
13113
13114
13115
13116
13117
13118
13119
13120
13121
13122
13123
13124
13125
13126
13127
13128
13129
13130
13131
13132
13133
13134
13135
13136
13137
13138
13139
13140
13141
13142
13143
13144
13145
13146
13147
13148
13149
13150
13151
13152
13153
13154
13155
13156
13157
13158
13159
13160
13161
13162
13163
13164
13165
13166
13167
13168
13169
13170
13171
13172
13173
13174
13175
13176
13177
13178
13179
13180
13181
13182
13183
13184
13185
13186
13187
13188
13189
13190
13191
13192
13193
13194
13195
13196
13197
13198
13199
13200
13201
13202
13203
13204
13205
13206
13207
13208
13209
13210
13211
13212
13213
13214
13215
13216
13217
13218
13219
13220
13221
13222
13223
13224
13225
13226
13227
13228
13229
13230
13231
13232
13233
13234
13235
13236
13237
13238
13239
13240
13241
13242
13243
13244
13245
13246
13247
13248
13249
13250
13251
13252
13253
13254
13255
13256
13257
13258
13259
13260
13261
13262
13263
13264
13265
13266
13267
13268
13269
13270
13271
13272
13273
13274
13275
13276
13277
13278
13279
13280
13281
13282
13283
13284
13285
13286
13287
13288
13289
13290
13291
13292
13293
13294
13295
13296
13297
13298
13299
13300
13301
13302
13303
13304
13305
13306
13307
13308
13309
13310
13311
13312
13313
13314
13315
13316
13317
13318
13319
13320
13321
13322
13323
13324
13325
13326
13327
13328
13329
13330
13331
13332
13333
13334
13335
13336
13337
13338
13339
13340
13341
13342
13343
13344
13345
13346
13347
13348
13349
13350
13351
13352
13353
13354
13355
13356
13357
13358
13359
13360
13361
13362
13363
13364
13365
13366
13367
13368
13369
13370
13371
13372
13373
13374
13375
13376
13377
13378
13379
13380
13381
13382
13383
13384
13385
13386
13387
13388
13389
13390
13391
13392
13393
13394
13395
13396
13397
13398
13399
13400
13401
13402
13403
13404
13405
13406
13407
13408
13409
13410
13411
13412
13413
13414
13415
13416
13417
13418
13419
13420
13421
13422
13423
13424
13425
13426
13427
13428
13429
13430
13431
13432
13433
13434
13435
13436
13437
13438
13439
13440
13441
13442
13443
13444
13445
13446
13447
13448
13449
13450
13451
13452
13453
13454
13455
13456
13457
13458
13459
13460
13461
13462
13463
13464
13465
13466
13467
13468
13469
13470
13471
13472
13473
13474
13475
13476
13477
13478
13479
13480
13481
13482
13483
13484
13485
13486
13487
13488
13489
13490
13491
13492
13493
13494
13495
13496
13497
13498
13499
13500
13501
13502
13503
13504
13505
13506
13507
13508
13509
13510
13511
13512
13513
13514
13515
13516
13517
13518
13519
13520
13521
13522
13523
13524
13525
13526
13527
13528
13529
13530
13531
13532
13533
13534
13535
13536
13537
13538
13539
13540
13541
13542
13543
13544
13545
13546
13547
13548
13549
13550
13551
13552
13553
13554
13555
13556
13557
13558
13559
13560
13561
13562
13563
13564
13565
13566
13567
13568
13569
13570
13571
13572
13573
13574
13575
13576
13577
13578
13579
13580
13581
13582
13583
13584
13585
13586
13587
13588
13589
13590
13591
13592
13593
13594
13595
13596
13597
13598
13599
13600
13601
13602
13603
13604
13605
13606
13607
13608
13609
13610
13611
13612
13613
13614
13615
13616
13617
13618
13619
13620
13621
13622
13623
13624
13625
13626
13627
13628
13629
13630
13631
13632
13633
13634
13635
13636
13637
13638
13639
13640
13641
13642
13643
13644
13645
13646
13647
13648
13649
13650
13651
13652
13653
13654
13655
13656
13657
13658
13659
13660
13661
13662
13663
13664
13665
13666
13667
13668
13669
13670
13671
13672
13673
13674
13675
13676
13677
13678
13679
13680
13681
13682
13683
13684
13685
13686
13687
13688
13689
13690
13691
13692
13693
13694
13695
13696
13697
13698
13699
13700
13701
13702
13703
13704
13705
13706
13707
13708
13709
13710
13711
13712
13713
13714
13715
13716
13717
13718
13719
13720
13721
13722
13723
13724
13725
13726
13727
13728
13729
13730
13731
13732
13733
13734
13735
13736
13737
13738
13739
13740
13741
13742
13743
13744
13745
13746
13747
13748
13749
13750
13751
13752
13753
13754
13755
13756
13757
13758
13759
13760
13761
13762
13763
13764
13765
13766
13767
13768
13769
13770
13771
13772
13773
13774
13775
13776
13777
13778
13779
13780
13781
13782
13783
13784
13785
13786
13787
13788
13789
13790
13791
13792
13793
13794
13795
13796
13797
13798
13799
13800
13801
13802
13803
13804
13805
13806
13807
13808
13809
13810
13811
13812
13813
13814
13815
13816
13817
13818
13819
13820
13821
13822
13823
13824
13825
13826
13827
13828
13829
13830
13831
13832
13833
13834
13835
13836
13837
13838
13839
13840
13841
13842
13843
13844
13845
13846
13847
13848
13849
13850
13851
13852
13853
13854
13855
13856
13857
13858
13859
13860
13861
13862
13863
13864
13865
13866
13867
13868
13869
13870
13871
13872
13873
13874
13875
13876
13877
13878
13879
13880
13881
13882
13883
13884
13885
13886
13887
13888
13889
13890
13891
13892
13893
13894
13895
13896
13897
13898
13899
13900
13901
13902
13903
13904
13905
13906
13907
13908
13909
13910
13911
13912
13913
13914
13915
13916
13917
13918
13919
13920
13921
13922
13923
13924
13925
13926
13927
13928
13929
13930
13931
13932
13933
13934
13935
13936
13937
13938
13939
13940
13941
13942
13943
13944
13945
13946
13947
13948
13949
13950
13951
13952
13953
13954
13955
13956
13957
13958
13959
13960
13961
13962
13963
13964
13965
13966
13967
13968
13969
13970
13971
13972
13973
13974
13975
13976
13977
13978
13979
13980
13981
13982
13983
13984
13985
13986
13987
13988
13989
13990
13991
13992
13993
13994
13995
13996
13997
13998
13999
14000
14001
14002
14003
14004
14005
14006
14007
14008
14009
14010
14011
14012
14013
14014
14015
14016
14017
14018
14019
14020
14021
14022
14023
14024
14025
14026
14027
14028
14029
14030
14031
14032
14033
14034
14035
14036
14037
14038
14039
14040
14041
14042
14043
14044
14045
14046
14047
14048
14049
14050
14051
14052
14053
14054
14055
14056
14057
14058
14059
14060
14061
14062
14063
14064
14065
14066
14067
14068
14069
14070
14071
14072
14073
14074
14075
14076
14077
14078
14079
14080
14081
14082
14083
14084
14085
14086
14087
14088
14089
14090
14091
14092
14093
14094
14095
14096
14097
14098
14099
14100
14101
14102
14103
14104
14105
14106
14107
14108
14109
14110
14111
14112
14113
14114
14115
14116
14117
14118
14119
14120
14121
14122
14123
14124
14125
14126
14127
14128
14129
14130
14131
14132
14133
14134
14135
14136
14137
14138
14139
14140
14141
14142
14143
14144
14145
14146
14147
14148
14149
14150
14151
14152
14153
14154
14155
14156
14157
14158
14159
14160
14161
14162
14163
14164
14165
14166
14167
14168
14169
14170
14171
14172
14173
14174
14175
14176
14177
14178
14179
14180
14181
14182
14183
14184
14185
14186
14187
14188
14189
14190
14191
14192
14193
14194
14195
14196
14197
14198
14199
14200
14201
14202
14203
14204
14205
14206
14207
14208
14209
14210
14211
14212
14213
14214
14215
14216
14217
14218
14219
14220
14221
14222
14223
14224
14225
14226
14227
14228
14229
14230
14231
14232
14233
14234
14235
14236
14237
14238
14239
14240
14241
14242
14243
14244
14245
14246
14247
14248
14249
14250
14251
14252
14253
14254
14255
14256
14257
14258
14259
14260
14261
14262
14263
14264
14265
14266
14267
14268
14269
14270
14271
14272
14273
14274
14275
14276
14277
14278
14279
14280
14281
14282
14283
14284
14285
14286
14287
14288
14289
14290
14291
14292
14293
14294
14295
14296
14297
14298
14299
14300
14301
14302
14303
14304
14305
14306
14307
14308
14309
14310
14311
14312
14313
14314
14315
14316
14317
14318
14319
14320
14321
14322
14323
14324
14325
14326
14327
14328
14329
14330
14331
14332
14333
14334
14335
14336
14337
14338
14339
14340
14341
14342
14343
14344
14345
14346
14347
14348
14349
14350
14351
14352
14353
14354
14355
14356
14357
14358
14359
14360
14361
14362
14363
14364
14365
14366
14367
14368
14369
14370
14371
14372
14373
14374
14375
14376
14377
14378
14379
14380
14381
14382
14383
14384
14385
14386
14387
14388
14389
14390
14391
14392
14393
14394
14395
14396
14397
14398
14399
14400
14401
14402
14403
14404
14405
14406
14407
14408
14409
14410
14411
14412
14413
14414
14415
14416
14417
14418
14419
14420
14421
14422
14423
14424
14425
14426
14427
14428
14429
14430
14431
14432
14433
14434
14435
14436
14437
14438
14439
14440
14441
14442
14443
14444
14445
14446
14447
14448
14449
14450
14451
14452
14453
14454
14455
14456
14457
14458
14459
14460
14461
14462
14463
14464
14465
14466
14467
14468
14469
14470
14471
14472
14473
14474
14475
14476
14477
14478
14479
14480
14481
14482
14483
14484
14485
14486
14487
14488
14489
14490
14491
14492
14493
14494
14495
14496
14497
14498
14499
14500
14501
14502
14503
14504
14505
14506
14507
14508
14509
14510
14511
14512
14513
14514
14515
14516
14517
14518
14519
14520
14521
14522
14523
14524
14525
14526
14527
14528
14529
14530
14531
14532
14533
14534
14535
14536
14537
14538
14539
14540
14541
14542
14543
14544
14545
14546
14547
14548
14549
14550
14551
14552
14553
14554
14555
14556
14557
14558
14559
14560
14561
14562
14563
14564
14565
14566
14567
14568
14569
14570
14571
14572
14573
14574
14575
14576
14577
14578
14579
14580
14581
14582
14583
14584
14585
14586
14587
14588
14589
14590
14591
14592
14593
14594
14595
14596
14597
14598
14599
14600
14601
14602
14603
14604
14605
14606
14607
14608
14609
14610
14611
14612
14613
14614
14615
14616
14617
14618
14619
14620
14621
14622
14623
14624
14625
14626
14627
14628
14629
14630
14631
14632
14633
14634
14635
14636
14637
14638
14639
14640
14641
14642
14643
14644
14645
14646
14647
14648
14649
14650
14651
14652
14653
14654
14655
14656
14657
14658
14659
14660
14661
14662
14663
14664
14665
14666
14667
14668
14669
14670
14671
14672
14673
14674
14675
14676
14677
14678
14679
14680
14681
14682
14683
14684
14685
14686
14687
14688
14689
14690
14691
14692
14693
14694
14695
14696
14697
14698
14699
14700
14701
14702
14703
14704
14705
14706
14707
14708
14709
14710
14711
14712
14713
14714
14715
14716
14717
14718
14719
14720
14721
14722
14723
14724
14725
14726
14727
14728
14729
14730
14731
14732
14733
14734
14735
14736
14737
14738
14739
14740
14741
14742
14743
14744
14745
14746
14747
14748
14749
14750
14751
14752
14753
14754
14755
14756
14757
14758
14759
14760
14761
14762
14763
14764
14765
14766
14767
14768
14769
14770
14771
14772
14773
14774
14775
14776
14777
14778
14779
14780
14781
14782
14783
14784
14785
14786
14787
14788
14789
14790
14791
14792
14793
14794
14795
14796
14797
14798
14799
14800
14801
14802
14803
14804
14805
14806
14807
14808
14809
14810
14811
14812
14813
14814
14815
14816
14817
14818
14819
14820
14821
14822
14823
14824
14825
14826
14827
14828
14829
14830
14831
14832
14833
14834
14835
14836
14837
14838
14839
14840
14841
14842
14843
14844
14845
14846
14847
14848
14849
14850
14851
14852
14853
14854
14855
14856
14857
14858
14859
14860
14861
14862
14863
14864
14865
14866
14867
14868
14869
14870
14871
14872
14873
14874
14875
14876
14877
14878
14879
14880
14881
14882
14883
14884
14885
14886
14887
14888
14889
14890
14891
14892
14893
14894
14895
14896
14897
14898
14899
14900
14901
14902
14903
14904
14905
14906
14907
14908
14909
14910
14911
14912
14913
14914
14915
14916
14917
14918
14919
14920
14921
14922
14923
14924
14925
14926
14927
14928
14929
14930
14931
14932
14933
14934
14935
14936
14937
14938
14939
14940
14941
14942
14943
14944
14945
14946
14947
14948
14949
14950
14951
14952
14953
14954
14955
14956
14957
14958
14959
14960
14961
14962
14963
14964
14965
14966
14967
14968
14969
14970
14971
14972
14973
14974
14975
14976
14977
14978
14979
14980
14981
14982
14983
14984
14985
14986
14987
14988
14989
14990
14991
14992
14993
14994
14995
14996
14997
14998
14999
15000
15001
15002
15003
15004
15005
15006
15007
15008
15009
15010
15011
15012
15013
15014
15015
15016
15017
15018
15019
15020
15021
15022
15023
15024
15025
15026
15027
15028
15029
15030
15031
15032
15033
15034
15035
15036
15037
15038
15039
15040
15041
15042
15043
15044
15045
15046
15047
15048
15049
15050
15051
15052
15053
15054
15055
15056
15057
15058
15059
15060
15061
15062
15063
15064
15065
15066
15067
15068
15069
15070
15071
15072
15073
15074
15075
15076
15077
15078
15079
15080
15081
15082
15083
15084
15085
15086
15087
15088
15089
15090
15091
15092
15093
15094
15095
15096
15097
15098
15099
15100
15101
15102
15103
15104
15105
15106
15107
15108
15109
15110
15111
15112
15113
15114
15115
15116
15117
15118
15119
15120
15121
15122
15123
15124
15125
15126
15127
15128
15129
15130
15131
15132
15133
15134
15135
15136
15137
15138
15139
15140
15141
15142
15143
15144
15145
15146
15147
15148
15149
15150
15151
15152
15153
15154
15155
15156
15157
15158
15159
15160
15161
15162
15163
15164
15165
15166
15167
15168
15169
15170
15171
15172
15173
15174
15175
15176
15177
15178
15179
15180
15181
15182
15183
15184
15185
15186
15187
15188
15189
15190
15191
15192
15193
15194
15195
15196
15197
15198
15199
15200
15201
15202
15203
15204
15205
15206
15207
15208
15209
15210
15211
15212
15213
15214
15215
15216
15217
15218
15219
15220
15221
15222
15223
15224
15225
15226
15227
15228
15229
15230
15231
15232
15233
15234
15235
15236
15237
15238
15239
15240
15241
15242
15243
15244
15245
15246
15247
15248
15249
15250
15251
15252
15253
15254
15255
15256
15257
15258
15259
15260
15261
15262
15263
15264
15265
15266
15267
15268
15269
15270
15271
15272
15273
15274
15275
15276
15277
15278
15279
15280
15281
15282
15283
15284
15285
15286
15287
15288
15289
15290
15291
15292
15293
15294
15295
15296
15297
15298
15299
15300
15301
15302
15303
15304
15305
15306
15307
15308
15309
15310
15311
15312
15313
15314
15315
15316
15317
15318
15319
15320
15321
15322
15323
15324
15325
15326
15327
15328
15329
15330
15331
15332
15333
15334
15335
15336
15337
15338
15339
15340
15341
15342
15343
15344
15345
15346
15347
15348
15349
15350
15351
15352
15353
15354
15355
15356
15357
15358
15359
15360
15361
15362
15363
15364
15365
15366
15367
15368
15369
15370
15371
15372
15373
15374
15375
15376
15377
15378
15379
15380
15381
15382
15383
15384
15385
15386
15387
15388
15389
15390
15391
15392
15393
15394
15395
15396
15397
15398
15399
15400
15401
15402
15403
15404
15405
15406
15407
15408
15409
15410
15411
15412
15413
15414
15415
15416
15417
15418
15419
15420
15421
15422
15423
15424
15425
15426
15427
15428
15429
15430
15431
15432
15433
15434
15435
15436
15437
15438
15439
15440
15441
15442
15443
15444
15445
15446
15447
15448
15449
15450
15451
15452
15453
15454
15455
15456
15457
15458
15459
15460
15461
15462
15463
15464
15465
15466
15467
15468
15469
15470
15471
15472
15473
15474
15475
15476
15477
15478
15479
15480
15481
15482
15483
15484
15485
15486
15487
15488
15489
15490
15491
15492
15493
15494
15495
15496
15497
15498
15499
15500
15501
15502
15503
15504
15505
15506
15507
15508
15509
15510
15511
15512
15513
15514
15515
15516
15517
15518
15519
15520
15521
15522
15523
15524
15525
15526
15527
15528
15529
15530
15531
15532
15533
15534
15535
15536
15537
15538
15539
15540
15541
15542
15543
15544
15545
15546
15547
15548
15549
15550
15551
15552
15553
15554
15555
15556
15557
15558
15559
15560
15561
15562
15563
15564
15565
15566
15567
15568
15569
15570
15571
15572
15573
15574
15575
15576
15577
15578
15579
15580
15581
15582
15583
15584
15585
15586
15587
15588
15589
15590
15591
15592
15593
15594
15595
15596
15597
15598
15599
15600
15601
15602
15603
15604
15605
15606
15607
15608
15609
15610
15611
15612
15613
15614
15615
15616
15617
15618
15619
15620
15621
15622
15623
15624
15625
15626
15627
15628
15629
15630
15631
15632
15633
15634
15635
15636
15637
15638
15639
15640
15641
15642
15643
15644
15645
15646
15647
15648
15649
15650
15651
15652
15653
15654
15655
15656
15657
15658
15659
15660
15661
15662
15663
15664
15665
15666
15667
15668
15669
15670
15671
15672
15673
15674
15675
15676
15677
15678
15679
15680
15681
15682
15683
15684
15685
15686
15687
15688
15689
15690
15691
15692
15693
15694
15695
15696
15697
15698
15699
15700
15701
15702
15703
15704
15705
15706
15707
15708
15709
15710
15711
15712
15713
15714
15715
15716
15717
15718
15719
15720
15721
15722
15723
15724
15725
15726
15727
15728
15729
15730
15731
15732
15733
15734
15735
15736
15737
15738
15739
15740
15741
15742
15743
15744
15745
15746
15747
15748
15749
15750
15751
15752
15753
15754
15755
15756
15757
15758
15759
15760
15761
15762
15763
15764
15765
15766
15767
15768
15769
15770
15771
15772
15773
15774
15775
15776
15777
15778
15779
15780
15781
15782
15783
15784
15785
15786
15787
15788
15789
15790
15791
15792
15793
15794
15795
15796
15797
15798
15799
15800
15801
15802
15803
15804
15805
15806
15807
15808
15809
15810
15811
15812
15813
15814
15815
15816
15817
15818
15819
15820
15821
15822
15823
15824
15825
15826
15827
15828
15829
15830
15831
15832
15833
15834
15835
15836
15837
15838
15839
15840
15841
15842
15843
15844
15845
15846
15847
15848
15849
15850
15851
15852
15853
15854
15855
15856
15857
15858
15859
15860
15861
15862
15863
15864
15865
15866
15867
15868
15869
15870
15871
15872
15873
15874
15875
15876
15877
15878
15879
15880
15881
15882
15883
15884
15885
15886
15887
15888
15889
15890
15891
15892
15893
15894
15895
15896
15897
15898
15899
15900
15901
15902
15903
15904
15905
15906
15907
15908
15909
15910
15911
15912
15913
15914
15915
15916
15917
15918
15919
15920
15921
15922
15923
15924
15925
15926
15927
15928
15929
15930
15931
15932
15933
15934
15935
15936
15937
15938
15939
15940
15941
15942
15943
15944
15945
15946
15947
15948
15949
15950
15951
15952
15953
15954
15955
15956
15957
15958
15959
15960
15961
15962
15963
15964
15965
15966
15967
15968
15969
15970
15971
15972
15973
15974
15975
15976
15977
15978
15979
15980
15981
15982
15983
15984
15985
15986
15987
15988
15989
15990
15991
15992
15993
15994
15995
15996
15997
15998
15999
16000
16001
16002
16003
16004
16005
16006
16007
16008
16009
16010
16011
16012
16013
16014
16015
16016
16017
16018
16019
16020
16021
16022
16023
16024
16025
16026
16027
16028
16029
16030
16031
16032
16033
16034
16035
16036
16037
16038
16039
16040
16041
16042
16043
16044
16045
16046
16047
16048
16049
16050
16051
16052
16053
16054
16055
16056
16057
16058
16059
16060
16061
16062
16063
16064
16065
16066
16067
16068
16069
16070
16071
16072
16073
16074
16075
16076
16077
16078
16079
16080
16081
16082
16083
16084
16085
16086
16087
16088
16089
16090
16091
16092
16093
16094
16095
16096
16097
16098
16099
16100
16101
16102
16103
16104
16105
16106
16107
16108
16109
16110
16111
16112
16113
16114
16115
16116
16117
16118
16119
16120
16121
16122
16123
16124
16125
16126
16127
16128
16129
16130
16131
16132
16133
16134
16135
16136
16137
16138
16139
16140
16141
16142
16143
16144
16145
16146
16147
16148
16149
16150
16151
16152
16153
16154
16155
16156
16157
16158
16159
16160
16161
16162
16163
16164
16165
16166
16167
16168
16169
16170
16171
16172
16173
16174
16175
16176
16177
16178
16179
16180
16181
16182
16183
16184
16185
16186
16187
16188
16189
16190
16191
16192
16193
16194
16195
16196
16197
16198
16199
16200
16201
16202
16203
16204
16205
16206
16207
16208
16209
16210
16211
16212
16213
16214
16215
16216
16217
16218
16219
16220
16221
16222
16223
16224
16225
16226
16227
16228
16229
16230
16231
16232
16233
16234
16235
16236
16237
16238
16239
16240
16241
16242
16243
16244
16245
16246
16247
16248
16249
16250
16251
16252
16253
16254
16255
16256
16257
16258
16259
16260
16261
16262
16263
16264
16265
16266
16267
16268
16269
16270
16271
16272
16273
16274
16275
16276
16277
16278
16279
16280
16281
16282
16283
16284
16285
16286
16287
16288
16289
16290
16291
16292
16293
16294
16295
16296
16297
16298
16299
16300
16301
16302
16303
16304
16305
16306
16307
16308
16309
16310
16311
16312
16313
16314
16315
16316
16317
16318
16319
16320
16321
16322
16323
16324
16325
16326
16327
16328
16329
16330
16331
16332
16333
16334
16335
16336
16337
16338
16339
16340
16341
16342
16343
16344
16345
16346
16347
16348
16349
16350
16351
16352
16353
16354
16355
16356
16357
16358
16359
16360
16361
16362
16363
16364
16365
16366
16367
16368
16369
16370
16371
16372
16373
16374
16375
16376
16377
16378
16379
16380
16381
16382
16383
16384
16385
16386
16387
16388
16389
16390
16391
16392
16393
16394
16395
16396
16397
16398
16399
16400
16401
16402
16403
16404
16405
16406
16407
16408
16409
16410
16411
16412
16413
16414
16415
16416
16417
16418
16419
16420
16421
16422
16423
16424
16425
16426
16427
16428
16429
16430
16431
16432
16433
16434
16435
16436
16437
16438
16439
16440
16441
16442
16443
16444
16445
16446
16447
16448
16449
16450
16451
16452
16453
16454
16455
16456
16457
16458
16459
16460
16461
16462
16463
16464
16465
16466
16467
16468
16469
16470
16471
16472
16473
16474
16475
16476
16477
16478
16479
16480
16481
16482
16483
16484
16485
16486
16487
16488
16489
16490
16491
16492
16493
16494
16495
16496
16497
16498
16499
16500
16501
16502
16503
16504
16505
16506
16507
16508
16509
16510
16511
16512
16513
16514
16515
16516
16517
16518
16519
16520
16521
16522
16523
16524
16525
16526
16527
16528
16529
16530
16531
16532
16533
16534
16535
16536
16537
16538
16539
16540
16541
16542
16543
16544
16545
16546
16547
16548
16549
16550
16551
16552
16553
16554
16555
16556
16557
16558
16559
16560
16561
16562
16563
16564
16565
16566
16567
16568
16569
16570
16571
16572
16573
16574
16575
16576
16577
16578
16579
16580
16581
16582
16583
16584
16585
16586
16587
16588
16589
16590
16591
16592
16593
16594
16595
16596
16597
16598
16599
16600
16601
16602
16603
16604
16605
16606
16607
16608
16609
16610
16611
16612
16613
16614
16615
16616
16617
16618
16619
16620
16621
16622
16623
16624
16625
16626
16627
16628
16629
16630
16631
16632
16633
16634
16635
16636
16637
16638
16639
16640
16641
16642
16643
16644
16645
16646
16647
16648
16649
16650
16651
16652
16653
16654
16655
16656
16657
16658
16659
16660
16661
16662
16663
16664
16665
16666
16667
16668
16669
16670
16671
16672
16673
16674
16675
16676
16677
16678
16679
16680
16681
16682
16683
16684
16685
16686
16687
16688
16689
16690
16691
16692
16693
16694
16695
16696
16697
16698
16699
16700
16701
16702
16703
16704
16705
16706
16707
16708
16709
16710
16711
16712
16713
16714
16715
16716
16717
16718
16719
16720
16721
16722
16723
16724
16725
16726
16727
16728
16729
16730
16731
16732
16733
16734
16735
16736
16737
16738
16739
16740
16741
16742
16743
16744
16745
16746
16747
16748
16749
16750
16751
16752
16753
16754
16755
16756
16757
16758
16759
16760
16761
16762
16763
16764
16765
16766
16767
16768
16769
16770
16771
16772
16773
16774
16775
16776
16777
16778
16779
16780
16781
16782
16783
16784
16785
16786
16787
16788
16789
16790
16791
16792
16793
16794
16795
16796
16797
16798
16799
16800
16801
16802
16803
16804
16805
16806
16807
16808
16809
16810
16811
16812
16813
16814
16815
16816
16817
16818
16819
16820
16821
16822
16823
16824
16825
16826
16827
16828
16829
16830
16831
16832
16833
16834
16835
16836
16837
16838
16839
16840
16841
16842
16843
16844
16845
16846
16847
16848
16849
16850
16851
16852
16853
16854
16855
16856
16857
16858
16859
16860
16861
16862
16863
16864
16865
16866
16867
16868
16869
16870
16871
16872
16873
16874
16875
16876
16877
16878
16879
16880
16881
16882
16883
16884
16885
16886
16887
16888
16889
16890
16891
16892
16893
16894
16895
16896
16897
16898
16899
16900
16901
16902
16903
16904
16905
16906
16907
16908
16909
16910
16911
16912
16913
16914
16915
16916
16917
16918
16919
16920
16921
16922
16923
16924
16925
16926
16927
16928
16929
16930
16931
16932
16933
16934
16935
16936
16937
16938
16939
16940
16941
16942
16943
16944
16945
16946
16947
16948
16949
16950
16951
16952
16953
16954
16955
16956
16957
16958
16959
16960
16961
16962
16963
16964
16965
16966
16967
16968
16969
16970
16971
16972
16973
16974
16975
16976
16977
16978
16979
16980
16981
16982
16983
16984
16985
16986
16987
16988
16989
16990
16991
16992
16993
16994
16995
16996
16997
16998
16999
17000
17001
17002
17003
17004
17005
17006
17007
17008
17009
17010
17011
17012
17013
17014
17015
17016
17017
17018
17019
17020
17021
17022
17023
17024
17025
17026
17027
17028
17029
17030
17031
17032
17033
17034
17035
17036
17037
17038
17039
17040
17041
17042
17043
17044
17045
17046
17047
17048
17049
17050
17051
17052
17053
17054
17055
17056
17057
17058
17059
17060
17061
17062
17063
17064
17065
17066
17067
17068
17069
17070
17071
17072
17073
17074
17075
17076
17077
17078
17079
17080
17081
17082
17083
17084
17085
17086
17087
17088
17089
17090
17091
17092
17093
17094
17095
17096
17097
17098
17099
17100
17101
17102
17103
17104
17105
17106
17107
17108
17109
17110
17111
17112
17113
17114
17115
17116
17117
17118
17119
17120
17121
17122
17123
17124
17125
17126
17127
17128
17129
17130
17131
17132
17133
17134
17135
17136
17137
17138
17139
17140
17141
17142
17143
17144
17145
17146
17147
17148
17149
17150
17151
17152
17153
17154
17155
17156
17157
17158
17159
17160
17161
17162
17163
17164
17165
17166
17167
17168
17169
17170
17171
17172
17173
17174
17175
17176
17177
17178
17179
17180
17181
17182
17183
17184
17185
17186
17187
17188
17189
17190
17191
17192
17193
17194
17195
17196
17197
17198
17199
17200
17201
17202
17203
17204
17205
17206
17207
17208
17209
17210
17211
17212
17213
17214
17215
17216
17217
17218
17219
17220
17221
17222
17223
17224
17225
17226
17227
17228
17229
17230
17231
17232
17233
17234
17235
17236
17237
17238
17239
17240
17241
17242
17243
17244
17245
17246
17247
17248
17249
17250
17251
17252
17253
17254
17255
17256
17257
17258
17259
17260
17261
17262
17263
17264
17265
17266
17267
17268
17269
17270
17271
17272
17273
17274
17275
17276
17277
17278
17279
17280
17281
17282
17283
17284
17285
17286
17287
17288
17289
17290
17291
17292
17293
17294
17295
17296
17297
17298
17299
17300
17301
17302
17303
17304
17305
17306
17307
17308
17309
17310
17311
17312
17313
17314
17315
17316
17317
17318
17319
17320
17321
17322
17323
17324
17325
17326
17327
17328
17329
17330
17331
17332
17333
17334
17335
17336
17337
17338
17339
17340
17341
17342
17343
17344
17345
17346
17347
17348
17349
17350
17351
17352
17353
17354
17355
17356
17357
17358
17359
17360
17361
17362
17363
17364
17365
17366
17367
17368
17369
17370
17371
17372
17373
17374
17375
17376
17377
17378
17379
17380
17381
17382
17383
17384
17385
17386
17387
17388
17389
17390
17391
17392
17393
17394
17395
17396
17397
17398
17399
17400
17401
17402
17403
17404
17405
17406
17407
17408
17409
17410
17411
17412
17413
17414
17415
17416
17417
17418
17419
17420
17421
17422
17423
17424
17425
17426
17427
17428
17429
17430
17431
17432
17433
17434
17435
17436
17437
17438
17439
17440
17441
17442
17443
17444
17445
17446
17447
17448
17449
17450
17451
17452
17453
17454
17455
17456
17457
17458
17459
17460
17461
17462
17463
17464
17465
17466
17467
17468
17469
17470
17471
17472
17473
17474
17475
17476
17477
17478
17479
17480
17481
17482
17483
17484
17485
17486
17487
17488
17489
17490
17491
17492
17493
17494
17495
17496
17497
17498
17499
17500
17501
17502
17503
17504
17505
17506
17507
17508
17509
17510
17511
17512
17513
17514
17515
17516
17517
17518
17519
17520
17521
17522
17523
17524
17525
17526
17527
17528
17529
17530
17531
17532
17533
17534
17535
17536
17537
17538
17539
17540
17541
17542
17543
17544
17545
17546
17547
17548
17549
17550
17551
17552
17553
17554
17555
17556
17557
17558
17559
17560
17561
17562
17563
17564
17565
17566
17567
17568
17569
17570
17571
17572
17573
17574
17575
17576
17577
17578
17579
17580
17581
17582
17583
17584
17585
17586
17587
17588
17589
17590
17591
17592
17593
17594
17595
17596
17597
17598
17599
17600
17601
17602
17603
17604
17605
17606
17607
17608
17609
17610
17611
17612
17613
17614
17615
17616
17617
17618
17619
17620
17621
17622
17623
17624
17625
17626
17627
17628
17629
17630
17631
17632
17633
17634
17635
17636
17637
17638
17639
17640
17641
17642
17643
17644
17645
17646
17647
17648
17649
17650
17651
17652
17653
17654
17655
17656
17657
17658
17659
17660
17661
17662
17663
17664
17665
17666
17667
17668
17669
17670
17671
17672
17673
17674
17675
17676
17677
17678
17679
17680
17681
17682
17683
17684
17685
17686
17687
17688
17689
17690
17691
17692
17693
17694
17695
17696
17697
17698
17699
17700
17701
17702
17703
17704
17705
17706
17707
17708
17709
17710
17711
17712
17713
17714
17715
17716
17717
17718
17719
17720
17721
17722
17723
17724
17725
17726
17727
17728
17729
17730
17731
17732
17733
17734
17735
17736
17737
17738
17739
17740
17741
17742
17743
17744
17745
17746
17747
17748
17749
17750
17751
17752
17753
17754
17755
17756
17757
17758
17759
17760
17761
17762
17763
17764
17765
17766
17767
17768
17769
17770
17771
17772
17773
17774
17775
17776
17777
17778
17779
17780
17781
17782
17783
17784
17785
17786
17787
17788
17789
17790
17791
17792
17793
17794
17795
17796
17797
17798
17799
17800
17801
17802
17803
17804
17805
17806
17807
17808
17809
17810
17811
17812
17813
17814
17815
17816
17817
17818
17819
17820
17821
17822
17823
17824
17825
17826
17827
17828
17829
17830
17831
17832
17833
17834
17835
17836
17837
17838
17839
17840
17841
17842
17843
17844
17845
17846
17847
17848
17849
17850
17851
17852
17853
17854
17855
17856
17857
17858
17859
17860
17861
17862
17863
17864
17865
17866
17867
17868
17869
17870
17871
17872
17873
17874
17875
17876
17877
17878
17879
17880
17881
17882
17883
17884
17885
17886
17887
17888
17889
17890
17891
17892
17893
17894
17895
17896
17897
17898
17899
17900
17901
17902
17903
17904
17905
17906
17907
17908
17909
17910
17911
17912
17913
17914
17915
17916
17917
17918
17919
17920
17921
17922
17923
17924
17925
17926
17927
17928
17929
17930
17931
17932
17933
17934
17935
17936
17937
17938
17939
17940
17941
17942
17943
17944
17945
17946
17947
17948
17949
17950
17951
17952
17953
17954
17955
17956
17957
17958
17959
17960
17961
17962
17963
17964
17965
17966
17967
17968
17969
17970
17971
17972
17973
17974
17975
17976
17977
17978
17979
17980
17981
17982
17983
17984
17985
17986
17987
17988
17989
17990
17991
17992
17993
17994
17995
17996
17997
17998
17999
18000
18001
18002
18003
18004
18005
18006
18007
18008
18009
18010
18011
18012
18013
18014
18015
18016
18017
18018
18019
18020
18021
18022
18023
18024
18025
18026
18027
18028
18029
18030
18031
18032
18033
18034
18035
18036
18037
18038
18039
18040
18041
18042
18043
18044
18045
18046
18047
18048
18049
18050
18051
18052
18053
18054
18055
18056
18057
18058
18059
18060
18061
18062
18063
18064
18065
18066
18067
18068
18069
18070
18071
18072
18073
18074
18075
18076
18077
18078
18079
18080
18081
18082
18083
18084
18085
18086
18087
18088
18089
18090
18091
18092
18093
18094
18095
18096
18097
18098
18099
18100
18101
18102
18103
18104
18105
18106
18107
18108
18109
18110
18111
18112
18113
18114
18115
18116
18117
18118
18119
18120
18121
18122
18123
18124
18125
18126
18127
18128
18129
18130
18131
18132
18133
18134
18135
18136
18137
18138
18139
18140
18141
18142
18143
18144
18145
18146
18147
18148
18149
18150
18151
18152
18153
18154
18155
18156
18157
18158
18159
18160
18161
18162
18163
18164
18165
18166
18167
18168
18169
18170
18171
18172
18173
18174
18175
18176
18177
18178
18179
18180
18181
18182
18183
18184
18185
18186
18187
18188
18189
18190
18191
18192
18193
18194
18195
18196
18197
18198
18199
18200
18201
18202
18203
18204
18205
18206
18207
18208
18209
18210
18211
18212
18213
18214
18215
18216
18217
18218
18219
18220
18221
18222
18223
18224
18225
18226
18227
18228
18229
18230
18231
18232
18233
18234
18235
18236
18237
18238
18239
18240
18241
18242
18243
18244
18245
18246
18247
18248
18249
18250
18251
18252
18253
18254
18255
18256
18257
18258
18259
18260
18261
18262
18263
18264
18265
18266
18267
18268
18269
18270
18271
18272
18273
18274
18275
18276
18277
18278
18279
18280
18281
18282
18283
18284
18285
18286
18287
18288
18289
18290
18291
18292
18293
18294
18295
18296
18297
18298
18299
18300
18301
18302
18303
18304
18305
18306
18307
18308
18309
18310
18311
18312
18313
18314
18315
18316
18317
18318
18319
18320
18321
18322
18323
18324
18325
18326
18327
18328
18329
18330
18331
18332
18333
18334
18335
18336
18337
18338
18339
18340
18341
18342
18343
//===- DAGCombiner.cpp - Implement a DAG node combiner --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass combines dag nodes to form fewer, simpler DAG nodes.  It can be run
// both before and after the DAG is legalized.
//
// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
// primarily intended to handle simplification opportunities that are implicit
// in the LLVM IR and exposed by the various codegen lowering phases.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "dagcombine"

STATISTIC(NodesCombined   , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed     , "Number of load/op/store narrowed");
STATISTIC(LdStFP2Int      , "Number of fp load/store pairs transformed to int");
STATISTIC(SlicedLoads, "Number of load sliced");
STATISTIC(NumFPLogicOpsConv, "Number of logic ops converted to fp ops");

static cl::opt<bool>
CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
                 cl::desc("Enable DAG combiner's use of IR alias analysis"));

static cl::opt<bool>
UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
        cl::desc("Enable DAG combiner's use of TBAA"));

#ifndef NDEBUG
static cl::opt<std::string>
CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
                   cl::desc("Only use DAG-combiner alias analysis in this"
                            " function"));
#endif

/// Hidden option to stress test load slicing, i.e., when this option
/// is enabled, load slicing bypasses most of its profitability guards.
static cl::opt<bool>
StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden,
                  cl::desc("Bypass the profitability model of load slicing"),
                  cl::init(false));

static cl::opt<bool>
  MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true),
                    cl::desc("DAG combiner may split indexing from loads"));

static cl::opt<bool>
    EnableStoreMerging("combiner-store-merging", cl::Hidden, cl::init(true),
                       cl::desc("DAG combiner enable merging multiple stores "
                                "into a wider store"));

static cl::opt<unsigned> TokenFactorInlineLimit(
    "combiner-tokenfactor-inline-limit", cl::Hidden, cl::init(2048),
    cl::desc("Limit the number of operands to inline for Token Factors"));

static cl::opt<unsigned> StoreMergeDependenceLimit(
    "combiner-store-merge-dependence-limit", cl::Hidden, cl::init(10),
    cl::desc("Limit the number of times for the same StoreNode and RootNode "
             "to bail out in store merging dependence check"));

namespace {

  class DAGCombiner {
    SelectionDAG &DAG;
    const TargetLowering &TLI;
    CombineLevel Level;
    CodeGenOpt::Level OptLevel;
    bool LegalOperations = false;
    bool LegalTypes = false;
    bool ForCodeSize;

    /// Worklist of all of the nodes that need to be simplified.
    ///
    /// This must behave as a stack -- new nodes to process are pushed onto the
    /// back and when processing we pop off of the back.
    ///
    /// The worklist will not contain duplicates but may contain null entries
    /// due to nodes being deleted from the underlying DAG.
    SmallVector<SDNode *, 64> Worklist;

    /// Mapping from an SDNode to its position on the worklist.
    ///
    /// This is used to find and remove nodes from the worklist (by nulling
    /// them) when they are deleted from the underlying DAG. It relies on
    /// stable indices of nodes within the worklist.
    DenseMap<SDNode *, unsigned> WorklistMap;
    /// This records all nodes attempted to add to the worklist since we
    /// considered a new worklist entry. Since we do not add duplicate nodes
    /// to the worklist, this is different from the tail of the worklist.
    SmallSetVector<SDNode *, 32> PruningList;

    /// Set of nodes which have been combined (at least once).
    ///
    /// This is used to allow us to reliably add any operands of a DAG node
    /// which have not yet been combined to the worklist.
    SmallPtrSet<SDNode *, 32> CombinedNodes;

    /// Map from candidate StoreNode to the pair of RootNode and count.
    /// The count is used to track how many times we have seen the StoreNode
    /// with the same RootNode bail out in dependence check. If we have seen
    /// the bail out for the same pair many times over a limit, we won't
    /// consider the StoreNode with the same RootNode as store merging
    /// candidate again.
    DenseMap<SDNode *, std::pair<SDNode *, unsigned>> StoreRootCountMap;

    // AA - Used for DAG load/store alias analysis.
    AliasAnalysis *AA;

    /// When an instruction is simplified, add all users of the instruction to
    /// the work lists because they might get more simplified now.
    void AddUsersToWorklist(SDNode *N) {
      for (SDNode *Node : N->uses())
        AddToWorklist(Node);
    }

    // Prune potentially dangling nodes. This is called after
    // any visit to a node, but should also be called during a visit after any
    // failed combine which may have created a DAG node.
    void clearAddedDanglingWorklistEntries() {
      // Check any nodes added to the worklist to see if they are prunable.
      while (!PruningList.empty()) {
        auto *N = PruningList.pop_back_val();
        if (N->use_empty())
          recursivelyDeleteUnusedNodes(N);
      }
    }

    SDNode *getNextWorklistEntry() {
      // Before we do any work, remove nodes that are not in use.
      clearAddedDanglingWorklistEntries();
      SDNode *N = nullptr;
      // The Worklist holds the SDNodes in order, but it may contain null
      // entries.
      while (!N && !Worklist.empty()) {
        N = Worklist.pop_back_val();
      }

      if (N) {
        bool GoodWorklistEntry = WorklistMap.erase(N);
        (void)GoodWorklistEntry;
        assert(GoodWorklistEntry &&
               "Found a worklist entry without a corresponding map entry!");
      }
      return N;
    }

    /// Call the node-specific routine that folds each particular type of node.
    SDValue visit(SDNode *N);

  public:
    DAGCombiner(SelectionDAG &D, AliasAnalysis *AA, CodeGenOpt::Level OL)
        : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
          OptLevel(OL), AA(AA) {
      ForCodeSize = DAG.getMachineFunction().getFunction().hasOptSize();

      MaximumLegalStoreInBits = 0;
      for (MVT VT : MVT::all_valuetypes())
        if (EVT(VT).isSimple() && VT != MVT::Other &&
            TLI.isTypeLegal(EVT(VT)) &&
            VT.getSizeInBits() >= MaximumLegalStoreInBits)
          MaximumLegalStoreInBits = VT.getSizeInBits();
    }

    void ConsiderForPruning(SDNode *N) {
      // Mark this for potential pruning.
      PruningList.insert(N);
    }

    /// Add to the worklist, making sure its instance is at the back (next to
    /// be processed).
    void AddToWorklist(SDNode *N) {
      assert(N->getOpcode() != ISD::DELETED_NODE &&
             "Deleted Node added to Worklist");

      // Skip handle nodes as they can't usefully be combined and confuse the
      // zero-use deletion strategy.
      if (N->getOpcode() == ISD::HANDLENODE)
        return;

      ConsiderForPruning(N);

      if (WorklistMap.insert(std::make_pair(N, Worklist.size())).second)
        Worklist.push_back(N);
    }

    /// Remove all instances of N from the worklist.
    void removeFromWorklist(SDNode *N) {
      CombinedNodes.erase(N);
      PruningList.remove(N);
      StoreRootCountMap.erase(N);

      auto It = WorklistMap.find(N);
      if (It == WorklistMap.end())
        return; // Not in the worklist.

      // Null out the entry rather than erasing it to avoid a linear operation.
      Worklist[It->second] = nullptr;
      WorklistMap.erase(It);
    }

    void deleteAndRecombine(SDNode *N);
    bool recursivelyDeleteUnusedNodes(SDNode *N);

    /// Replaces all uses of the results of one DAG node with new values.
    SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                      bool AddTo = true);

    /// Replaces all uses of the results of one DAG node with new values.
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
      return CombineTo(N, &Res, 1, AddTo);
    }

    /// Replaces all uses of the results of one DAG node with new values.
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
                      bool AddTo = true) {
      SDValue To[] = { Res0, Res1 };
      return CombineTo(N, To, 2, AddTo);
    }

    void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);

  private:
    unsigned MaximumLegalStoreInBits;

    /// Check the specified integer node value to see if it can be simplified or
    /// if things it uses can be simplified by bit propagation.
    /// If so, return true.
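    /// For example, when only the low bit of Op is demanded, computations
    /// that affect only the higher bits may be stripped away.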
    bool SimplifyDemandedBits(SDValue Op) {
      unsigned BitWidth = Op.getScalarValueSizeInBits();
      APInt DemandedBits = APInt::getAllOnesValue(BitWidth);
      return SimplifyDemandedBits(Op, DemandedBits);
    }

    bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits) {
      EVT VT = Op.getValueType();
      unsigned NumElts = VT.isVector() ? VT.getVectorNumElements() : 1;
      APInt DemandedElts = APInt::getAllOnesValue(NumElts);
      return SimplifyDemandedBits(Op, DemandedBits, DemandedElts);
    }

    /// Check the specified vector node value to see if it can be simplified or
    /// if things it uses can be simplified as it only uses some of the
    /// elements. If so, return true.
    bool SimplifyDemandedVectorElts(SDValue Op) {
      unsigned NumElts = Op.getValueType().getVectorNumElements();
      APInt DemandedElts = APInt::getAllOnesValue(NumElts);
      return SimplifyDemandedVectorElts(Op, DemandedElts);
    }

    bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                              const APInt &DemandedElts);
    bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
                                    bool AssumeSingleUse = false);

    bool CombineToPreIndexedLoadStore(SDNode *N);
    bool CombineToPostIndexedLoadStore(SDNode *N);
    SDValue SplitIndexingFromLoad(LoadSDNode *LD);
    bool SliceUpLoad(SDNode *N);

    // Scalars have size 0 to distinguish from singleton vectors.
    SDValue ForwardStoreValueToDirectLoad(LoadSDNode *LD);
    bool getTruncatedStoreValue(StoreSDNode *ST, SDValue &Val);
    bool extendLoadedValueToExtension(LoadSDNode *LD, SDValue &Val);

    /// Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed
    ///   load.
    ///
    /// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced.
    /// \param InVecVT type of the input vector to EVE with bitcasts resolved.
    /// \param EltNo index of the vector element to load.
    /// \param OriginalLoad load that EVE came from to be replaced.
    /// \returns EVE on success, SDValue() on failure.
    SDValue scalarizeExtractedVectorLoad(SDNode *EVE, EVT InVecVT,
                                         SDValue EltNo,
                                         LoadSDNode *OriginalLoad);
    void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
    SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
    SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue PromoteIntBinOp(SDValue Op);
    SDValue PromoteIntShiftOp(SDValue Op);
    SDValue PromoteExtend(SDValue Op);
    bool PromoteLoad(SDValue Op);

    /// Call the node-specific routine that knows how to fold each
    /// particular type of node. If that doesn't do anything, try the
    /// target-specific DAG combines.
    SDValue combine(SDNode *N);

    // Visitation implementation - Implement dag node combining for different
    // node types.  The semantics are as follows:
    // Return Value:
    //   SDValue.getNode() == 0 - No change was made
    //   SDValue.getNode() == N - N was replaced, is dead and has been handled.
    //   otherwise              - N should be replaced by the returned Operand.
    //
    SDValue visitTokenFactor(SDNode *N);
    SDValue visitMERGE_VALUES(SDNode *N);
    SDValue visitADD(SDNode *N);
    SDValue visitADDLike(SDNode *N);
    SDValue visitADDLikeCommutative(SDValue N0, SDValue N1, SDNode *LocReference);
    SDValue visitSUB(SDNode *N);
    SDValue visitADDSAT(SDNode *N);
    SDValue visitSUBSAT(SDNode *N);
    SDValue visitADDC(SDNode *N);
    SDValue visitADDO(SDNode *N);
    SDValue visitUADDOLike(SDValue N0, SDValue N1, SDNode *N);
    SDValue visitSUBC(SDNode *N);
    SDValue visitSUBO(SDNode *N);
    SDValue visitADDE(SDNode *N);
    SDValue visitADDCARRY(SDNode *N);
    SDValue visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn, SDNode *N);
    SDValue visitSUBE(SDNode *N);
    SDValue visitSUBCARRY(SDNode *N);
    SDValue visitMUL(SDNode *N);
    SDValue visitMULFIX(SDNode *N);
    SDValue useDivRem(SDNode *N);
    SDValue visitSDIV(SDNode *N);
    SDValue visitSDIVLike(SDValue N0, SDValue N1, SDNode *N);
    SDValue visitUDIV(SDNode *N);
    SDValue visitUDIVLike(SDValue N0, SDValue N1, SDNode *N);
    SDValue visitREM(SDNode *N);
    SDValue visitMULHU(SDNode *N);
    SDValue visitMULHS(SDNode *N);
    SDValue visitSMUL_LOHI(SDNode *N);
    SDValue visitUMUL_LOHI(SDNode *N);
    SDValue visitMULO(SDNode *N);
    SDValue visitIMINMAX(SDNode *N);
    SDValue visitAND(SDNode *N);
    SDValue visitANDLike(SDValue N0, SDValue N1, SDNode *N);
    SDValue visitOR(SDNode *N);
    SDValue visitORLike(SDValue N0, SDValue N1, SDNode *N);
    SDValue visitXOR(SDNode *N);
    SDValue SimplifyVBinOp(SDNode *N);
    SDValue visitSHL(SDNode *N);
    SDValue visitSRA(SDNode *N);
    SDValue visitSRL(SDNode *N);
    SDValue visitFunnelShift(SDNode *N);
    SDValue visitRotate(SDNode *N);
    SDValue visitABS(SDNode *N);
    SDValue visitBSWAP(SDNode *N);
    SDValue visitBITREVERSE(SDNode *N);
    SDValue visitCTLZ(SDNode *N);
    SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTTZ(SDNode *N);
    SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTPOP(SDNode *N);
    SDValue visitSELECT(SDNode *N);
    SDValue visitVSELECT(SDNode *N);
    SDValue visitSELECT_CC(SDNode *N);
    SDValue visitSETCC(SDNode *N);
    SDValue visitSETCCCARRY(SDNode *N);
    SDValue visitSIGN_EXTEND(SDNode *N);
    SDValue visitZERO_EXTEND(SDNode *N);
    SDValue visitANY_EXTEND(SDNode *N);
    SDValue visitAssertExt(SDNode *N);
    SDValue visitSIGN_EXTEND_INREG(SDNode *N);
    SDValue visitSIGN_EXTEND_VECTOR_INREG(SDNode *N);
    SDValue visitZERO_EXTEND_VECTOR_INREG(SDNode *N);
    SDValue visitTRUNCATE(SDNode *N);
    SDValue visitBITCAST(SDNode *N);
    SDValue visitBUILD_PAIR(SDNode *N);
    SDValue visitFADD(SDNode *N);
    SDValue visitFSUB(SDNode *N);
    SDValue visitFMUL(SDNode *N);
    SDValue visitFMA(SDNode *N);
    SDValue visitFDIV(SDNode *N);
    SDValue visitFREM(SDNode *N);
    SDValue visitFSQRT(SDNode *N);
    SDValue visitFCOPYSIGN(SDNode *N);
    SDValue visitFPOW(SDNode *N);
    SDValue visitSINT_TO_FP(SDNode *N);
    SDValue visitUINT_TO_FP(SDNode *N);
    SDValue visitFP_TO_SINT(SDNode *N);
    SDValue visitFP_TO_UINT(SDNode *N);
    SDValue visitFP_ROUND(SDNode *N);
    SDValue visitFP_EXTEND(SDNode *N);
    SDValue visitFNEG(SDNode *N);
    SDValue visitFABS(SDNode *N);
    SDValue visitFCEIL(SDNode *N);
    SDValue visitFTRUNC(SDNode *N);
    SDValue visitFFLOOR(SDNode *N);
    SDValue visitFMINNUM(SDNode *N);
    SDValue visitFMAXNUM(SDNode *N);
    SDValue visitFMINIMUM(SDNode *N);
    SDValue visitFMAXIMUM(SDNode *N);
    SDValue visitBRCOND(SDNode *N);
    SDValue visitBR_CC(SDNode *N);
    SDValue visitLOAD(SDNode *N);

    SDValue replaceStoreChain(StoreSDNode *ST, SDValue BetterChain);
    SDValue replaceStoreOfFPConstant(StoreSDNode *ST);

    SDValue visitSTORE(SDNode *N);
    SDValue visitLIFETIME_END(SDNode *N);
    SDValue visitINSERT_VECTOR_ELT(SDNode *N);
    SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
    SDValue visitBUILD_VECTOR(SDNode *N);
    SDValue visitCONCAT_VECTORS(SDNode *N);
    SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
    SDValue visitVECTOR_SHUFFLE(SDNode *N);
    SDValue visitSCALAR_TO_VECTOR(SDNode *N);
    SDValue visitINSERT_SUBVECTOR(SDNode *N);
    SDValue visitMLOAD(SDNode *N);
    SDValue visitMSTORE(SDNode *N);
    SDValue visitMGATHER(SDNode *N);
    SDValue visitMSCATTER(SDNode *N);
    SDValue visitFP_TO_FP16(SDNode *N);
    SDValue visitFP16_TO_FP(SDNode *N);
    SDValue visitVECREDUCE(SDNode *N);

    SDValue visitFADDForFMACombine(SDNode *N);
    SDValue visitFSUBForFMACombine(SDNode *N);
    SDValue visitFMULForFMADistributiveCombine(SDNode *N);

    SDValue XformToShuffleWithZero(SDNode *N);
    bool reassociationCanBreakAddressingModePattern(unsigned Opc,
                                                    const SDLoc &DL, SDValue N0,
                                                    SDValue N1);
    SDValue reassociateOpsCommutative(unsigned Opc, const SDLoc &DL, SDValue N0,
                                      SDValue N1);
    SDValue reassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
                           SDValue N1, SDNodeFlags Flags);

    SDValue visitShiftByConstant(SDNode *N);

    SDValue foldSelectOfConstants(SDNode *N);
    SDValue foldVSelectOfConstants(SDNode *N);
    SDValue foldBinOpIntoSelect(SDNode *BO);
    bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
    SDValue hoistLogicOpWithSameOpcodeHands(SDNode *N);
    SDValue SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2);
    SDValue SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
                             SDValue N2, SDValue N3, ISD::CondCode CC,
                             bool NotExtCompare = false);
    SDValue convertSelectOfFPConstantsToLoadOffset(
        const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2, SDValue N3,
        ISD::CondCode CC);
    SDValue foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, SDValue N1,
                                   SDValue N2, SDValue N3, ISD::CondCode CC);
    SDValue foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
                              const SDLoc &DL);
    SDValue unfoldMaskedMerge(SDNode *N);
    SDValue unfoldExtremeBitClearingToShifts(SDNode *N);
    SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                          const SDLoc &DL, bool foldBooleans);
    SDValue rebuildSetCC(SDValue N);

    bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
                           SDValue &CC) const;
    bool isOneUseSetCC(SDValue N) const;
    bool isCheaperToUseNegatedFPOps(SDValue X, SDValue Y);

    SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                       unsigned HiOp);
    SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
    SDValue CombineExtLoad(SDNode *N);
    SDValue CombineZExtLogicopShiftLoad(SDNode *N);
    SDValue combineRepeatedFPDivisors(SDNode *N);
    SDValue combineInsertEltToShuffle(SDNode *N, unsigned InsIndex);
    SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
    SDValue BuildSDIV(SDNode *N);
    SDValue BuildSDIVPow2(SDNode *N);
    SDValue BuildUDIV(SDNode *N);
    SDValue BuildLogBase2(SDValue V, const SDLoc &DL);
    SDValue BuildDivEstimate(SDValue N, SDValue Op, SDNodeFlags Flags);
    SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags);
    SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags Flags);
    SDValue buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags, bool Recip);
    SDValue buildSqrtNROneConst(SDValue Arg, SDValue Est, unsigned Iterations,
                                SDNodeFlags Flags, bool Reciprocal);
    SDValue buildSqrtNRTwoConst(SDValue Arg, SDValue Est, unsigned Iterations,
                                SDNodeFlags Flags, bool Reciprocal);
    SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
                               bool DemandHighBits = true);
    SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
    SDValue MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
                              SDValue InnerPos, SDValue InnerNeg,
                              unsigned PosOpcode, unsigned NegOpcode,
                              const SDLoc &DL);
    SDValue MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL);
    SDValue MatchLoadCombine(SDNode *N);
    SDValue MatchStoreCombine(StoreSDNode *N);
    SDValue ReduceLoadWidth(SDNode *N);
    SDValue ReduceLoadOpStoreWidth(SDNode *N);
    SDValue splitMergedValStore(StoreSDNode *ST);
    SDValue TransformFPLoadStorePair(SDNode *N);
    SDValue convertBuildVecZextToZext(SDNode *N);
    SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
    SDValue reduceBuildVecToShuffle(SDNode *N);
    SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
                                  ArrayRef<int> VectorMask, SDValue VecIn1,
                                  SDValue VecIn2, unsigned LeftIdx,
                                  bool DidSplitVec);
    SDValue matchVSelectOpSizesWithSetCC(SDNode *Cast);

    /// Walk up chain skipping non-aliasing memory nodes,
    /// looking for aliasing nodes and adding them to the Aliases vector.
    void GatherAllAliases(SDNode *N, SDValue OriginalChain,
                          SmallVectorImpl<SDValue> &Aliases);

    /// Return true if there is any possibility that the two addresses overlap.
    bool isAlias(SDNode *Op0, SDNode *Op1) const;

    /// Walk up chain skipping non-aliasing memory nodes, looking for a better
    /// chain (aliasing node).
    SDValue FindBetterChain(SDNode *N, SDValue Chain);

    /// Try to replace a store and any possibly adjacent stores on
    /// consecutive chains with better chains. Return true only if St is
    /// replaced.
    ///
    /// Notice that other chains may still be replaced even if the function
    /// returns false.
    bool findBetterNeighborChains(StoreSDNode *St);

    // Helper for findBetterNeighborChains. Walk up the store chain, adding
    // additional chained stores that do not overlap and can be parallelized.
    bool parallelizeChainedStores(StoreSDNode *St);

    /// Holds a pointer to an LSBaseSDNode as well as information on where it
    /// is located in a sequence of memory operations connected by a chain.
    struct MemOpLink {
      // Ptr to the mem node.
      LSBaseSDNode *MemNode;

      // Offset from the base ptr.
      int64_t OffsetFromBase;

      MemOpLink(LSBaseSDNode *N, int64_t Offset)
          : MemNode(N), OffsetFromBase(Offset) {}
    };

    /// This is a helper function for visitMUL to check the profitability
    /// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
    /// MulNode is the original multiply, AddNode is (add x, c1),
    /// and ConstNode is c2.
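    /// For example, (mul (add x, 3), 5) may become (add (mul x, 5), 15) when
    /// the target considers the transform profitable.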
    bool isMulAddWithConstProfitable(SDNode *MulNode,
                                     SDValue &AddNode,
                                     SDValue &ConstNode);

    /// This is a helper function for visitAND and visitZERO_EXTEND.  Returns
    /// true if the (and (load x) c) pattern matches an extload.  ExtVT returns
    /// the type of the loaded value to be extended.
    bool isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
                          EVT LoadResultTy, EVT &ExtVT);

    /// Helper function to calculate whether the given Load/Store can have its
    /// width reduced to ExtVT.
    bool isLegalNarrowLdSt(LSBaseSDNode *LDSTN, ISD::LoadExtType ExtType,
                           EVT &MemVT, unsigned ShAmt = 0);

    /// Used by BackwardsPropagateMask to find suitable loads.
    bool SearchForAndLoads(SDNode *N, SmallVectorImpl<LoadSDNode*> &Loads,
                           SmallPtrSetImpl<SDNode*> &NodesWithConsts,
                           ConstantSDNode *Mask, SDNode *&NodeToMask);
    /// Attempt to propagate a given AND node back to load leaves so that they
    /// can be combined into narrow loads.
    bool BackwardsPropagateMask(SDNode *N, SelectionDAG &DAG);

    /// Helper function for MergeConsecutiveStores which merges the
    /// component store chains.
    SDValue getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes,
                                unsigned NumStores);

    /// This is a helper function for MergeConsecutiveStores. When the
    /// source elements of the consecutive stores are all constants or
    /// all extracted vector elements, try to merge them into one
    /// larger store introducing bitcasts if necessary.  \return True
    /// if a merged store was created.
    bool MergeStoresOfConstantsOrVecElts(SmallVectorImpl<MemOpLink> &StoreNodes,
                                         EVT MemVT, unsigned NumStores,
                                         bool IsConstantSrc, bool UseVector,
                                         bool UseTrunc);

    /// This is a helper function for MergeConsecutiveStores. Stores
    /// that potentially may be merged with St are placed in
    /// StoreNodes. RootNode is a chain predecessor to all store
    /// candidates.
    void getStoreMergeCandidates(StoreSDNode *St,
                                 SmallVectorImpl<MemOpLink> &StoreNodes,
                                 SDNode *&Root);

    /// Helper function for MergeConsecutiveStores. Checks if
    /// candidate stores have indirect dependency through their
    /// operands. RootNode is the predecessor to all stores calculated
    /// by getStoreMergeCandidates and is used to prune the dependency check.
    /// \return True if safe to merge.
    bool checkMergeStoreCandidatesForDependencies(
        SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores,
        SDNode *RootNode);

    /// Merge consecutive store operations into a wide store.
    /// This optimization uses wide integers or vectors when possible.
    /// \return true if stores were merged into a wider store.
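    /// For example, four adjacent i8 stores of constants may be replaced by a
    /// single i32 store of the combined value when that type is legal.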
    bool MergeConsecutiveStores(StoreSDNode *St);

    /// Try to transform a truncation where C is a constant:
    ///     (trunc (and X, C)) -> (and (trunc X), (trunc C))
    ///
    /// \p N needs to be a truncation and its first operand an AND. Other
    /// requirements are checked by the function (e.g. that trunc is
    /// single-use); if they are not met, an empty SDValue is returned.
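    /// For example, (i16 (trunc (and (i32 X), 0x0F0F))) may become
    /// (and (i16 (trunc X)), 0x0F0F).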
    SDValue distributeTruncateThroughAnd(SDNode *N);

    /// Helper function to determine whether the target supports operation
    /// given by \p Opcode for type \p VT, that is, whether the operation
    /// is legal or custom before legalizing operations, and whether it is
    /// legal (but not custom) after legalization.
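    /// For example, a target that lowers an opcode through a Custom hook
    /// reports it as available before operation legalization but not after.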
    bool hasOperation(unsigned Opcode, EVT VT) {
      if (LegalOperations)
        return TLI.isOperationLegal(Opcode, VT);
      return TLI.isOperationLegalOrCustom(Opcode, VT);
    }

  public:
    /// Runs the dag combiner on all nodes in the work list
    void Run(CombineLevel AtLevel);

    SelectionDAG &getDAG() const { return DAG; }

    /// Returns a type large enough to hold any valid shift amount - before type
    /// legalization these can be huge.
    EVT getShiftAmountTy(EVT LHSTy) {
      assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
      return TLI.getShiftAmountTy(LHSTy, DAG.getDataLayout(), LegalTypes);
    }

    /// This method returns true if we are running before type legalization or
    /// if the specified VT is legal.
    bool isTypeLegal(const EVT &VT) {
      if (!LegalTypes) return true;
      return TLI.isTypeLegal(VT);
    }

    /// Convenience wrapper around TargetLowering::getSetCCResultType
    EVT getSetCCResultType(EVT VT) const {
      return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
    }

    void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
                         SDValue OrigLoad, SDValue ExtLoad,
                         ISD::NodeType ExtType);
  };

/// This class is a DAGUpdateListener that removes any deleted
/// nodes from the worklist.
class WorklistRemover : public SelectionDAG::DAGUpdateListener {
  DAGCombiner &DC;

public:
  explicit WorklistRemover(DAGCombiner &dc)
    : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}

  void NodeDeleted(SDNode *N, SDNode *E) override {
    DC.removeFromWorklist(N);
  }
};

class WorklistInserter : public SelectionDAG::DAGUpdateListener {
  DAGCombiner &DC;

public:
  explicit WorklistInserter(DAGCombiner &dc)
      : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}

  // FIXME: Ideally we could add N to the worklist, but this causes exponential
  //        compile time costs in large DAGs, e.g. Halide.
  void NodeInserted(SDNode *N) override { DC.ConsiderForPruning(N); }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//  TargetLowering::DAGCombinerInfo implementation
//===----------------------------------------------------------------------===//

void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
  ((DAGCombiner*)DC)->AddToWorklist(N);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
}

bool TargetLowering::DAGCombinerInfo::
recursivelyDeleteUnusedNodes(SDNode *N) {
  return ((DAGCombiner*)DC)->recursivelyDeleteUnusedNodes(N);
}

void TargetLowering::DAGCombinerInfo::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
  return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
}

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

void DAGCombiner::deleteAndRecombine(SDNode *N) {
  removeFromWorklist(N);

  // If the operands of this node are only used by the node, they will now be
  // dead. Make sure to re-visit them and recursively delete dead nodes.
  for (const SDValue &Op : N->ops())
    // For an operand generating multiple values, one of the values may
    // become dead allowing further simplification (e.g. split index
    // arithmetic from an indexed load).
    if (Op->hasOneUse() || Op->getNumValues() > 1)
      AddToWorklist(Op.getNode());

  DAG.DeleteNode(N);
}

// APInts must be the same size for most operations; this helper function
// zero extends the shorter of the pair so that they match.
// We provide an Offset so that we can create bitwidths that won't overflow.
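// For example, an 8-bit LHS and a 16-bit RHS with Offset == 1 are both
// widened to 17 bits, leaving one spare bit of headroom.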
static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) {
  unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth());
  LHS = LHS.zextOrSelf(Bits);
  RHS = RHS.zextOrSelf(Bits);
}

// Return true if this node is a setcc, or is a select_cc
// that selects between the target values used for true and false, making it
// equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to
// the appropriate nodes based on the type of node we are checking. This
// simplifies life a bit for the callers.
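// For example, (select_cc lhs, rhs, -1, 0, cc) on a target whose true value
// is all-ones carries the same information as (setcc lhs, rhs, cc).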
bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
                                    SDValue &CC) const {
  if (N.getOpcode() == ISD::SETCC) {
    LHS = N.getOperand(0);
    RHS = N.getOperand(1);
    CC  = N.getOperand(2);
    return true;
  }

  if (N.getOpcode() != ISD::SELECT_CC ||
      !TLI.isConstTrueVal(N.getOperand(2).getNode()) ||
      !TLI.isConstFalseVal(N.getOperand(3).getNode()))
    return false;

  if (TLI.getBooleanContents(N.getValueType()) ==
      TargetLowering::UndefinedBooleanContent)
    return false;

  LHS = N.getOperand(0);
  RHS = N.getOperand(1);
  CC  = N.getOperand(4);
  return true;
}

/// Return true if this is a SetCC-equivalent operation with only one use.
/// If this is true, it allows the users to invert the operation for free when
/// it is profitable to do so.
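/// For example, a single-use (setcc x, y, eq) can be inverted by rewriting it
/// as (setcc x, y, ne) without affecting any other user.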
bool DAGCombiner::isOneUseSetCC(SDValue N) const {
  SDValue N0, N1, N2;
  if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
    return true;
  return false;
}

// Returns the SDNode if it is a constant float BuildVector
// or constant float.
static SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();
  return nullptr;
}

// Determines if it is a constant integer or a build vector of constant
// integers (and undefs).
// Build vectors that implicitly truncate their constants are not permitted.
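// For example, a BUILD_VECTOR of i16 elements whose operands carry 32-bit
// constants is rejected, because using it would implicitly truncate them.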
static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) {
  if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N))
    return !(Const->isOpaque() && NoOpaques);
  if (N.getOpcode() != ISD::BUILD_VECTOR)
    return false;
  unsigned BitWidth = N.getScalarValueSizeInBits();
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op);
    if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth ||
        (Const->isOpaque() && NoOpaques))
      return false;
  }
  return true;
}

// Determines if a BUILD_VECTOR is composed of all-constants possibly mixed
// with undefs.
static bool isAnyConstantBuildVector(SDValue V, bool NoOpaques = false) {
  if (V.getOpcode() != ISD::BUILD_VECTOR)
    return false;
  return isConstantOrConstantVector(V, NoOpaques) ||
         ISD::isBuildVectorOfConstantFPSDNodes(V.getNode());
}

bool DAGCombiner::reassociationCanBreakAddressingModePattern(unsigned Opc,
                                                             const SDLoc &DL,
                                                             SDValue N0,
                                                             SDValue N1) {
  // Currently this only tries to ensure we don't undo the GEP splits done by
  // CodeGenPrepare when shouldConsiderGEPOffsetSplit is true. To ensure this,
  // we check if the following transformation would be problematic:
  // (load/store (add, (add, x, offset1), offset2)) ->
  // (load/store (add, x, offset1+offset2)).
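  // For example, if offset2 is already a legal immediate for the target's
  // addressing mode but offset1+offset2 is not, folding the constants would
  // force a separate add instruction, so we report the reassociation as
  // potentially harmful.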

  if (Opc != ISD::ADD || N0.getOpcode() != ISD::ADD)
    return false;

  if (N0.hasOneUse())
    return false;

  auto *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  auto *C2 = dyn_cast<ConstantSDNode>(N1);
  if (!C1 || !C2)
    return false;

  const APInt &C1APIntVal = C1->getAPIntValue();
  const APInt &C2APIntVal = C2->getAPIntValue();
  if (C1APIntVal.getBitWidth() > 64 || C2APIntVal.getBitWidth() > 64)
    return false;

  const APInt CombinedValueIntVal = C1APIntVal + C2APIntVal;
  if (CombinedValueIntVal.getBitWidth() > 64)
    return false;
  const int64_t CombinedValue = CombinedValueIntVal.getSExtValue();

  for (SDNode *Node : N0->uses()) {
    auto LoadStore = dyn_cast<MemSDNode>(Node);
    if (LoadStore) {
      // Is x[offset2] already not a legal addressing mode? If so then
      // reassociating the constants breaks nothing (we test offset2 because
      // that's the one we hope to fold into the load or store).
      TargetLoweringBase::AddrMode AM;
      AM.HasBaseReg = true;
      AM.BaseOffs = C2APIntVal.getSExtValue();
      EVT VT = LoadStore->getMemoryVT();
      unsigned AS = LoadStore->getAddressSpace();
      Type *AccessTy = VT.getTypeForEVT(*DAG.getContext());
      if (!TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS))
        continue;

      // Would x[offset1+offset2] still be a legal addressing mode?
      AM.BaseOffs = CombinedValue;
      if (!TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS))
        return true;
    }
  }

  return false;
}

// Helper for DAGCombiner::reassociateOps. Try to reassociate an expression
// such as (Opc N0, N1), if \p N0 is the same kind of operation as \p Opc.
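// For example, (add (add x, c1), c2) becomes (add x, c1+c2); and when
// (add x, c1) has a single use, (add (add x, c1), y) becomes
// (add (add x, y), c1).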
SDValue DAGCombiner::reassociateOpsCommutative(unsigned Opc, const SDLoc &DL,
                                               SDValue N0, SDValue N1) {
  EVT VT = N0.getValueType();

  if (N0.getOpcode() != Opc)
    return SDValue();

  // Don't reassociate reductions.
  if (N0->getFlags().hasVectorReduction())
    return SDValue();

  if (SDNode *C1 = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
    if (SDNode *C2 = DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
      // Reassociate: (op (op x, c1), c2) -> (op x, (op c1, c2))
      if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, C1, C2))
        return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
      return SDValue();
    }
    if (N0.hasOneUse()) {
      // Reassociate: (op (op x, c1), y) -> (op (op x, y), c1)
      //              iff (op x, c1) has one use
      SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0), N1);
      if (!OpNode.getNode())
        return SDValue();
      return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
    }
  }
  return SDValue();
}

// Try to reassociate commutative binops.
SDValue DAGCombiner::reassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
                                    SDValue N1, SDNodeFlags Flags) {
  assert(TLI.isCommutativeBinOp(Opc) && "Operation not commutative.");
  // Don't reassociate reductions.
  if (Flags.hasVectorReduction())
    return SDValue();

  // Floating-point reassociation is not allowed without loose FP math.
  if (N0.getValueType().isFloatingPoint() ||
      N1.getValueType().isFloatingPoint())
    if (!Flags.hasAllowReassociation() || !Flags.hasNoSignedZeros())
      return SDValue();

  if (SDValue Combined = reassociateOpsCommutative(Opc, DL, N0, N1))
    return Combined;
  if (SDValue Combined = reassociateOpsCommutative(Opc, DL, N1, N0))
    return Combined;
  return SDValue();
}

SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                               bool AddTo) {
  assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
  ++NodesCombined;
  LLVM_DEBUG(dbgs() << "\nReplacing.1 "; N->dump(&DAG); dbgs() << "\nWith: ";
             To[0].getNode()->dump(&DAG);
             dbgs() << " and " << NumTo - 1 << " other values\n");
  for (unsigned i = 0, e = NumTo; i != e; ++i)
    assert((!To[i].getNode() ||
            N->getValueType(i) == To[i].getValueType()) &&
           "Cannot combine value to value of different type!");

  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesWith(N, To);
  if (AddTo) {
    // Push the new nodes and any users onto the worklist
    for (unsigned i = 0, e = NumTo; i != e; ++i) {
      if (To[i].getNode()) {
        AddToWorklist(To[i].getNode());
        AddUsersToWorklist(To[i].getNode());
      }
    }
  }

  // Finally, if the node is now dead, remove it from the graph.  The node
  // may not be dead if the replacement process recursively simplified to
  // something else needing this node.
  if (N->use_empty())
    deleteAndRecombine(N);
  return SDValue(N, 0);
}

void DAGCombiner::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
  // Replace all uses.  If any nodes become isomorphic to other nodes and
  // are deleted, make sure to remove them from our worklist.
  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);

  // Push the new node and any (possibly new) users onto the worklist.
  AddToWorklist(TLO.New.getNode());
  AddUsersToWorklist(TLO.New.getNode());

  // Finally, if the node is now dead, remove it from the graph.  The node
  // may not be dead if the replacement process recursively simplified to
  // something else needing this node.
  if (TLO.Old.getNode()->use_empty())
    deleteAndRecombine(TLO.Old.getNode());
}

/// Check the specified integer node value to see if it can be simplified or if
/// things it uses can be simplified by bit propagation. If so, return true.
bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                       const APInt &DemandedElts) {
  TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
  KnownBits Known;
  if (!TLI.SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO))
    return false;

  // Revisit the node.
  AddToWorklist(Op.getNode());

  // Replace the old value with the new one.
  ++NodesCombined;
  LLVM_DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.getNode()->dump(&DAG);
             dbgs() << "\nWith: "; TLO.New.getNode()->dump(&DAG);
             dbgs() << '\n');

  CommitTargetLoweringOpt(TLO);
  return true;
}

/// Check the specified vector node value to see if it can be simplified or
/// if things it uses can be simplified as it only uses some of the elements.
/// If so, return true.
bool DAGCombiner::SimplifyDemandedVectorElts(SDValue Op,
                                             const APInt &DemandedElts,
                                             bool AssumeSingleUse) {
  TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
  APInt KnownUndef, KnownZero;
  if (!TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero,
                                      TLO, 0, AssumeSingleUse))
    return false;

  // Revisit the node.
  AddToWorklist(Op.getNode());

  // Replace the old value with the new one.
  ++NodesCombined;
  LLVM_DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.getNode()->dump(&DAG);
             dbgs() << "\nWith: "; TLO.New.getNode()->dump(&DAG);
             dbgs() << '\n');

  CommitTargetLoweringOpt(TLO);
  return true;
}

void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
  SDLoc DL(Load);
  EVT VT = Load->getValueType(0);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, SDValue(ExtLoad, 0));

  LLVM_DEBUG(dbgs() << "\nReplacing.9 "; Load->dump(&DAG); dbgs() << "\nWith: ";
             Trunc.getNode()->dump(&DAG); dbgs() << '\n');
  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
  deleteAndRecombine(Load);
  AddToWorklist(Trunc.getNode());
}

SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
  Replace = false;
  SDLoc DL(Op);
  if (ISD::isUNINDEXEDLoad(Op.getNode())) {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    EVT MemVT = LD->getMemoryVT();
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? ISD::EXTLOAD
                                                      : LD->getExtensionType();
    Replace = true;
    return DAG.getExtLoad(ExtType, DL, PVT,
                          LD->getChain(), LD->getBasePtr(),
                          MemVT, LD->getMemOperand());
  }

  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  default: break;
  case ISD::AssertSext:
    if (SDValue Op0 = SExtPromoteOperand(Op.getOperand(0), PVT))
      return DAG.getNode(ISD::AssertSext, DL, PVT, Op0, Op.getOperand(1));
    break;
  case ISD::AssertZext:
    if (SDValue Op0 = ZExtPromoteOperand(Op.getOperand(0), PVT))
      return DAG.getNode(ISD::AssertZext, DL, PVT, Op0, Op.getOperand(1));
    break;
  case ISD::Constant: {
    unsigned ExtOpc =
      Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    return DAG.getNode(ExtOpc, DL, PVT, Op);
  }
  }

  if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
    return SDValue();
  return DAG.getNode(ISD::ANY_EXTEND, DL, PVT, Op);
}

SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
  if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
    return SDValue();
  EVT OldVT = Op.getValueType();
  SDLoc DL(Op);
  bool Replace = false;
  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
  if (!NewOp.getNode())
    return SDValue();
  AddToWorklist(NewOp.getNode());

  if (Replace)
    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, NewOp.getValueType(), NewOp,
                     DAG.getValueType(OldVT));
}

SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
  EVT OldVT = Op.getValueType();
  SDLoc DL(Op);
  bool Replace = false;
  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
  if (!NewOp.getNode())
    return SDValue();
  AddToWorklist(NewOp.getNode());

  if (Replace)
    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
  return DAG.getZeroExtendInReg(NewOp, DL, OldVT);
}

/// Promote the specified integer binary operation if the target indicates it is
/// beneficial; e.g. on x86, it's usually better to promote i16 operations to
/// i32 since i16 instructions are longer.
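/// A sketch of the rewrite performed here, assuming the target promotes an
/// undesirable i16 add to i32:
///   (i16 (add x, y))
///     -> (i16 (trunc (i32 (add (i32 (any_ext x)), (i32 (any_ext y))))))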
SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));

    bool Replace0 = false;
    SDValue N0 = Op.getOperand(0);
    SDValue NN0 = PromoteOperand(N0, PVT, Replace0);

    bool Replace1 = false;
    SDValue N1 = Op.getOperand(1);
    SDValue NN1 = PromoteOperand(N1, PVT, Replace1);
    SDLoc DL(Op);

    SDValue RV =
        DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, NN0, NN1));

    // We are always replacing N0/N1's use in N and only need
    // additional replacements if there are additional uses.
    Replace0 &= !N0->hasOneUse();
    Replace1 &= (N0 != N1) && !N1->hasOneUse();

    // Combine Op here so it is preserved past replacements.
    CombineTo(Op.getNode(), RV);

    // If the operands have a use ordering, make sure we deal with the
    // predecessor first.
    if (Replace0 && Replace1 && N0.getNode()->isPredecessorOf(N1.getNode())) {
      std::swap(N0, N1);
      std::swap(NN0, NN1);
    }

    if (Replace0) {
      AddToWorklist(NN0.getNode());
      ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
    }
    if (Replace1) {
      AddToWorklist(NN1.getNode());
      ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());
    }
    return Op;
  }
  return SDValue();
}

/// Promote the specified integer shift operation if the target indicates it is
/// beneficial; e.g. on x86, it's usually better to promote i16 operations to
/// i32 since i16 instructions are longer.
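/// A sketch of the SRL case, assuming i16 is promoted to i32: the shifted
/// value is widened and then masked back to its original width so that the
/// shifted-in bits stay zero:
///   (i16 (srl x, c)) -> (i16 (trunc (i32 (srl (and x32, 0xFFFF), c))))
/// where x32 stands for the any-extended operand.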
SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));

    bool Replace = false;
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    if (Opc == ISD::SRA)
      N0 = SExtPromoteOperand(N0, PVT);
    else if (Opc == ISD::SRL)
      N0 = ZExtPromoteOperand(N0, PVT);
    else
      N0 = PromoteOperand(N0, PVT, Replace);

    if (!N0.getNode())
      return SDValue();

    SDLoc DL(Op);
    SDValue RV =
        DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, N0, N1));

    if (Replace)
      ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());

    // Deal with Op being deleted.
    if (Op && Op.getOpcode() != ISD::DELETED_NODE)
      return RV;
  }
  return SDValue();
}

SDValue DAGCombiner::PromoteExtend(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");
    // fold (aext (aext x)) -> (aext x)
    // fold (aext (zext x)) -> (zext x)
    // fold (aext (sext x)) -> (sext x)
    LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0));
  }
  return SDValue();
}

bool DAGCombiner::PromoteLoad(SDValue Op) {
  if (!LegalOperations)
    return false;

  if (!ISD::isUNINDEXEDLoad(Op.getNode()))
    return false;

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return false;

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return false;

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    SDLoc DL(Op);
    SDNode *N = Op.getNode();
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT MemVT = LD->getMemoryVT();
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? ISD::EXTLOAD
                                                      : LD->getExtensionType();
    SDValue NewLD = DAG.getExtLoad(ExtType, DL, PVT,
                                   LD->getChain(), LD->getBasePtr(),
                                   MemVT, LD->getMemOperand());
    SDValue Result = DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);

    LLVM_DEBUG(dbgs() << "\nPromoting "; N->dump(&DAG); dbgs() << "\nTo: ";
               Result.getNode()->dump(&DAG); dbgs() << '\n');
    WorklistRemover DeadNodes(*this);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
    deleteAndRecombine(N);
    AddToWorklist(Result.getNode());
    return true;
  }
  return false;
}

/// Recursively delete a node which has no uses and any operands for
/// which it is the only use.
///
/// Note that this both deletes the nodes and removes them from the worklist.
/// It also adds any nodes that have had a user deleted to the worklist, as
/// they may now have only one use and be subject to other combines.
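///
/// For example (illustrative only): deleting a dead (add (load p), c) may
/// leave the load with no remaining uses, so it is deleted in the same walk,
/// while operands that still have other users are re-added to the worklist.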
bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) {
  if (!N->use_empty())
    return false;

  SmallSetVector<SDNode *, 16> Nodes;
  Nodes.insert(N);
  do {
    N = Nodes.pop_back_val();
    if (!N)
      continue;

    if (N->use_empty()) {
      for (const SDValue &ChildN : N->op_values())
        Nodes.insert(ChildN.getNode());

      removeFromWorklist(N);
      DAG.DeleteNode(N);
    } else {
      AddToWorklist(N);
    }
  } while (!Nodes.empty());
  return true;
}

//===----------------------------------------------------------------------===//
//  Main DAG Combiner implementation
//===----------------------------------------------------------------------===//

void DAGCombiner::Run(CombineLevel AtLevel) {
  // Set the instance variables so that the various visit routines can use
  // them.
  Level = AtLevel;
  LegalOperations = Level >= AfterLegalizeVectorOps;
  LegalTypes = Level >= AfterLegalizeTypes;

  WorklistInserter AddNodes(*this);

  // Add all the dag nodes to the worklist.
  for (SDNode &Node : DAG.allnodes())
    AddToWorklist(&Node);

  // Create a dummy node (which is not added to allnodes) that holds a
  // reference to the root node, preventing it from being deleted and
  // tracking any changes to the root.
  HandleSDNode Dummy(DAG.getRoot());

  // While we have a valid worklist entry node, try to combine it.
  while (SDNode *N = getNextWorklistEntry()) {
    // If N has no uses, it is dead.  Make sure to revisit all N's operands once
    // N is deleted from the DAG, since they too may now be dead or may have a
    // reduced number of uses, allowing other xforms.
    if (recursivelyDeleteUnusedNodes(N))
      continue;

    WorklistRemover DeadNodes(*this);

    // If this combine is running after legalizing the DAG, re-legalize any
    // nodes pulled off the worklist.
    if (Level == AfterLegalizeDAG) {
      SmallSetVector<SDNode *, 16> UpdatedNodes;
      bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes);

      for (SDNode *LN : UpdatedNodes) {
        AddUsersToWorklist(LN);
        AddToWorklist(LN);
      }
      if (!NIsValid)
        continue;
    }

    LLVM_DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));

    // Add any operands of the new node which have not yet been combined to the
    // worklist as well. Because the worklist uniques things already, this
    // won't repeatedly process the same operand.
    CombinedNodes.insert(N);
    for (const SDValue &ChildN : N->op_values())
      if (!CombinedNodes.count(ChildN.getNode()))
        AddToWorklist(ChildN.getNode());

    SDValue RV = combine(N);

    if (!RV.getNode())
      continue;

    ++NodesCombined;

    // If we get back the same node we passed in, rather than a new node or
    // zero, we know that the node must have defined multiple values and
    // CombineTo was used.  Since CombineTo takes care of the worklist
    // mechanics for us, we have no work to do in this case.
    if (RV.getNode() == N)
      continue;

    assert(N->getOpcode() != ISD::DELETED_NODE &&
           RV.getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned new node!");

    LLVM_DEBUG(dbgs() << " ... into: "; RV.getNode()->dump(&DAG));

    if (N->getNumValues() == RV.getNode()->getNumValues())
      DAG.ReplaceAllUsesWith(N, RV.getNode());
    else {
      assert(N->getValueType(0) == RV.getValueType() &&
             N->getNumValues() == 1 && "Type mismatch");
      DAG.ReplaceAllUsesWith(N, &RV);
    }

    // Push the new node and any users onto the worklist
    AddToWorklist(RV.getNode());
    AddUsersToWorklist(RV.getNode());

    // Finally, if the node is now dead, remove it from the graph.  The node
    // may not be dead if the replacement process recursively simplified to
    // something else needing this node. This will also take care of adding any
    // operands which have lost a user to the worklist.
    recursivelyDeleteUnusedNodes(N);
  }

  // If the root changed (e.g. it was a dead load), update the root.
  DAG.setRoot(Dummy.getValue());
  DAG.RemoveDeadNodes();
}

SDValue DAGCombiner::visit(SDNode *N) {
  switch (N->getOpcode()) {
  default: break;
  case ISD::TokenFactor:        return visitTokenFactor(N);
  case ISD::MERGE_VALUES:       return visitMERGE_VALUES(N);
  case ISD::ADD:                return visitADD(N);
  case ISD::SUB:                return visitSUB(N);
  case ISD::SADDSAT:
  case ISD::UADDSAT:            return visitADDSAT(N);
  case ISD::SSUBSAT:
  case ISD::USUBSAT:            return visitSUBSAT(N);
  case ISD::ADDC:               return visitADDC(N);
  case ISD::SADDO:
  case ISD::UADDO:              return visitADDO(N);
  case ISD::SUBC:               return visitSUBC(N);
  case ISD::SSUBO:
  case ISD::USUBO:              return visitSUBO(N);
  case ISD::ADDE:               return visitADDE(N);
  case ISD::ADDCARRY:           return visitADDCARRY(N);
  case ISD::SUBE:               return visitSUBE(N);
  case ISD::SUBCARRY:           return visitSUBCARRY(N);
  case ISD::SMULFIX:
  case ISD::SMULFIXSAT:
  case ISD::UMULFIX:
  case ISD::UMULFIXSAT:         return visitMULFIX(N);
  case ISD::MUL:                return visitMUL(N);
  case ISD::SDIV:               return visitSDIV(N);
  case ISD::UDIV:               return visitUDIV(N);
  case ISD::SREM:
  case ISD::UREM:               return visitREM(N);
  case ISD::MULHU:              return visitMULHU(N);
  case ISD::MULHS:              return visitMULHS(N);
  case ISD::SMUL_LOHI:          return visitSMUL_LOHI(N);
  case ISD::UMUL_LOHI:          return visitUMUL_LOHI(N);
  case ISD::SMULO:
  case ISD::UMULO:              return visitMULO(N);
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:               return visitIMINMAX(N);
  case ISD::AND:                return visitAND(N);
  case ISD::OR:                 return visitOR(N);
  case ISD::XOR:                return visitXOR(N);
  case ISD::SHL:                return visitSHL(N);
  case ISD::SRA:                return visitSRA(N);
  case ISD::SRL:                return visitSRL(N);
  case ISD::ROTR:
  case ISD::ROTL:               return visitRotate(N);
  case ISD::FSHL:
  case ISD::FSHR:               return visitFunnelShift(N);
  case ISD::ABS:                return visitABS(N);
  case ISD::BSWAP:              return visitBSWAP(N);
  case ISD::BITREVERSE:         return visitBITREVERSE(N);
  case ISD::CTLZ:               return visitCTLZ(N);
  case ISD::CTLZ_ZERO_UNDEF:    return visitCTLZ_ZERO_UNDEF(N);
  case ISD::CTTZ:               return visitCTTZ(N);
  case ISD::CTTZ_ZERO_UNDEF:    return visitCTTZ_ZERO_UNDEF(N);
  case ISD::CTPOP:              return visitCTPOP(N);
  case ISD::SELECT:             return visitSELECT(N);
  case ISD::VSELECT:            return visitVSELECT(N);
  case ISD::SELECT_CC:          return visitSELECT_CC(N);
  case ISD::SETCC:              return visitSETCC(N);
  case ISD::SETCCCARRY:         return visitSETCCCARRY(N);
  case ISD::SIGN_EXTEND:        return visitSIGN_EXTEND(N);
  case ISD::ZERO_EXTEND:        return visitZERO_EXTEND(N);
  case ISD::ANY_EXTEND:         return visitANY_EXTEND(N);
  case ISD::AssertSext:
  case ISD::AssertZext:         return visitAssertExt(N);
  case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
  case ISD::SIGN_EXTEND_VECTOR_INREG: return visitSIGN_EXTEND_VECTOR_INREG(N);
  case ISD::ZERO_EXTEND_VECTOR_INREG: return visitZERO_EXTEND_VECTOR_INREG(N);
  case ISD::TRUNCATE:           return visitTRUNCATE(N);
  case ISD::BITCAST:            return visitBITCAST(N);
  case ISD::BUILD_PAIR:         return visitBUILD_PAIR(N);
  case ISD::FADD:               return visitFADD(N);
  case ISD::FSUB:               return visitFSUB(N);
  case ISD::FMUL:               return visitFMUL(N);
  case ISD::FMA:                return visitFMA(N);
  case ISD::FDIV:               return visitFDIV(N);
  case ISD::FREM:               return visitFREM(N);
  case ISD::FSQRT:              return visitFSQRT(N);
  case ISD::FCOPYSIGN:          return visitFCOPYSIGN(N);
  case ISD::FPOW:               return visitFPOW(N);
  case ISD::SINT_TO_FP:         return visitSINT_TO_FP(N);
  case ISD::UINT_TO_FP:         return visitUINT_TO_FP(N);
  case ISD::FP_TO_SINT:         return visitFP_TO_SINT(N);
  case ISD::FP_TO_UINT:         return visitFP_TO_UINT(N);
  case ISD::FP_ROUND:           return visitFP_ROUND(N);
  case ISD::FP_EXTEND:          return visitFP_EXTEND(N);
  case ISD::FNEG:               return visitFNEG(N);
  case ISD::FABS:               return visitFABS(N);
  case ISD::FFLOOR:             return visitFFLOOR(N);
  case ISD::FMINNUM:            return visitFMINNUM(N);
  case ISD::FMAXNUM:            return visitFMAXNUM(N);
  case ISD::FMINIMUM:           return visitFMINIMUM(N);
  case ISD::FMAXIMUM:           return visitFMAXIMUM(N);
  case ISD::FCEIL:              return visitFCEIL(N);
  case ISD::FTRUNC:             return visitFTRUNC(N);
  case ISD::BRCOND:             return visitBRCOND(N);
  case ISD::BR_CC:              return visitBR_CC(N);
  case ISD::LOAD:               return visitLOAD(N);
  case ISD::STORE:              return visitSTORE(N);
  case ISD::INSERT_VECTOR_ELT:  return visitINSERT_VECTOR_ELT(N);
  case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
  case ISD::BUILD_VECTOR:       return visitBUILD_VECTOR(N);
  case ISD::CONCAT_VECTORS:     return visitCONCAT_VECTORS(N);
  case ISD::EXTRACT_SUBVECTOR:  return visitEXTRACT_SUBVECTOR(N);
  case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
  case ISD::SCALAR_TO_VECTOR:   return visitSCALAR_TO_VECTOR(N);
  case ISD::INSERT_SUBVECTOR:   return visitINSERT_SUBVECTOR(N);
  case ISD::MGATHER:            return visitMGATHER(N);
  case ISD::MLOAD:              return visitMLOAD(N);
  case ISD::MSCATTER:           return visitMSCATTER(N);
  case ISD::MSTORE:             return visitMSTORE(N);
  case ISD::LIFETIME_END:       return visitLIFETIME_END(N);
  case ISD::FP_TO_FP16:         return visitFP_TO_FP16(N);
  case ISD::FP16_TO_FP:         return visitFP16_TO_FP(N);
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:     return visitVECREDUCE(N);
  }
  return SDValue();
}

SDValue DAGCombiner::combine(SDNode *N) {
  SDValue RV = visit(N);

  // If nothing happened, try a target-specific DAG combine.
  if (!RV.getNode()) {
    assert(N->getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned NULL!");

    if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
        TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {

      // Expose the DAG combiner to the target combiner impls.
      TargetLowering::DAGCombinerInfo
        DagCombineInfo(DAG, Level, false, this);

      RV = TLI.PerformDAGCombine(N, DagCombineInfo);
    }
  }

  // If still nothing happened, try promoting the operation.
  if (!RV.getNode()) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
      RV = PromoteIntBinOp(SDValue(N, 0));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      RV = PromoteIntShiftOp(SDValue(N, 0));
      break;
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::ANY_EXTEND:
      RV = PromoteExtend(SDValue(N, 0));
      break;
    case ISD::LOAD:
      if (PromoteLoad(SDValue(N, 0)))
        RV = SDValue(N, 0);
      break;
    }
  }

  // If N is a commutative binary node, try to eliminate it if the commuted
  // version is already present in the DAG.
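  // For example, if both (add x, y) and (add y, x) exist in the DAG, the
  // commuted twin found via getNodeIfExists can serve all uses of N.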
  if (!RV.getNode() && TLI.isCommutativeBinOp(N->getOpcode()) &&
      N->getNumValues() == 1) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);

    // Constant operands are canonicalized to RHS.
    if (N0 != N1 && (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1))) {
      SDValue Ops[] = {N1, N0};
      SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops,
                                            N->getFlags());
      if (CSENode)
        return SDValue(CSENode, 0);
    }
  }

  return RV;
}

/// Given a node, return its input chain if it has one, otherwise return a
/// null SDValue.
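/// E.g. most chained nodes carry the chain as operand 0 (loads, stores) or as
/// the last operand, which is why the endpoints are checked before scanning
/// the middle operands.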
static SDValue getInputChainForNode(SDNode *N) {
  if (unsigned NumOps = N->getNumOperands()) {
    if (N->getOperand(0).getValueType() == MVT::Other)
      return N->getOperand(0);
    if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
      return N->getOperand(NumOps-1);
    for (unsigned i = 1; i < NumOps-1; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other)
        return N->getOperand(i);
  }
  return SDValue();
}

SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
  // If N has two operands, where one has an input chain equal to the other,
  // the 'other' chain is redundant.
  if (N->getNumOperands() == 2) {
    if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
      return N->getOperand(0);
    if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
      return N->getOperand(1);
  }

  // Don't simplify token factors if optnone.
  if (OptLevel == CodeGenOpt::None)
    return SDValue();

  // If the sole user is a token factor, we should make sure we have a
  // chance to merge them together. This prevents TF chains from inhibiting
  // optimizations.
  if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::TokenFactor)
    AddToWorklist(*(N->use_begin()));

  SmallVector<SDNode *, 8> TFs;     // List of token factors to visit.
  SmallVector<SDValue, 8> Ops;      // Ops for replacing token factor.
  SmallPtrSet<SDNode*, 16> SeenOps;
  bool Changed = false;             // If we should replace this token factor.

  // Start out with this token factor.
  TFs.push_back(N);

  // Iterate through the token factors. The TFs list grows when new token
  // factors are encountered.
  for (unsigned i = 0; i < TFs.size(); ++i) {
    // Limit number of nodes to inline, to avoid quadratic compile times.
    // We have to add the outstanding Token Factors to Ops, otherwise we might
    // drop Ops from the resulting Token Factors.
    if (Ops.size() > TokenFactorInlineLimit) {
      for (unsigned j = i; j < TFs.size(); j++)
        Ops.emplace_back(TFs[j], 0);
      // Drop unprocessed Token Factors from TFs, so we do not add them to the
      // combiner worklist later.
      TFs.resize(i);
      break;
    }

    SDNode *TF = TFs[i];
    // Check each of the operands.
    for (const SDValue &Op : TF->op_values()) {
      switch (Op.getOpcode()) {
      case ISD::EntryToken:
        // Entry tokens don't need to be added to the list. They are
        // redundant.
        Changed = true;
        break;

      case ISD::TokenFactor:
        if (Op.hasOneUse() && !is_contained(TFs, Op.getNode())) {
          // Queue up for processing.
          TFs.push_back(Op.getNode());
          Changed = true;
          break;
        }
        LLVM_FALLTHROUGH;

      default:
        // Only add if it isn't already in the list.
        if (SeenOps.insert(Op.getNode()).second)
          Ops.push_back(Op);
        else
          Changed = true;
        break;
      }
    }
  }

  // Re-visit inlined Token Factors, to clean them up in case they have been
  // removed. Skip the first Token Factor, as this is the current node.
  for (unsigned i = 1, e = TFs.size(); i < e; i++)
    AddToWorklist(TFs[i]);

  // Remove nodes that are chained to another node in the list. Do so by
  // walking up chains breadth-first, stopping when we've seen another
  // operand. In general we must climb to the EntryNode, but we can exit early
  // if we find that all remaining work is associated with just one operand,
  // as no further pruning is possible.
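  //
  // For illustration: with Ops = {A, B}, if B is encountered while walking up
  // A's chain, then B is a transitive predecessor of A and is redundant, so
  // it is pruned from the replacement token factor.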

  // List of nodes to search through and original Ops from which they originate.
  SmallVector<std::pair<SDNode *, unsigned>, 8> Worklist;
  SmallVector<unsigned, 8> OpWorkCount; // Count of work for each Op.
  SmallPtrSet<SDNode *, 16> SeenChains;
  bool DidPruneOps = false;

  unsigned NumLeftToConsider = 0;
  for (const SDValue &Op : Ops) {
    Worklist.push_back(std::make_pair(Op.getNode(), NumLeftToConsider++));
    OpWorkCount.push_back(1);
  }

  auto AddToWorklist = [&](unsigned CurIdx, SDNode *Op, unsigned OpNumber) {
    // If this is an Op, we can remove the op from the list. Re-mark any
    // search associated with it as coming from the current OpNumber.
    if (SeenOps.count(Op) != 0) {
      Changed = true;
      DidPruneOps = true;
      unsigned OrigOpNumber = 0;
      while (OrigOpNumber < Ops.size() && Ops[OrigOpNumber].getNode() != Op)
        OrigOpNumber++;
      assert((OrigOpNumber != Ops.size()) &&
             "expected to find TokenFactor Operand");
      // Re-mark worklist from OrigOpNumber to OpNumber
      for (unsigned i = CurIdx + 1; i < Worklist.size(); ++i) {
        if (Worklist[i].second == OrigOpNumber) {
          Worklist[i].second = OpNumber;
        }
      }
      OpWorkCount[OpNumber] += OpWorkCount[OrigOpNumber];
      OpWorkCount[OrigOpNumber] = 0;
      NumLeftToConsider--;
    }
    // Add if it's a new chain
    if (SeenChains.insert(Op).second) {
      OpWorkCount[OpNumber]++;
      Worklist.push_back(std::make_pair(Op, OpNumber));
    }
  };

  for (unsigned i = 0; i < Worklist.size() && i < 1024; ++i) {
    // We need to consider at least 2 Ops to prune.
    if (NumLeftToConsider <= 1)
      break;
    auto CurNode = Worklist[i].first;
    auto CurOpNumber = Worklist[i].second;
    assert((OpWorkCount[CurOpNumber] > 0) &&
           "Node should not appear in worklist");
    switch (CurNode->getOpcode()) {
    case ISD::EntryToken:
      // Hitting EntryToken is the only way for the search to terminate
      // without hitting another operand's search. Prevent us from marking
      // this operand considered.
      NumLeftToConsider++;
      break;
    case ISD::TokenFactor:
      for (const SDValue &Op : CurNode->op_values())
        AddToWorklist(i, Op.getNode(), CurOpNumber);
      break;
    case ISD::LIFETIME_START:
    case ISD::LIFETIME_END:
    case ISD::CopyFromReg:
    case ISD::CopyToReg:
      AddToWorklist(i, CurNode->getOperand(0).getNode(), CurOpNumber);
      break;
    default:
      if (auto *MemNode = dyn_cast<MemSDNode>(CurNode))
        AddToWorklist(i, MemNode->getChain().getNode(), CurOpNumber);
      break;
    }
    OpWorkCount[CurOpNumber]--;
    if (OpWorkCount[CurOpNumber] == 0)
      NumLeftToConsider--;
  }

  // If we've changed things around then replace token factor.
  if (Changed) {
    SDValue Result;
    if (Ops.empty()) {
      // The entry token is the only possible outcome.
      Result = DAG.getEntryNode();
    } else {
      if (DidPruneOps) {
        SmallVector<SDValue, 8> PrunedOps;
        for (const SDValue &Op : Ops) {
          if (SeenChains.count(Op.getNode()) == 0)
            PrunedOps.push_back(Op);
        }
        Result = DAG.getTokenFactor(SDLoc(N), PrunedOps);
      } else {
        Result = DAG.getTokenFactor(SDLoc(N), Ops);
      }
    }
    return Result;
  }
  return SDValue();
}

/// MERGE_VALUES can always be eliminated.
SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
  WorklistRemover DeadNodes(*this);
  // Replacing results may cause a different MERGE_VALUES to suddenly
  // be CSE'd with N, and carry its uses with it. Iterate until no
  // uses remain, to ensure that the node can be safely deleted.
  // First add the users of this node to the work list so that they
  // can be tried again once they have new operands.
  AddUsersToWorklist(N);
  do {
    // Do as a single replacement to avoid rewalking use lists.
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      Ops.push_back(N->getOperand(i));
    DAG.ReplaceAllUsesWith(N, Ops.data());
  } while (!N->use_empty());
  deleteAndRecombine(N);
  return SDValue(N, 0);   // Return N so it doesn't get rechecked!
}

/// If \p N is a ConstantSDNode with isOpaque() == false, return it cast to a
/// ConstantSDNode pointer; otherwise return nullptr.
static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N);
  return Const != nullptr && !Const->isOpaque() ? Const : nullptr;
}

SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) {
  assert(TLI.isBinOp(BO->getOpcode()) && BO->getNumValues() == 1 &&
         "Unexpected binary operator");

  // Don't do this unless the old select is going away. We want to eliminate the
  // binary operator, not replace a binop with a select.
  // TODO: Handle ISD::SELECT_CC.
  unsigned SelOpNo = 0;
  SDValue Sel = BO->getOperand(0);
  if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse()) {
    SelOpNo = 1;
    Sel = BO->getOperand(1);
  }

  if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse())
    return SDValue();

  SDValue CT = Sel.getOperand(1);
  if (!isConstantOrConstantVector(CT, true) &&
      !isConstantFPBuildVectorOrConstantFP(CT))
    return SDValue();

  SDValue CF = Sel.getOperand(2);
  if (!isConstantOrConstantVector(CF, true) &&
      !isConstantFPBuildVectorOrConstantFP(CF))
    return SDValue();

  // Bail out if any constants are opaque because we can't constant fold those.
  // The exception is "and" and "or" with either 0 or -1 in which case we can
  // propagate non constant operands into select. I.e.:
  // and (select Cond, 0, -1), X --> select Cond, 0, X
  // or X, (select Cond, -1, 0) --> select Cond, -1, X
  auto BinOpcode = BO->getOpcode();
  bool CanFoldNonConst =
      (BinOpcode == ISD::AND || BinOpcode == ISD::OR) &&
      (isNullOrNullSplat(CT) || isAllOnesOrAllOnesSplat(CT)) &&
      (isNullOrNullSplat(CF) || isAllOnesOrAllOnesSplat(CF));

  SDValue CBO = BO->getOperand(SelOpNo ^ 1);
  if (!CanFoldNonConst &&
      !isConstantOrConstantVector(CBO, true) &&
      !isConstantFPBuildVectorOrConstantFP(CBO))
    return SDValue();

  EVT VT = Sel.getValueType();

  // In the case of a shift, the value and the shift amount may have different
  // VTs. For instance, on x86 the shift amount is i8 regardless of the LHS
  // type. Bail out if we have swapped operands and the value types do not
  // match. NB: x86 is fine if the operands are not swapped and the shift
  // amount VT is no bigger than that of the shifted value.
  // TODO: it is possible to check for a shift operation, correct the VTs, and
  // still perform the optimization on x86 if needed.
  if (SelOpNo && VT != CBO.getValueType())
    return SDValue();

  // We have a select-of-constants followed by a binary operator with a
  // constant. Eliminate the binop by pulling the constant math into the select.
  // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
  SDLoc DL(Sel);
  SDValue NewCT = SelOpNo ? DAG.getNode(BinOpcode, DL, VT, CBO, CT)
                          : DAG.getNode(BinOpcode, DL, VT, CT, CBO);
  if (!CanFoldNonConst && !NewCT.isUndef() &&
      !isConstantOrConstantVector(NewCT, true) &&
      !isConstantFPBuildVectorOrConstantFP(NewCT))
    return SDValue();

  SDValue NewCF = SelOpNo ? DAG.getNode(BinOpcode, DL, VT, CBO, CF)
                          : DAG.getNode(BinOpcode, DL, VT, CF, CBO);
  if (!CanFoldNonConst && !NewCF.isUndef() &&
      !isConstantOrConstantVector(NewCF, true) &&
      !isConstantFPBuildVectorOrConstantFP(NewCF))
    return SDValue();

  SDValue SelectOp = DAG.getSelect(DL, VT, Sel.getOperand(0), NewCT, NewCF);
  SelectOp->setFlags(BO->getFlags());
  return SelectOp;
}

static SDValue foldAddSubBoolOfMaskedVal(SDNode *N, SelectionDAG &DAG) {
  assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Expecting add or sub");

  // Match a constant operand and a zext operand for the math instruction:
  // add Z, C
  // sub C, Z
  bool IsAdd = N->getOpcode() == ISD::ADD;
  SDValue C = IsAdd ? N->getOperand(1) : N->getOperand(0);
  SDValue Z = IsAdd ? N->getOperand(0) : N->getOperand(1);
  auto *CN = dyn_cast<ConstantSDNode>(C);
  if (!CN || Z.getOpcode() != ISD::ZERO_EXTEND)
    return SDValue();

  // Match the zext operand as a setcc of a boolean.
  if (Z.getOperand(0).getOpcode() != ISD::SETCC ||
      Z.getOperand(0).getValueType() != MVT::i1)
    return SDValue();

  // Match the compare as: setcc (X & 1), 0, eq.
  SDValue SetCC = Z.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
  if (CC != ISD::SETEQ || !isNullConstant(SetCC.getOperand(1)) ||
      SetCC.getOperand(0).getOpcode() != ISD::AND ||
      !isOneConstant(SetCC.getOperand(0).getOperand(1)))
    return SDValue();

  // We are adding/subtracting a constant and an inverted low bit. Turn that
  // into a subtract/add of the low bit with incremented/decremented constant:
  // add (zext i1 (seteq (X & 1), 0)), C --> sub C+1, (zext (X & 1))
  // sub C, (zext i1 (seteq (X & 1), 0)) --> add C-1, (zext (X & 1))
  EVT VT = C.getValueType();
  SDLoc DL(N);
  SDValue LowBit = DAG.getZExtOrTrunc(SetCC.getOperand(0), DL, VT);
  SDValue C1 = IsAdd ? DAG.getConstant(CN->getAPIntValue() + 1, DL, VT) :
                       DAG.getConstant(CN->getAPIntValue() - 1, DL, VT);
  return DAG.getNode(IsAdd ? ISD::SUB : ISD::ADD, DL, VT, C1, LowBit);
}

/// Try to fold an add/sub with a constant operand, where the other operand is
/// a logical shift of a 'not' value that moves the sign bit to the LSB, into
/// a shift and an add with a different constant.
static SDValue foldAddSubOfSignBit(SDNode *N, SelectionDAG &DAG) {
  assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Expecting add or sub");

  // We need a constant operand for the add/sub, and the other operand is a
  // logical shift right: add (srl), C or sub C, (srl).
  // TODO - support non-uniform vector amounts.
  bool IsAdd = N->getOpcode() == ISD::ADD;
  SDValue ConstantOp = IsAdd ? N->getOperand(1) : N->getOperand(0);
  SDValue ShiftOp = IsAdd ? N->getOperand(0) : N->getOperand(1);
  ConstantSDNode *C = isConstOrConstSplat(ConstantOp);
  if (!C || ShiftOp.getOpcode() != ISD::SRL)
    return SDValue();

  // The shift must be of a 'not' value.
  SDValue Not = ShiftOp.getOperand(0);
  if (!Not.hasOneUse() || !isBitwiseNot(Not))
    return SDValue();

  // The shift must be moving the sign bit to the least-significant-bit.
  EVT VT = ShiftOp.getValueType();
  SDValue ShAmt = ShiftOp.getOperand(1);
  ConstantSDNode *ShAmtC = isConstOrConstSplat(ShAmt);
  if (!ShAmtC || ShAmtC->getAPIntValue() != (VT.getScalarSizeInBits() - 1))
    return SDValue();

  // Eliminate the 'not' by adjusting the shift and add/sub constant:
  // add (srl (not X), 31), C --> add (sra X, 31), (C + 1)
  // sub C, (srl (not X), 31) --> add (srl X, 31), (C - 1)
  SDLoc DL(N);
  auto ShOpcode = IsAdd ? ISD::SRA : ISD::SRL;
  SDValue NewShift = DAG.getNode(ShOpcode, DL, VT, Not.getOperand(0), ShAmt);
  APInt NewC = IsAdd ? C->getAPIntValue() + 1 : C->getAPIntValue() - 1;
  return DAG.getNode(ISD::ADD, DL, VT, NewShift, DAG.getConstant(NewC, DL, VT));
}

/// Try to fold a node that behaves like an ADD (note that N isn't necessarily
/// an ISD::ADD here, it could for example be an ISD::OR if we know that there
/// are no common bits set in the operands).
SDValue DAGCombiner::visitADDLike(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  SDLoc DL(N);

  // fold vector ops
  if (VT.isVector()) {
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

    // fold (add x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      return N1;
  }

  // fold (add x, undef) -> undef
  if (N0.isUndef())
    return N0;

  if (N1.isUndef())
    return N1;

  if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
    // canonicalize constant to RHS
    if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
      return DAG.getNode(ISD::ADD, DL, VT, N1, N0);
    // fold (add c1, c2) -> c1+c2
    return DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, N0.getNode(),
                                      N1.getNode());
  }

  // fold (add x, 0) -> x
  if (isNullConstant(N1))
    return N0;

  if (isConstantOrConstantVector(N1, /* NoOpaque */ true)) {
    // fold ((A-c1)+c2) -> (A+(c2-c1))
    if (N0.getOpcode() == ISD::SUB &&
        isConstantOrConstantVector(N0.getOperand(1), /* NoOpaque */ true)) {
      SDValue Sub = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N1.getNode(),
                                               N0.getOperand(1).getNode());
      assert(Sub && "Constant folding failed");
      return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Sub);
    }

    // fold ((c1-A)+c2) -> (c1+c2)-A
    if (N0.getOpcode() == ISD::SUB &&
        isConstantOrConstantVector(N0.getOperand(0), /* NoOpaque */ true)) {
      SDValue Add = DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, N1.getNode(),
                                               N0.getOperand(0).getNode());
      assert(Add && "Constant folding failed");
      return DAG.getNode(ISD::SUB, DL, VT, Add, N0.getOperand(1));
    }

    // add (sext i1 X), 1 -> zext (not i1 X)
    // We don't transform this pattern:
    //   add (zext i1 X), -1 -> sext (not i1 X)
    // because most (?) targets generate better code for the zext form.
    if (N0.getOpcode() == ISD::SIGN_EXTEND && N0.hasOneUse() &&
        isOneOrOneSplat(N1)) {
      SDValue X = N0.getOperand(0);
      if ((!LegalOperations ||
           (TLI.isOperationLegal(ISD::XOR, X.getValueType()) &&
            TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) &&
          X.getScalarValueSizeInBits() == 1) {
        SDValue Not = DAG.getNOT(DL, X, X.getValueType());
        return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Not);
      }
    }

    // Undo the add -> or combine to merge constant offsets from a frame index.
    if (N0.getOpcode() == ISD::OR &&
        isa<FrameIndexSDNode>(N0.getOperand(0)) &&
        isa<ConstantSDNode>(N0.getOperand(1)) &&
        DAG.haveNoCommonBitsSet(N0.getOperand(0), N0.getOperand(1))) {
      SDValue Add0 = DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(1));
      return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Add0);
    }
  }

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // reassociate add
  if (!reassociationCanBreakAddressingModePattern(ISD::ADD, DL, N0, N1)) {
    if (SDValue RADD = reassociateOps(ISD::ADD, DL, N0, N1, N->getFlags()))
      return RADD;
  }
  // fold ((0-A) + B) -> B-A
  if (N0.getOpcode() == ISD::SUB && isNullOrNullSplat(N0.getOperand(0)))
    return DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1));

  // fold (A + (0-B)) -> A-B
  if (N1.getOpcode() == ISD::SUB && isNullOrNullSplat(N1.getOperand(0)))
    return DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(1));

  // fold (A+(B-A)) -> B
  if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
    return N1.getOperand(0);

  // fold ((B-A)+A) -> B
  if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
    return N0.getOperand(0);

  // fold ((A-B)+(C-A)) -> (C-B)
  if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB &&
      N0.getOperand(0) == N1.getOperand(1))
    return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
                       N0.getOperand(1));

  // fold ((A-B)+(B-C)) -> (A-C)
  if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB &&
      N0.getOperand(1) == N1.getOperand(0))
    return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0),
                       N1.getOperand(1));

  // fold (A+(B-(A+C))) to (B-C)
  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
      N0 == N1.getOperand(1).getOperand(0))
    return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
                       N1.getOperand(1).getOperand(1));

  // fold (A+(B-(C+A))) to (B-C)
  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
      N0 == N1.getOperand(1).getOperand(1))
    return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
                       N1.getOperand(1).getOperand(0));

  // fold (A+((B-A)+or-C)) to (B+or-C)
  if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
      N1.getOperand(0).getOpcode() == ISD::SUB &&
      N0 == N1.getOperand(0).getOperand(1))
    return DAG.getNode(N1.getOpcode(), DL, VT, N1.getOperand(0).getOperand(0),
                       N1.getOperand(1));

  // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
  if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) {
    SDValue N00 = N0.getOperand(0);
    SDValue N01 = N0.getOperand(1);
    SDValue N10 = N1.getOperand(0);
    SDValue N11 = N1.getOperand(1);

    if (isConstantOrConstantVector(N00) || isConstantOrConstantVector(N10))
      return DAG.getNode(ISD::SUB, DL, VT,
                         DAG.getNode(ISD::ADD, SDLoc(N0), VT, N00, N10),
                         DAG.getNode(ISD::ADD, SDLoc(N1), VT, N01, N11));
  }

  // fold (add (umax X, C), -C) --> (usubsat X, C)
  if (N0.getOpcode() == ISD::UMAX && hasOperation(ISD::USUBSAT, VT)) {
    auto MatchUSUBSAT = [](ConstantSDNode *Max, ConstantSDNode *Op) {
      return (!Max && !Op) ||
             (Max && Op && Max->getAPIntValue() == (-Op->getAPIntValue()));
    };
    if (ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchUSUBSAT,
                                  /*AllowUndefs*/ true))
      return DAG.getNode(ISD::USUBSAT, DL, VT, N0.getOperand(0),
                         N0.getOperand(1));
  }

  if (SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  if (isOneOrOneSplat(N1)) {
    // fold (add (xor a, -1), 1) -> (sub 0, a)
    if (isBitwiseNot(N0))
      return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                         N0.getOperand(0));

    // fold (add (add (xor a, -1), b), 1) -> (sub b, a)
    if (N0.getOpcode() == ISD::ADD ||
        N0.getOpcode() == ISD::UADDO ||
        N0.getOpcode() == ISD::SADDO) {
      SDValue A, Xor;

      if (isBitwiseNot(N0.getOperand(0))) {
        A = N0.getOperand(1);
        Xor = N0.getOperand(0);
      } else if (isBitwiseNot(N0.getOperand(1))) {
        A = N0.getOperand(0);
        Xor = N0.getOperand(1);
      }

      if (Xor)
        return DAG.getNode(ISD::SUB, DL, VT, A, Xor.getOperand(0));
    }

    // Look for:
    //   add (add x, y), 1
    // And if the target does not like this form then turn into:
    //   sub y, (xor x, -1)
    if (!TLI.preferIncOfAddToSubOfNot(VT) && N0.hasOneUse() &&
        N0.getOpcode() == ISD::ADD) {
      SDValue Not = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0),
                                DAG.getAllOnesConstant(DL, VT));
      return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(1), Not);
    }
  }

  // (x - y) + -1  ->  add (xor y, -1), x
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
      isAllOnesOrAllOnesSplat(N1)) {
    SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1), N1);
    return DAG.getNode(ISD::ADD, DL, VT, Xor, N0.getOperand(0));
  }

  if (SDValue Combined = visitADDLikeCommutative(N0, N1, N))
    return Combined;

  if (SDValue Combined = visitADDLikeCommutative(N1, N0, N))
    return Combined;

  return SDValue();
}

SDValue DAGCombiner::visitADD(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  SDLoc DL(N);

  if (SDValue Combined = visitADDLike(N))
    return Combined;

  if (SDValue V = foldAddSubBoolOfMaskedVal(N, DAG))
    return V;

  if (SDValue V = foldAddSubOfSignBit(N, DAG))
    return V;

  // fold (a+b) -> (a|b) iff a and b share no bits.
  if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) &&
      DAG.haveNoCommonBitsSet(N0, N1))
    return DAG.getNode(ISD::OR, DL, VT, N0, N1);

  return SDValue();
}

SDValue DAGCombiner::visitADDSAT(SDNode *N) {
  unsigned Opcode = N->getOpcode();
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  SDLoc DL(N);

  // fold vector ops
  if (VT.isVector()) {
    // TODO SimplifyVBinOp

    // fold (add_sat x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      return N1;
  }

  // fold (add_sat x, undef) -> -1
  if (N0.isUndef() || N1.isUndef())
    return DAG.getAllOnesConstant(DL, VT);

  if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
    // canonicalize constant to RHS
    if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
      return DAG.getNode(Opcode, DL, VT, N1, N0);
    // fold (add_sat c1, c2) -> c3
    return DAG.FoldConstantArithmetic(Opcode, DL, VT, N0.getNode(),
                                      N1.getNode());
  }

  // fold (add_sat x, 0) -> x
  if (isNullConstant(N1))
    return N0;

  // If it cannot overflow, transform into an add.
  if (Opcode == ISD::UADDSAT)
    if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
      return DAG.getNode(ISD::ADD, DL, VT, N0, N1);

  return SDValue();
}

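/// Peel away any TRUNCATE/ZERO_EXTEND/AND-with-1 wrappers introduced by
/// legalization and, if the underlying value is the carry result of an
/// ADDCARRY/SUBCARRY/UADDO/USUBO whose boolean is usable as a 0/1 carry,
/// return that carry; otherwise return an empty SDValue.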
static SDValue getAsCarry(const TargetLowering &TLI, SDValue V) {
  bool Masked = false;

  // First, peel away TRUNCATE/ZERO_EXTEND/AND nodes due to legalization.
  while (true) {
    if (V.getOpcode() == ISD::TRUNCATE || V.getOpcode() == ISD::ZERO_EXTEND) {
      V = V.getOperand(0);
      continue;
    }

    if (V.getOpcode() == ISD::AND && isOneConstant(V.getOperand(1))) {
      Masked = true;
      V = V.getOperand(0);
      continue;
    }

    break;
  }

  // If this is not a carry, return.
  if (V.getResNo() != 1)
    return SDValue();

  if (V.getOpcode() != ISD::ADDCARRY && V.getOpcode() != ISD::SUBCARRY &&
      V.getOpcode() != ISD::UADDO && V.getOpcode() != ISD::USUBO)
    return SDValue();

  EVT VT = V.getNode()->getValueType(0);
  if (!TLI.isOperationLegalOrCustom(V.getOpcode(), VT))
    return SDValue();

  // If the result is masked, then no matter what kind of bool it is we can
  // return. If it isn't, then we need to make sure the bool is known to be
  // either 0 or 1 and not some other value.
  if (Masked ||
      TLI.getBooleanContents(V.getValueType()) ==
          TargetLoweringBase::ZeroOrOneBooleanContent)
    return V;

  return SDValue();
}

/// Given the operands of an add/sub operation, see if the 2nd operand is a
/// masked 0/1 whose source operand is actually known to be 0/-1. If so, invert
/// the opcode and bypass the mask operation.
static SDValue foldAddSubMasked1(bool IsAdd, SDValue N0, SDValue N1,
                                 SelectionDAG &DAG, const SDLoc &DL) {
  if (N1.getOpcode() != ISD::AND || !isOneOrOneSplat(N1->getOperand(1)))
    return SDValue();

  EVT VT = N0.getValueType();
  if (DAG.ComputeNumSignBits(N1.getOperand(0)) != VT.getScalarSizeInBits())
    return SDValue();

  // add N0, (and (AssertSext X, i1), 1) --> sub N0, X
  // sub N0, (and (AssertSext X, i1), 1) --> add N0, X
  return DAG.getNode(IsAdd ? ISD::SUB : ISD::ADD, DL, VT, N0, N1.getOperand(0));
}

/// Helper for doing combines based on N0 and N1 being added to each other.
SDValue DAGCombiner::visitADDLikeCommutative(SDValue N0, SDValue N1,
                                          SDNode *LocReference) {
  EVT VT = N0.getValueType();
  SDLoc DL(LocReference);

  // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::SUB &&
      isNullOrNullSplat(N1.getOperand(0).getOperand(0)))
    return DAG.getNode(ISD::SUB, DL, VT, N0,
                       DAG.getNode(ISD::SHL, DL, VT,
                                   N1.getOperand(0).getOperand(1),
                                   N1.getOperand(1)));

  if (SDValue V = foldAddSubMasked1(true, N0, N1, DAG, DL))
    return V;

  // Look for:
  //   add (add x, 1), y
  // And if the target does not like this form then turn into:
  //   sub y, (xor x, -1)
  if (!TLI.preferIncOfAddToSubOfNot(VT) && N0.hasOneUse() &&
      N0.getOpcode() == ISD::ADD && isOneOrOneSplat(N0.getOperand(1))) {
    SDValue Not = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0),
                              DAG.getAllOnesConstant(DL, VT));
    return DAG.getNode(ISD::SUB, DL, VT, N1, Not);
  }

  // Hoist one-use subtraction by non-opaque constant:
  //   (x - C) + y  ->  (x + y) - C
  // This is necessary because SUB(X,C) -> ADD(X,-C) doesn't work for vectors.
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
      isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) {
    SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), N1);
    return DAG.getNode(ISD::SUB, DL, VT, Add, N0.getOperand(1));
  }
  // Hoist one-use subtraction from non-opaque constant:
  //   (C - x) + y  ->  (y - x) + C
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
      isConstantOrConstantVector(N0.getOperand(0), /*NoOpaques=*/true)) {
    SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1));
    return DAG.getNode(ISD::ADD, DL, VT, Sub, N0.getOperand(0));
  }

  // If the target's bool is represented as 0/1, prefer to make this 'sub 0/1'
  // rather than 'add 0/-1' (the zext should get folded).
  // add (sext i1 Y), X --> sub X, (zext i1 Y)
  if (N0.getOpcode() == ISD::SIGN_EXTEND &&
      N0.getOperand(0).getScalarValueSizeInBits() == 1 &&
      TLI.getBooleanContents(VT) == TargetLowering::ZeroOrOneBooleanContent) {
    SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
    return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt);
  }

  // add X, (sextinreg Y i1) -> sub X, (and Y 1)
  if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1));
    if (TN->getVT() == MVT::i1) {
      SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0),
                                 DAG.getConstant(1, DL, VT));
      return DAG.getNode(ISD::SUB, DL, VT, N0, ZExt);
    }
  }

  // (add X, (addcarry Y, 0, Carry)) -> (addcarry X, Y, Carry)
  if (N1.getOpcode() == ISD::ADDCARRY && isNullConstant(N1.getOperand(1)) &&
      N1.getResNo() == 0)
    return DAG.getNode(ISD::ADDCARRY, DL, N1->getVTList(),
                       N0, N1.getOperand(0), N1.getOperand(2));

  // (add X, Carry) -> (addcarry X, 0, Carry)
  if (TLI.isOperationLegalOrCustom(ISD::ADDCARRY, VT))
    if (SDValue Carry = getAsCarry(TLI, N1))
      return DAG.getNode(ISD::ADDCARRY, DL,
                         DAG.getVTList(VT, Carry.getValueType()), N0,
                         DAG.getConstant(0, DL, VT), Carry);

  return SDValue();
}

SDValue DAGCombiner::visitADDC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  SDLoc DL(N);

  // If the flag result is dead, turn this into an ADD.
  if (!N->hasAnyUseOfValue(1))
    return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
                     DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));

  // canonicalize constant to RHS.
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N0C && !N1C)
    return DAG.getNode(ISD::ADDC, DL, N->getVTList(), N1, N0);

  // fold (addc x, 0) -> x + no carry out
  if (isNullConstant(N1))
    return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
                                        DL, MVT::Glue));

  // If it cannot overflow, transform into an add.
  if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
    return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
                     DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));

  return SDValue();
}

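/// Unconditionally invert the boolean value V by XOR'ing it with 1 or with
/// all-ones, as dictated by the target's boolean contents for V's type.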
static SDValue flipBoolean(SDValue V, const SDLoc &DL,
                           SelectionDAG &DAG, const TargetLowering &TLI) {
  EVT VT = V.getValueType();

  SDValue Cst;
  switch (TLI.getBooleanContents(VT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    Cst = DAG.getConstant(1, DL, VT);
    break;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    Cst = DAG.getAllOnesConstant(DL, VT);
    break;
  }

  return DAG.getNode(ISD::XOR, DL, VT, V, Cst);
}

/**
 * Flips a boolean if it is cheaper to compute. If the Force parameter is set,
 * then the flip also occurs if computing the inverse is the same cost.
 * This function returns an empty SDValue in case it cannot flip the boolean
 * without increasing the cost of the computation. If you want to flip a boolean
 * no matter what, use flipBoolean.
 */
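/**
 * For example, under ZeroOrOneBooleanContent a value of the form (xor X, 1)
 * is already an inverted boolean, so X is returned at no extra cost; any
 * other value is flipped only when Force is set.
 */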
static SDValue extractBooleanFlip(SDValue V, SelectionDAG &DAG,
                                  const TargetLowering &TLI,
                                  bool Force) {
  if (Force && isa<ConstantSDNode>(V))
    return flipBoolean(V, SDLoc(V), DAG, TLI);

  if (V.getOpcode() != ISD::XOR)
    return SDValue();

  ConstantSDNode *Const = isConstOrConstSplat(V.getOperand(1), false);
  if (!Const)
    return SDValue();

  EVT VT = V.getValueType();

  bool IsFlip = false;
  switch (TLI.getBooleanContents(VT)) {
    case TargetLowering::ZeroOrOneBooleanContent:
      IsFlip = Const->isOne();
      break;
    case TargetLowering::ZeroOrNegativeOneBooleanContent:
      IsFlip = Const->isAllOnesValue();
      break;
    case TargetLowering::UndefinedBooleanContent:
      IsFlip = (Const->getAPIntValue() & 0x01) == 1;
      break;
  }

  if (IsFlip)
    return V.getOperand(0);
  if (Force)
    return flipBoolean(V, SDLoc(V), DAG, TLI);
  return SDValue();
}

SDValue DAGCombiner::visitADDO(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  bool IsSigned = (ISD::SADDO == N->getOpcode());

  EVT CarryVT = N->getValueType(1);
  SDLoc DL(N);

  // If the flag result is dead, turn this into an ADD.
  if (!N->hasAnyUseOfValue(1))
    return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
                     DAG.getUNDEF(CarryVT));

  // canonicalize constant to RHS.
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
      !DAG.isConstantIntBuildVectorOrConstantInt(N1))
    return DAG.getNode(N->getOpcode(), DL, N->getVTList(), N1, N0);

  // fold (addo x, 0) -> x + no carry out
  if (isNullOrNullSplat(N1))
    return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT));

  if (!IsSigned) {
    // If it cannot overflow, transform into an add.
    if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
      return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
                       DAG.getConstant(0, DL, CarryVT));

    // fold (uaddo (xor a, -1), 1) -> (usub 0, a) and flip carry.
    if (isBitwiseNot(N0) && isOneOrOneSplat(N1)) {
      SDValue Sub = DAG.getNode(ISD::USUBO, DL, N->getVTList(),
                                DAG.getConstant(0, DL, VT), N0.getOperand(0));
      return CombineTo(N, Sub,
                       flipBoolean(Sub.getValue(1), DL, DAG, TLI));
    }

    if (SDValue Combined = visitUADDOLike(N0, N1, N))
      return Combined;

    if (SDValue Combined = visitUADDOLike(N1, N0, N))
      return Combined;
  }

  return SDValue();
}

SDValue DAGCombiner::visitUADDOLike(SDValue N0, SDValue N1, SDNode *N) {
  EVT VT = N0.getValueType();
  if (VT.isVector())
    return SDValue();

  // (uaddo X, (addcarry Y, 0, Carry)) -> (addcarry X, Y, Carry)
  // If Y + 1 cannot overflow.
  if (N1.getOpcode() == ISD::ADDCARRY && isNullConstant(N1.getOperand(1))) {
    SDValue Y = N1.getOperand(0);
    SDValue One = DAG.getConstant(1, SDLoc(N), Y.getValueType());
    if (DAG.computeOverflowKind(Y, One) == SelectionDAG::OFK_Never)
      return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(), N0, Y,
                         N1.getOperand(2));
  }

  // (uaddo X, Carry) -> (addcarry X, 0, Carry)
  if (TLI.isOperationLegalOrCustom(ISD::ADDCARRY, VT))
    if (SDValue Carry = getAsCarry(TLI, N1))
      return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(), N0,
                         DAG.getConstant(0, SDLoc(N), VT), Carry);

  return SDValue();
}

SDValue DAGCombiner::visitADDE(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue CarryIn = N->getOperand(2);

  // canonicalize constant to RHS
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N0C && !N1C)
    return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(),
                       N1, N0, CarryIn);

  // fold (adde x, y, false) -> (addc x, y)
  if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
    return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1);

  return SDValue();
}

SDValue DAGCombiner::visitADDCARRY(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue CarryIn = N->getOperand(2);
  SDLoc DL(N);

  // canonicalize constant to RHS
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N0C && !N1C)
    return DAG.getNode(ISD::ADDCARRY, DL, N->getVTList(), N1, N0, CarryIn);

  // fold (addcarry x, y, false) -> (uaddo x, y)
  if (isNullConstant(CarryIn)) {
    if (!LegalOperations ||
        TLI.isOperationLegalOrCustom(ISD::UADDO, N->getValueType(0)))
      return DAG.getNode(ISD::UADDO, DL, N->getVTList(), N0, N1);
  }

  // fold (addcarry 0, 0, X) -> (and (ext/trunc X), 1) and no carry.
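  // The mask with 1 is needed because the boolean extension may sign-extend
  // (for zero-or-negative-one boolean contents); ANDing normalizes the
  // result to 0 or 1.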
  if (isNullConstant(N0) && isNullConstant(N1)) {
    EVT VT = N0.getValueType();
    EVT CarryVT = CarryIn.getValueType();
    SDValue CarryExt = DAG.getBoolExtOrTrunc(CarryIn, DL, VT, CarryVT);
    AddToWorklist(CarryExt.getNode());
    return CombineTo(N, DAG.getNode(ISD::AND, DL, VT, CarryExt,
                                    DAG.getConstant(1, DL, VT)),
                     DAG.getConstant(0, DL, CarryVT));
  }

  if (SDValue Combined = visitADDCARRYLike(N0, N1, CarryIn, N))
    return Combined;

  if (SDValue Combined = visitADDCARRYLike(N1, N0, CarryIn, N))
    return Combined;

  return SDValue();
}

/**
 * If we are facing some sort of diamond carry propagation pattern, try to
 * break it up to generate something like:
 *   (addcarry X, 0, (addcarry A, B, Z):Carry)
 *
 * The end result is usually an increase in the number of operations required,
 * but because the carry is now linearized, other transforms can kick in and
 * optimize the DAG.
 *
 * Patterns typically look something like
 *            (uaddo A, B)
 *             /       \
 *          Carry      Sum
 *            |          \
 *            | (addcarry *, 0, Z)
 *            |       /
 *             \   Carry
 *              |   /
 * (addcarry X, *, *)
 *
 * But numerous variations exist. Our goal is to identify A, B, X and Z and
 * produce a combine with a single path for carry propagation.
 */
static SDValue combineADDCARRYDiamond(DAGCombiner &Combiner, SelectionDAG &DAG,
                                      SDValue X, SDValue Carry0, SDValue Carry1,
                                      SDNode *N) {
  if (Carry1.getResNo() != 1 || Carry0.getResNo() != 1)
    return SDValue();
  if (Carry1.getOpcode() != ISD::UADDO)
    return SDValue();

  SDValue Z;

  /**
   * First look for a suitable Z. It will present itself in the form of
   * (addcarry Y, 0, Z) or its equivalent (uaddo Y, 1) for Z=true
   */
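  // In the (uaddo Y, 1) form, the constant 1 plays the role of a carry that
  // is known to be true.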
  if (Carry0.getOpcode() == ISD::ADDCARRY &&
      isNullConstant(Carry0.getOperand(1))) {
    Z = Carry0.getOperand(2);
  } else if (Carry0.getOpcode() == ISD::UADDO &&
             isOneConstant(Carry0.getOperand(1))) {
    EVT VT = Combiner.getSetCCResultType(Carry0.getValueType());
    Z = DAG.getConstant(1, SDLoc(Carry0.getOperand(1)), VT);
  } else {
    // We couldn't find a suitable Z.
    return SDValue();
  }

  auto cancelDiamond = [&](SDValue A, SDValue B) {
    SDLoc DL(N);
    SDValue NewY = DAG.getNode(ISD::ADDCARRY, DL, Carry0->getVTList(), A, B, Z);
    Combiner.AddToWorklist(NewY.getNode());
    return DAG.getNode(ISD::ADDCARRY, DL, N->getVTList(), X,
                       DAG.getConstant(0, DL, X.getValueType()),
                       NewY.getValue(1));
  };

  /**
   *      (uaddo A, B)
   *           |
   *          Sum
   *           |
   * (addcarry *, 0, Z)
   */
  if (Carry0.getOperand(0) == Carry1.getValue(0)) {
    return cancelDiamond(Carry1.getOperand(0), Carry1.getOperand(1));
  }

  /**
   * (addcarry A, 0, Z)
   *         |
   *        Sum
   *         |
   *  (uaddo *, B)
   */
  if (Carry1.getOperand(0) == Carry0.getValue(0)) {
    return cancelDiamond(Carry0.getOperand(0), Carry1.getOperand(1));
  }

  if (Carry1.getOperand(1) == Carry0.getValue(0)) {
    return cancelDiamond(Carry1.getOperand(0), Carry0.getOperand(0));
  }

  return SDValue();
}

SDValue DAGCombiner::visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn,
                                       SDNode *N) {
  // fold (addcarry (xor a, -1), b, c) -> (subcarry b, a, !c) and flip carry.
  if (isBitwiseNot(N0))
    if (SDValue NotC = extractBooleanFlip(CarryIn, DAG, TLI, true)) {
      SDLoc DL(N);
      SDValue Sub = DAG.getNode(ISD::SUBCARRY, DL, N->getVTList(), N1,
                                N0.getOperand(0), NotC);
      return CombineTo(N, Sub,
                       flipBoolean(Sub.getValue(1), DL, DAG, TLI));
    }

  // Iff the flag result is dead:
  // (addcarry (add|uaddo X, Y), 0, Carry) -> (addcarry X, Y, Carry)
  // Don't do this if the Carry comes from the uaddo. It won't remove the uaddo
  // or the dependency between the instructions.
  if ((N0.getOpcode() == ISD::ADD ||
       (N0.getOpcode() == ISD::UADDO && N0.getResNo() == 0 &&
        N0.getValue(1) != CarryIn)) &&
      isNullConstant(N1) && !N->hasAnyUseOfValue(1))
    return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(),
                       N0.getOperand(0), N0.getOperand(1), CarryIn);

  /**
   * When one of the addcarry arguments is itself a carry, we may be facing
   * a diamond carry propagation pattern, in which case we try to transform
   * the DAG to ensure linear carry propagation if that is possible.
   */
  if (auto Y = getAsCarry(TLI, N1)) {
    // Because both are carries, Y and Z can be swapped.
    if (auto R = combineADDCARRYDiamond(*this, DAG, N0, Y, CarryIn, N))
      return R;
    if (auto R = combineADDCARRYDiamond(*this, DAG, N0, CarryIn, Y, N))
      return R;
  }

  return SDValue();
}

// Since it may not be valid to emit a fold to zero for vector initializers,
// check if we can before folding.
static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT,
                             SelectionDAG &DAG, bool LegalOperations) {
  if (!VT.isVector())
    return DAG.getConstant(0, DL, VT);
  if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
    return DAG.getConstant(0, DL, VT);
  return SDValue();
}

SDValue DAGCombiner::visitSUB(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  SDLoc DL(N);

  // fold vector ops
  if (VT.isVector()) {
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

    // fold (sub x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;
  }

  // fold (sub x, x) -> 0
  // FIXME: Refactor this and xor and other similar operations together.
  if (N0 == N1)
    return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
      DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
    // fold (sub c1, c2) -> c1-c2
    return DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N0.getNode(),
                                      N1.getNode());
  }

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);

  // fold (sub x, c) -> (add x, -c)
  if (N1C) {
    return DAG.getNode(ISD::ADD, DL, VT, N0,
                       DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
  }

  if (isNullOrNullSplat(N0)) {
    unsigned BitWidth = VT.getScalarSizeInBits();
    // Right-shifting everything out but the sign bit followed by negation is
    // the same as flipping arithmetic/logical shift type without the negation:
    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    if (N1->getOpcode() == ISD::SRA || N1->getOpcode() == ISD::SRL) {
      ConstantSDNode *ShiftAmt = isConstOrConstSplat(N1.getOperand(1));
      if (ShiftAmt && ShiftAmt->getAPIntValue() == (BitWidth - 1)) {
        auto NewSh = N1->getOpcode() == ISD::SRA ? ISD::SRL : ISD::SRA;
        if (!LegalOperations || TLI.isOperationLegal(NewSh, VT))
          return DAG.getNode(NewSh, DL, VT, N1.getOperand(0), N1.getOperand(1));
      }
    }

    // 0 - X --> 0 if the sub is NUW.
    if (N->getFlags().hasNoUnsignedWrap())
      return N0;

    if (DAG.MaskedValueIsZero(N1, ~APInt::getSignMask(BitWidth))) {
      // N1 is either 0 or the minimum signed value. If the sub is NSW, then
      // N1 must be 0 because negating the minimum signed value is undefined.
      if (N->getFlags().hasNoSignedWrap())
        return N0;

      // 0 - X --> X if X is 0 or the minimum signed value.
      return N1;
    }
  }

  // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
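  // In two's complement, -1 - x == ~x: -1 is the all-ones pattern, and
  // subtracting from all-ones flips every bit without borrowing.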
  if (isAllOnesOrAllOnesSplat(N0))
    return DAG.getNode(ISD::XOR, DL, VT, N1, N0);

  // fold (A - (0-B)) -> A+B
  if (N1.getOpcode() == ISD::SUB && isNullOrNullSplat(N1.getOperand(0)))
    return DAG.getNode(ISD::ADD, DL, VT, N0, N1.getOperand(1));

  // fold A-(A-B) -> B
  if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0))
    return N1.getOperand(1);

  // fold (A+B)-A -> B
  if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
    return N0.getOperand(1);

  // fold (A+B)-B -> A
  if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
    return N0.getOperand(0);

  // fold (A+C1)-C2 -> A+(C1-C2)
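  // e.g. (x + 10) - 3 -> x + 7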
  if (N0.getOpcode() == ISD::ADD &&
      isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
      isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
    SDValue NewC = DAG.FoldConstantArithmetic(
        ISD::SUB, DL, VT, N0.getOperand(1).getNode(), N1.getNode());
    assert(NewC && "Constant folding failed");
    return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), NewC);
  }

  // fold C2-(A+C1) -> (C2-C1)-A
  if (N1.getOpcode() == ISD::ADD) {
    SDValue N11 = N1.getOperand(1);
    if (isConstantOrConstantVector(N0, /* NoOpaques */ true) &&
        isConstantOrConstantVector(N11, /* NoOpaques */ true)) {
      SDValue NewC = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N0.getNode(),
                                                N11.getNode());
      assert(NewC && "Constant folding failed");
      return DAG.getNode(ISD::SUB, DL, VT, NewC, N1.getOperand(0));
    }
  }

  // fold (A-C1)-C2 -> A-(C1+C2)
  if (N0.getOpcode() == ISD::SUB &&
      isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
      isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
    SDValue NewC = DAG.FoldConstantArithmetic(
        ISD::ADD, DL, VT, N0.getOperand(1).getNode(), N1.getNode());
    assert(NewC && "Constant folding failed");
    return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), NewC);
  }

  // fold (c1-A)-c2 -> (c1-c2)-A
  if (N0.getOpcode() == ISD::SUB &&
      isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
      isConstantOrConstantVector(N0.getOperand(0), /* NoOpaques */ true)) {
    SDValue NewC = DAG.FoldConstantArithmetic(
        ISD::SUB, DL, VT, N0.getOperand(0).getNode(), N1.getNode());
    assert(NewC && "Constant folding failed");
    return DAG.getNode(ISD::SUB, DL, VT, NewC, N0.getOperand(1));
  }

  // fold ((A+(B+or-C))-B) -> A+or-C
  if (N0.getOpcode() == ISD::ADD &&
      (N0.getOperand(1).getOpcode() == ISD::SUB ||
       N0.getOperand(1).getOpcode() == ISD::ADD) &&
      N0.getOperand(1).getOperand(0) == N1)
    return DAG.getNode(N0.getOperand(1).getOpcode(), DL, VT, N0.getOperand(0),
                       N0.getOperand(1).getOperand(1));

  // fold ((A+(C+B))-B) -> A+C
  if (N0.getOpcode() == ISD::ADD && N0.getOperand(1).getOpcode() == ISD::ADD &&
      N0.getOperand(1).getOperand(1) == N1)
    return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0),
                       N0.getOperand(1).getOperand(0));

  // fold ((A-(B-C))-C) -> A-B
  if (N0.getOpcode() == ISD::SUB && N0.getOperand(1).getOpcode() == ISD::SUB &&
      N0.getOperand(1).getOperand(1) == N1)
    return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0),
                       N0.getOperand(1).getOperand(0));

  // fold (A-(B-C)) -> A+(C-B)
  if (N1.getOpcode() == ISD::SUB && N1.hasOneUse())
    return DAG.getNode(ISD::ADD, DL, VT, N0,
                       DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(1),
                                   N1.getOperand(0)));

  // fold (X - (-Y * Z)) -> (X + (Y * Z))
  if (N1.getOpcode() == ISD::MUL && N1.hasOneUse()) {
    if (N1.getOperand(0).getOpcode() == ISD::SUB &&
        isNullOrNullSplat(N1.getOperand(0).getOperand(0))) {
      SDValue Mul = DAG.getNode(ISD::MUL, DL, VT,
                                N1.getOperand(0).getOperand(1),
                                N1.getOperand(1));
      return DAG.getNode(ISD::ADD, DL, VT, N0, Mul);
    }
    if (N1.getOperand(1).getOpcode() == ISD::SUB &&
        isNullOrNullSplat(N1.getOperand(1).getOperand(0))) {
      SDValue Mul = DAG.getNode(ISD::MUL, DL, VT,
                                N1.getOperand(0),
                                N1.getOperand(1).getOperand(1));
      return DAG.getNode(ISD::ADD, DL, VT, N0, Mul);
    }
  }

  // If either operand of a sub is undef, the result is undef
  if (N0.isUndef())
    return N0;
  if (N1.isUndef())
    return N1;

  if (SDValue V = foldAddSubBoolOfMaskedVal(N, DAG))
    return V;

  if (SDValue V = foldAddSubOfSignBit(N, DAG))
    return V;

  if (SDValue V = foldAddSubMasked1(false, N0, N1, DAG, SDLoc(N)))
    return V;

  // (x - y) - 1  ->  add (xor y, -1), x
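  // Uses the identity ~y == -y - 1, so x + ~y == (x - y) - 1.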
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB && isOneOrOneSplat(N1)) {
    SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1),
                              DAG.getAllOnesConstant(DL, VT));
    return DAG.getNode(ISD::ADD, DL, VT, Xor, N0.getOperand(0));
  }

  // Look for:
  //   sub y, (xor x, -1)
  // And if the target does not like this form then turn into:
  //   add (add x, y), 1
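  // This relies on ~x == -x - 1, so y - ~x == y + x + 1.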
  if (TLI.preferIncOfAddToSubOfNot(VT) && N1.hasOneUse() && isBitwiseNot(N1)) {
    SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, N1.getOperand(0));
    return DAG.getNode(ISD::ADD, DL, VT, Add, DAG.getConstant(1, DL, VT));
  }

  // Hoist one-use addition by non-opaque constant:
  //   (x + C) - y  ->  (x - y) + C
  if (N0.hasOneUse() && N0.getOpcode() == ISD::ADD &&
      isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) {
    SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), N1);
    return DAG.getNode(ISD::ADD, DL, VT, Sub, N0.getOperand(1));
  }
  // y - (x + C)  ->  (y - x) - C
  if (N1.hasOneUse() && N1.getOpcode() == ISD::ADD &&
      isConstantOrConstantVector(N1.getOperand(1), /*NoOpaques=*/true)) {
    SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(0));
    return DAG.getNode(ISD::SUB, DL, VT, Sub, N1.getOperand(1));
  }
  // (x - C) - y  ->  (x - y) - C
  // This is necessary because SUB(X,C) -> ADD(X,-C) doesn't work for vectors.
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
      isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) {
    SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), N1);
    return DAG.getNode(ISD::SUB, DL, VT, Sub, N0.getOperand(1));
  }
  // (C - x) - y  ->  C - (x + y)
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
      isConstantOrConstantVector(N0.getOperand(0), /*NoOpaques=*/true)) {
    SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(1), N1);
    return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), Add);
  }

  // If the target's bool is represented as 0/-1, prefer to make this 'add 0/-1'
  // rather than 'sub 0/1' (the sext should get folded).
  // sub X, (zext i1 Y) --> add X, (sext i1 Y)
  if (N1.getOpcode() == ISD::ZERO_EXTEND &&
      N1.getOperand(0).getScalarValueSizeInBits() == 1 &&
      TLI.getBooleanContents(VT) ==
          TargetLowering::ZeroOrNegativeOneBooleanContent) {
    SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, N1.getOperand(0));
    return DAG.getNode(ISD::ADD, DL, VT, N0, SExt);
  }

  // fold Y = sra (X, size(X)-1); sub (xor (X, Y), Y) -> (abs X)
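  // This is the classic branchless abs idiom: Y is the sign mask of X
  // (all-ones if negative, zero otherwise), and (X ^ Y) - Y negates X
  // exactly when it is negative.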
  if (TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
    if (N0.getOpcode() == ISD::XOR && N1.getOpcode() == ISD::SRA) {
      SDValue X0 = N0.getOperand(0), X1 = N0.getOperand(1);
      SDValue S0 = N1.getOperand(0);
      if ((X0 == S0 && X1 == N1) || (X0 == N1 && X1 == S0)) {
        unsigned OpSizeInBits = VT.getScalarSizeInBits();
        if (ConstantSDNode *C = isConstOrConstSplat(N1.getOperand(1)))
          if (C->getAPIntValue() == (OpSizeInBits - 1))
            return DAG.getNode(ISD::ABS, SDLoc(N), VT, S0);
      }
    }
  }

  // If the relocation model supports it, consider symbol offsets.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
    if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
      // fold (sub Sym, c) -> Sym-c
      if (N1C && GA->getOpcode() == ISD::GlobalAddress)
        return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT,
                                    GA->getOffset() -
                                        (uint64_t)N1C->getSExtValue());
      // fold (sub Sym+c1, Sym+c2) -> c1-c2
      if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
        if (GA->getGlobal() == GB->getGlobal())
          return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
                                 DL, VT);
    }

  // sub X, (sextinreg Y i1) -> add X, (and Y 1)
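  // sextinreg of an i1 yields 0 or -1, so subtracting it is the same as
  // adding 0 or 1, i.e. (and Y, 1).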
  if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1));
    if (TN->getVT() == MVT::i1) {
      SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0),
                                 DAG.getConstant(1, DL, VT));
      return DAG.getNode(ISD::ADD, DL, VT, N0, ZExt);
    }
  }

  // Prefer an add for more folding potential and possibly better codegen:
  // sub N0, (lshr N10, width-1) --> add N0, (ashr N10, width-1)
  if (!LegalOperations && N1.getOpcode() == ISD::SRL && N1.hasOneUse()) {
    SDValue ShAmt = N1.getOperand(1);
    ConstantSDNode *ShAmtC = isConstOrConstSplat(ShAmt);
    if (ShAmtC &&
        ShAmtC->getAPIntValue() == (N1.getScalarValueSizeInBits() - 1)) {
      SDValue SRA = DAG.getNode(ISD::SRA, DL, VT, N1.getOperand(0), ShAmt);
      return DAG.getNode(ISD::ADD, DL, VT, N0, SRA);
    }
  }

  if (TLI.isOperationLegalOrCustom(ISD::ADDCARRY, VT)) {
    // (sub Carry, X)  ->  (addcarry (sub 0, X), 0, Carry)
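    // Carry is 0 or 1, so Carry - X == (0 - X) + Carry, which is exactly
    // what the addcarry computes.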
    if (SDValue Carry = getAsCarry(TLI, N0)) {
      SDValue X = N1;
      SDValue Zero = DAG.getConstant(0, DL, VT);
      SDValue NegX = DAG.getNode(ISD::SUB, DL, VT, Zero, X);
      return DAG.getNode(ISD::ADDCARRY, DL,
                         DAG.getVTList(VT, Carry.getValueType()), NegX, Zero,
                         Carry);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitSUBSAT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  SDLoc DL(N);

  // fold vector ops
  if (VT.isVector()) {
    // TODO SimplifyVBinOp

    // fold (sub_sat x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;
  }

  // fold (sub_sat x, undef) -> 0
  if (N0.isUndef() || N1.isUndef())
    return DAG.getConstant(0, DL, VT);

  // fold (sub_sat x, x) -> 0
  if (N0 == N1)
    return DAG.getConstant(0, DL, VT);

  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
      DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
    // fold (sub_sat c1, c2) -> c3
    return DAG.FoldConstantArithmetic(N->getOpcode(), DL, VT, N0.getNode(),
                                      N1.getNode());
  }

  // fold (sub_sat x, 0) -> x
  if (isNullConstant(N1))
    return N0;

  return SDValue();
}

SDValue DAGCombiner::visitSUBC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  SDLoc DL(N);

  // If the flag result is dead, turn this into an SUB.
  if (!N->hasAnyUseOfValue(1))
    return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1),
                     DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));

  // fold (subc x, x) -> 0 + no borrow
  if (N0 == N1)
    return CombineTo(N, DAG.getConstant(0, DL, VT),
                     DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));

  // fold (subc x, 0) -> x + no borrow
  if (isNullConstant(N1))
    return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));

  // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow
  if (isAllOnesConstant(N0))
    return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0),
                     DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));

  return SDValue();
}

SDValue DAGCombiner::visitSUBO(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  bool IsSigned = (ISD::SSUBO == N->getOpcode());

  EVT CarryVT = N->getValueType(1);
  SDLoc DL(N);

  // If the flag result is dead, turn this into an SUB.
  if (!N->hasAnyUseOfValue(1))
    return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1),
                     DAG.getUNDEF(CarryVT));

  // fold (subo x, x) -> 0 + no borrow
  if (N0 == N1)
    return CombineTo(N, DAG.getConstant(0, DL, VT),
                     DAG.getConstant(0, DL, CarryVT));

  ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);

  // fold (subo x, c) -> (addo x, -c)
  if (IsSigned && N1C && !N1C->getAPIntValue().isMinSignedValue()) {
    return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N0,
                       DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
  }

  // fold (subo x, 0) -> x + no borrow
  if (isNullOrNullSplat(N1))
    return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT));

  // Canonicalize (usubo -1, x) -> ~x, i.e. (xor x, -1) + no borrow
  if (!IsSigned && isAllOnesOrAllOnesSplat(N0))
    return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0),
                     DAG.getConstant(0, DL, CarryVT));

  return SDValue();
}

SDValue DAGCombiner::visitSUBE(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue CarryIn = N->getOperand(2);

  // fold (sube x, y, false) -> (subc x, y)
  if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
    return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1);

  return SDValue();
}

SDValue DAGCombiner::visitSUBCARRY(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue CarryIn = N->getOperand(2);

  // fold (subcarry x, y, false) -> (usubo x, y)
  if (isNullConstant(CarryIn)) {
    if (!LegalOperations ||
        TLI.isOperationLegalOrCustom(ISD::USUBO, N->getValueType(0)))
      return DAG.getNode(ISD::USUBO, SDLoc(N), N->getVTList(), N0, N1);
  }

  return SDValue();
}

// Notice that "mulfix" can be any of SMULFIX, SMULFIXSAT, UMULFIX and
// UMULFIXSAT here.
SDValue DAGCombiner::visitMULFIX(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue Scale = N->getOperand(2);
  EVT VT = N0.getValueType();

  // fold (mulfix x, undef, scale) -> 0
  if (N0.isUndef() || N1.isUndef())
    return DAG.getConstant(0, SDLoc(N), VT);

  // Canonicalize constant to RHS (vector doesn't have to splat)
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
     !DAG.isConstantIntBuildVectorOrConstantInt(N1))
    return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0, Scale);

  // fold (mulfix x, 0, scale) -> 0
  if (isNullConstant(N1))
    return DAG.getConstant(0, SDLoc(N), VT);

  return SDValue();
}

SDValue DAGCombiner::visitMUL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();

  // fold (mul x, undef) -> 0
  if (N0.isUndef() || N1.isUndef())
    return DAG.getConstant(0, SDLoc(N), VT);

  bool N0IsConst = false;
  bool N1IsConst = false;
  bool N1IsOpaqueConst = false;
  bool N0IsOpaqueConst = false;
  APInt ConstValue0, ConstValue1;
  // fold vector ops
  if (VT.isVector()) {
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

    N0IsConst = ISD::isConstantSplatVector(N0.getNode(), ConstValue0);
    N1IsConst = ISD::isConstantSplatVector(N1.getNode(), ConstValue1);
    assert((!N0IsConst ||
            ConstValue0.getBitWidth() == VT.getScalarSizeInBits()) &&
           "Splat APInt should be element width");
    assert((!N1IsConst ||
            ConstValue1.getBitWidth() == VT.getScalarSizeInBits()) &&
           "Splat APInt should be element width");
  } else {
    N0IsConst = isa<ConstantSDNode>(N0);
    if (N0IsConst) {
      ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue();
      N0IsOpaqueConst = cast<ConstantSDNode>(N0)->isOpaque();
    }
    N1IsConst = isa<ConstantSDNode>(N1);
    if (N1IsConst) {
      ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue();
      N1IsOpaqueConst = cast<ConstantSDNode>(N1)->isOpaque();
    }
  }

  // fold (mul c1, c2) -> c1*c2
  if (N0IsConst && N1IsConst && !N0IsOpaqueConst && !N1IsOpaqueConst)
    return DAG.FoldConstantArithmetic(ISD::MUL, SDLoc(N), VT,
                                      N0.getNode(), N1.getNode());

  // canonicalize constant to RHS (vector doesn't have to splat)
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
     !DAG.isConstantIntBuildVectorOrConstantInt(N1))
    return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0);
  // fold (mul x, 0) -> 0
  if (N1IsConst && ConstValue1.isNullValue())
    return N1;
  // fold (mul x, 1) -> x
  if (N1IsConst && ConstValue1.isOneValue())
    return N0;

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // fold (mul x, -1) -> 0-x
  if (N1IsConst && ConstValue1.isAllOnesValue()) {
    SDLoc DL(N);
    return DAG.getNode(ISD::SUB, DL, VT,
                       DAG.getConstant(0, DL, VT), N0);
  }
  // fold (mul x, (1 << c)) -> x << c
  if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
      DAG.isKnownToBeAPowerOfTwo(N1) &&
      (!VT.isVector() || Level <= AfterLegalizeVectorOps)) {
    SDLoc DL(N);
    SDValue LogBase2 = BuildLogBase2(N1, DL);
    EVT ShiftVT = getShiftAmountTy(N0.getValueType());
    SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
    return DAG.getNode(ISD::SHL, DL, VT, N0, Trunc);
  }
  // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
  if (N1IsConst && !N1IsOpaqueConst && (-ConstValue1).isPowerOf2()) {
    unsigned Log2Val = (-ConstValue1).logBase2();
    SDLoc DL(N);
    // FIXME: If the input is something that is easily negated (e.g. a
    // single-use add), we should put the negate there.
    return DAG.getNode(ISD::SUB, DL, VT,
                       DAG.getConstant(0, DL, VT),
                       DAG.getNode(ISD::SHL, DL, VT, N0,
                            DAG.getConstant(Log2Val, DL,
                                      getShiftAmountTy(N0.getValueType()))));
  }

  // Try to transform multiply-by-(power-of-2 +/- 1) into shift and add/sub.
  // mul x, (2^N + 1) --> add (shl x, N), x
  // mul x, (2^N - 1) --> sub (shl x, N), x
  // Examples: x * 33 --> (x << 5) + x
  //           x * 15 --> (x << 4) - x
  //           x * -33 --> -((x << 5) + x)
  //           x * -15 --> -((x << 4) - x) ; this reduces --> x - (x << 4)
  if (N1IsConst && TLI.decomposeMulByConstant(*DAG.getContext(), VT, N1)) {
    // TODO: We could handle more general decomposition of any constant by
    //       having the target set a limit on number of ops and making a
    //       callback to determine that sequence (similar to sqrt expansion).
    unsigned MathOp = ISD::DELETED_NODE;
    APInt MulC = ConstValue1.abs();
    if ((MulC - 1).isPowerOf2())
      MathOp = ISD::ADD;
    else if ((MulC + 1).isPowerOf2())
      MathOp = ISD::SUB;

    if (MathOp != ISD::DELETED_NODE) {
      unsigned ShAmt =
          MathOp == ISD::ADD ? (MulC - 1).logBase2() : (MulC + 1).logBase2();
      assert(ShAmt < VT.getScalarSizeInBits() &&
             "multiply-by-constant generated out of bounds shift");
      SDLoc DL(N);
      SDValue Shl =
          DAG.getNode(ISD::SHL, DL, VT, N0, DAG.getConstant(ShAmt, DL, VT));
      SDValue R = DAG.getNode(MathOp, DL, VT, Shl, N0);
      if (ConstValue1.isNegative())
        R = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), R);
      return R;
    }
  }

  // (mul (shl X, c1), c2) -> (mul X, c2 << c1)
  if (N0.getOpcode() == ISD::SHL &&
      isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
      isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
    SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT, N1, N0.getOperand(1));
    if (isConstantOrConstantVector(C3))
      return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), C3);
  }

  // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
  // use.
  {
    SDValue Sh(nullptr, 0), Y(nullptr, 0);

    // Check for both (mul (shl X, C), Y)  and  (mul Y, (shl X, C)).
    if (N0.getOpcode() == ISD::SHL &&
        isConstantOrConstantVector(N0.getOperand(1)) &&
        N0.getNode()->hasOneUse()) {
      Sh = N0; Y = N1;
    } else if (N1.getOpcode() == ISD::SHL &&
               isConstantOrConstantVector(N1.getOperand(1)) &&
               N1.getNode()->hasOneUse()) {
      Sh = N1; Y = N0;
    }

    if (Sh.getNode()) {
      SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT, Sh.getOperand(0), Y);
      return DAG.getNode(ISD::SHL, SDLoc(N), VT, Mul, Sh.getOperand(1));
    }
  }

  // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
  if (DAG.isConstantIntBuildVectorOrConstantInt(N1) &&
      N0.getOpcode() == ISD::ADD &&
      DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1)) &&
      isMulAddWithConstProfitable(N, N0, N1))
      return DAG.getNode(ISD::ADD, SDLoc(N), VT,
                         DAG.getNode(ISD::MUL, SDLoc(N0), VT,
                                     N0.getOperand(0), N1),
                         DAG.getNode(ISD::MUL, SDLoc(N1), VT,
                                     N0.getOperand(1), N1));

  // reassociate mul
  if (SDValue RMUL = reassociateOps(ISD::MUL, SDLoc(N), N0, N1, N->getFlags()))
    return RMUL;

  return SDValue();
}

/// Return true if divmod libcall is available.
static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
                                     const TargetLowering &TLI) {
  RTLIB::Libcall LC;
  EVT NodeType = Node->getValueType(0);
  if (!NodeType.isSimple())
    return false;
  switch (NodeType.getSimpleVT().SimpleTy) {
  default: return false; // No libcall for vector types.
  case MVT::i8:   LC= isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
  case MVT::i16:  LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
  case MVT::i32:  LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
  case MVT::i64:  LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
  case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
  }

  return TLI.getLibcallName(LC) != nullptr;
}

/// Issue divrem if both quotient and remainder are needed.
SDValue DAGCombiner::useDivRem(SDNode *Node) {
  if (Node->use_empty())
    return SDValue(); // This is a dead node, leave it alone.

  unsigned Opcode = Node->getOpcode();
  bool isSigned = (Opcode == ISD::SDIV) || (Opcode == ISD::SREM);
  unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;

  // DivMod libcalls can still handle types that are not legal.
  EVT VT = Node->getValueType(0);
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT))
    return SDValue();

  // If DIVREM is going to get expanded into a libcall,
  // but there is no libcall available, then don't combine.
  if (!TLI.isOperationLegalOrCustom(DivRemOpc, VT) &&
      !isDivRemLibcallAvailable(Node, isSigned, TLI))
    return SDValue();

  // If div is legal, it's better to do the normal expansion
  unsigned OtherOpcode = 0;
  if ((Opcode == ISD::SDIV) || (Opcode == ISD::UDIV)) {
    OtherOpcode = isSigned ? ISD::SREM : ISD::UREM;
    if (TLI.isOperationLegalOrCustom(Opcode, VT))
      return SDValue();
  } else {
    OtherOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
    if (TLI.isOperationLegalOrCustom(OtherOpcode, VT))
      return SDValue();
  }

  SDValue Op0 = Node->getOperand(0);
  SDValue Op1 = Node->getOperand(1);
  SDValue combined;
  for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
         UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User == Node || User->getOpcode() == ISD::DELETED_NODE ||
        User->use_empty())
      continue;
    // Convert the other matching node(s), too;
    // otherwise, the DIVREM may get target-legalized into something
    // target-specific that we won't be able to recognize.
    unsigned UserOpc = User->getOpcode();
    if ((UserOpc == Opcode || UserOpc == OtherOpcode || UserOpc == DivRemOpc) &&
        User->getOperand(0) == Op0 &&
        User->getOperand(1) == Op1) {
      if (!combined) {
        if (UserOpc == OtherOpcode) {
          SDVTList VTs = DAG.getVTList(VT, VT);
          combined = DAG.getNode(DivRemOpc, SDLoc(Node), VTs, Op0, Op1);
        } else if (UserOpc == DivRemOpc) {
          combined = SDValue(User, 0);
        } else {
          assert(UserOpc == Opcode);
          continue;
        }
      }
      if (UserOpc == ISD::SDIV || UserOpc == ISD::UDIV)
        CombineTo(User, combined);
      else if (UserOpc == ISD::SREM || UserOpc == ISD::UREM)
        CombineTo(User, combined.getValue(1));
    }
  }
  return combined;
}

static SDValue simplifyDivRem(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  unsigned Opc = N->getOpcode();
  bool IsDiv = (ISD::SDIV == Opc) || (ISD::UDIV == Opc);
  ConstantSDNode *N1C = isConstOrConstSplat(N1);

  // X / undef -> undef
  // X % undef -> undef
  // X / 0 -> undef
  // X % 0 -> undef
  // NOTE: This includes vectors where any divisor element is zero/undef.
  if (DAG.isUndef(Opc, {N0, N1}))
    return DAG.getUNDEF(VT);

  // undef / X -> 0
  // undef % X -> 0
  if (N0.isUndef())
    return DAG.getConstant(0, DL, VT);

  // 0 / X -> 0
  // 0 % X -> 0
  ConstantSDNode *N0C = isConstOrConstSplat(N0);
  if (N0C && N0C->isNullValue())
    return N0;

  // X / X -> 1
  // X % X -> 0
  if (N0 == N1)
    return DAG.getConstant(IsDiv ? 1 : 0, DL, VT);

  // X / 1 -> X
  // X % 1 -> 0
  // If this is a boolean op (single-bit element type), we can't have
  // division-by-zero or remainder-by-zero, so assume the divisor is 1.
  // TODO: Similarly, if we're zero-extending a boolean divisor, then assume
  // it's a 1.
  if ((N1C && N1C->isOne()) || (VT.getScalarType() == MVT::i1))
    return IsDiv ? N0 : DAG.getConstant(0, DL, VT);

  return SDValue();
}

SDValue DAGCombiner::visitSDIV(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  EVT CCVT = getSetCCResultType(VT);

  // fold vector ops
  if (VT.isVector())
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

  SDLoc DL(N);

  // fold (sdiv c1, c2) -> c1/c2
  ConstantSDNode *N0C = isConstOrConstSplat(N0);
  ConstantSDNode *N1C = isConstOrConstSplat(N1);
  if (N0C && N1C && !N0C->isOpaque() && !N1C->isOpaque())
    return DAG.FoldConstantArithmetic(ISD::SDIV, DL, VT, N0C, N1C);
  // fold (sdiv X, -1) -> 0-X
  if (N1C && N1C->isAllOnesValue())
    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), N0);
  // fold (sdiv X, MIN_SIGNED) -> select(X == MIN_SIGNED, 1, 0)
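  // Only MIN_SIGNED divided by itself yields 1; for any other X we have
  // |X| < |MIN_SIGNED|, so the quotient is 0.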
  if (N1C && N1C->getAPIntValue().isMinSignedValue())
    return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
                         DAG.getConstant(1, DL, VT),
                         DAG.getConstant(0, DL, VT));

  if (SDValue V = simplifyDivRem(N, DAG))
    return V;

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // If we know the sign bits of both operands are zero, strength reduce to a
  // udiv instead.  Handles (X&15) /s 4 -> (X&15) >> 2
  if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::UDIV, DL, N1.getValueType(), N0, N1);

  if (SDValue V = visitSDIVLike(N0, N1, N)) {
    // If the corresponding remainder node exists, update its users with
    // (Dividend - (Quotient * Divisor)).
    if (SDNode *RemNode = DAG.getNodeIfExists(ISD::SREM, N->getVTList(),
                                              { N0, N1 })) {
      SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, V, N1);
      SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
      AddToWorklist(Mul.getNode());
      AddToWorklist(Sub.getNode());
      CombineTo(RemNode, Sub);
    }
    return V;
  }

  // sdiv, srem -> sdivrem
  // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
  // true.  Otherwise, we break the simplification logic in visitREM().
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
    if (SDValue DivRem = useDivRem(N))
        return DivRem;

  return SDValue();
}

SDValue DAGCombiner::visitSDIVLike(SDValue N0, SDValue N1, SDNode *N) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  EVT CCVT = getSetCCResultType(VT);
  unsigned BitWidth = VT.getScalarSizeInBits();

  // Helper for determining whether a value is a power-2 constant scalar or a
  // vector of such elements.
  auto IsPowerOfTwo = [](ConstantSDNode *C) {
    if (C->isNullValue() || C->isOpaque())
      return false;
    if (C->getAPIntValue().isPowerOf2())
      return true;
    if ((-C->getAPIntValue()).isPowerOf2())
      return true;
    return false;
  };

  // fold (sdiv X, pow2) -> simple ops after legalize
  // FIXME: We check for the exact bit here because the generic lowering gives
  // better results in that case. The target-specific lowering should learn how
  // to handle exact sdivs efficiently.
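  // The sequence below computes sdiv X, (+/-2^k) roughly as
  //   (X + ((X >> bw-1) >>u (bw-k))) >> k
  // (adding 2^k - 1 to negative dividends to round toward zero), then fixes
  // up the k == 0 cases (divisor +/-1) and negates the result when the
  // divisor is negative.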
  if (!N->getFlags().hasExact() && ISD::matchUnaryPredicate(N1, IsPowerOfTwo)) {
    // Target-specific implementation of sdiv x, pow2.
    if (SDValue Res = BuildSDIVPow2(N))
      return Res;

    // Create constants that are functions of the shift amount value.
    EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType());
    SDValue Bits = DAG.getConstant(BitWidth, DL, ShiftAmtTy);
    SDValue C1 = DAG.getNode(ISD::CTTZ, DL, VT, N1);
    C1 = DAG.getZExtOrTrunc(C1, DL, ShiftAmtTy);
    SDValue Inexact = DAG.getNode(ISD::SUB, DL, ShiftAmtTy, Bits, C1);
    if (!isConstantOrConstantVector(Inexact))
      return SDValue();

    // Splat the sign bit into the register
    SDValue Sign = DAG.getNode(ISD::SRA, DL, VT, N0,
                               DAG.getConstant(BitWidth - 1, DL, ShiftAmtTy));
    AddToWorklist(Sign.getNode());

    // Add (N0 < 0) ? abs2 - 1 : 0;
    SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, Sign, Inexact);
    AddToWorklist(Srl.getNode());
    SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Srl);
    AddToWorklist(Add.getNode());
    SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Add, C1);
    AddToWorklist(Sra.getNode());

    // Special case: (sdiv X, 1) -> X
    // Special Case: (sdiv X, -1) -> 0-X
    SDValue One = DAG.getConstant(1, DL, VT);
    SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
    SDValue IsOne = DAG.getSetCC(DL, CCVT, N1, One, ISD::SETEQ);
    SDValue IsAllOnes = DAG.getSetCC(DL, CCVT, N1, AllOnes, ISD::SETEQ);
    SDValue IsOneOrAllOnes = DAG.getNode(ISD::OR, DL, CCVT, IsOne, IsAllOnes);
    Sra = DAG.getSelect(DL, VT, IsOneOrAllOnes, N0, Sra);

    // If dividing by a positive value, we're done. Otherwise, the result must
    // be negated.
    SDValue Zero = DAG.getConstant(0, DL, VT);
    SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, Zero, Sra);

    // FIXME: Use SELECT_CC once we improve SELECT_CC constant-folding.
    SDValue IsNeg = DAG.getSetCC(DL, CCVT, N1, Zero, ISD::SETLT);
    SDValue Res = DAG.getSelect(DL, VT, IsNeg, Sub, Sra);
    return Res;
  }

  // If integer divide is expensive and we satisfy the requirements, emit an
  // alternate sequence.  Targets may check function attributes for size/speed
  // trade-offs.
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (isConstantOrConstantVector(N1) &&
      !TLI.isIntDivCheap(N->getValueType(0), Attr))
    if (SDValue Op = BuildSDIV(N))
      return Op;

  return SDValue();
}

SDValue DAGCombiner::visitUDIV(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  EVT CCVT = getSetCCResultType(VT);

  // fold vector ops
  if (VT.isVector())
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

  SDLoc DL(N);

  // fold (udiv c1, c2) -> c1/c2
  ConstantSDNode *N0C = isConstOrConstSplat(N0);
  ConstantSDNode *N1C = isConstOrConstSplat(N1);
  if (N0C && N1C)
    if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UDIV, DL, VT,
                                                    N0C, N1C))
      return Folded;
  // fold (udiv X, -1) -> select(X == -1, 1, 0)
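  // In unsigned arithmetic -1 is the maximum value, so the quotient is 1
  // only when X == -1 and 0 otherwise.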
  if (N1C && N1C->getAPIntValue().isAllOnesValue())
    return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
                         DAG.getConstant(1, DL, VT),
                         DAG.getConstant(0, DL, VT));

  if (SDValue V = simplifyDivRem(N, DAG))
    return V;

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  if (SDValue V = visitUDIVLike(N0, N1, N)) {
    // If the corresponding remainder node exists, update its users with
    // (Dividend - (Quotient * Divisor)).
    if (SDNode *RemNode = DAG.getNodeIfExists(ISD::UREM, N->getVTList(),
                                              { N0, N1 })) {
      SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, V, N1);
      SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
      AddToWorklist(Mul.getNode());
      AddToWorklist(Sub.getNode());
      CombineTo(RemNode, Sub);
    }
    return V;
  }

  // udiv, urem -> udivrem
  // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
  // true.  Otherwise, we break the simplification logic in visitREM().
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
    if (SDValue DivRem = useDivRem(N))
        return DivRem;

  return SDValue();
}

SDValue DAGCombiner::visitUDIVLike(SDValue N0, SDValue N1, SDNode *N) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // fold (udiv x, (1 << c)) -> x >>u c
  if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
      DAG.isKnownToBeAPowerOfTwo(N1)) {
    SDValue LogBase2 = BuildLogBase2(N1, DL);
    AddToWorklist(LogBase2.getNode());

    EVT ShiftVT = getShiftAmountTy(N0.getValueType());
    SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
    AddToWorklist(Trunc.getNode());
    return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc);
  }

  // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
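  // e.g. (udiv x, (shl 4, y)) -> x >>u (2 + y)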
  if (N1.getOpcode() == ISD::SHL) {
    SDValue N10 = N1.getOperand(0);
    if (isConstantOrConstantVector(N10, /*NoOpaques*/ true) &&
        DAG.isKnownToBeAPowerOfTwo(N10)) {
      SDValue LogBase2 = BuildLogBase2(N10, DL);
      AddToWorklist(LogBase2.getNode());

      EVT ADDVT = N1.getOperand(1).getValueType();
      SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ADDVT);
      AddToWorklist(Trunc.getNode());
      SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT, N1.getOperand(1), Trunc);
      AddToWorklist(Add.getNode());
      return DAG.getNode(ISD::SRL, DL, VT, N0, Add);
    }
  }

  // fold (udiv x, c) -> alternate
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (isConstantOrConstantVector(N1) &&
      !TLI.isIntDivCheap(N->getValueType(0), Attr))
    if (SDValue Op = BuildUDIV(N))
      return Op;

  return SDValue();
}

// handles ISD::SREM and ISD::UREM
SDValue DAGCombiner::visitREM(SDNode *N) {
  unsigned Opcode = N->getOpcode();
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  EVT CCVT = getSetCCResultType(VT);

  bool isSigned = (Opcode == ISD::SREM);
  SDLoc DL(N);

  // fold (rem c1, c2) -> c1%c2
  ConstantSDNode *N0C = isConstOrConstSplat(N0);
  ConstantSDNode *N1C = isConstOrConstSplat(N1);
  if (N0C && N1C)
    if (SDValue Folded = DAG.FoldConstantArithmetic(Opcode, DL, VT, N0C, N1C))
      return Folded;
  // fold (urem X, -1) -> select(X == -1, 0, X)
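  // -1 is the maximum unsigned value, so X urem -1 is X itself for any
  // X < -1, and 0 when X == -1.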
  if (!isSigned && N1C && N1C->getAPIntValue().isAllOnesValue())
    return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
                         DAG.getConstant(0, DL, VT), N0);

  if (SDValue V = simplifyDivRem(N, DAG))
    return V;

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  if (isSigned) {
    // If we know the sign bits of both operands are zero, strength reduce to a
    // urem instead.  Handles (X & 0x0FFFFFFF) %s 16 -> X&15
    if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
      return DAG.getNode(ISD::UREM, DL, VT, N0, N1);
  } else {
    SDValue NegOne = DAG.getAllOnesConstant(DL, VT);
    if (DAG.isKnownToBeAPowerOfTwo(N1)) {
      // fold (urem x, pow2) -> (and x, pow2-1)
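      // e.g. (urem x, 16) -> (and x, 15)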
      SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne);
      AddToWorklist(Add.getNode());
      return DAG.getNode(ISD::AND, DL, VT, N0, Add);
    }
    if (N1.getOpcode() == ISD::SHL &&
        DAG.isKnownToBeAPowerOfTwo(N1.getOperand(0))) {
      // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
      SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne);
      AddToWorklist(Add.getNode());
      return DAG.getNode(ISD::AND, DL, VT, N0, Add);
    }
  }

  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();

  // If X/C can be simplified by the division-by-constant logic, lower
  // X%C to the equivalent of X-X/C*C.
  // Reuse the SDIVLike/UDIVLike combines - to avoid mangling nodes, the
  // speculative DIV must not cause a DIVREM conversion.  We guard against this
  // by skipping the simplification if isIntDivCheap().  When div is not cheap,
  // combine will not return a DIVREM.  Regardless, checking cheapness here
  // makes sense since the simplification results in fatter code.
  if (DAG.isKnownNeverZero(N1) && !TLI.isIntDivCheap(VT, Attr)) {
    SDValue OptimizedDiv =
        isSigned ? visitSDIVLike(N0, N1, N) : visitUDIVLike(N0, N1, N);
    if (OptimizedDiv.getNode()) {
      // If the equivalent Div node also exists, update its users.
      unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
      if (SDNode *DivNode = DAG.getNodeIfExists(DivOpcode, N->getVTList(),
                                                { N0, N1 }))
        CombineTo(DivNode, OptimizedDiv);
      SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1);
      SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
      AddToWorklist(OptimizedDiv.getNode());
      AddToWorklist(Mul.getNode());
      return Sub;
    }
  }

  // sdiv, srem -> sdivrem / udiv, urem -> udivrem
  if (SDValue DivRem = useDivRem(N))
    return DivRem.getValue(1);

  return SDValue();
}

SDValue DAGCombiner::visitMULHS(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  if (VT.isVector()) {
    // fold (mulhs x, 0) -> 0
    // do not return N0/N1, because undef node may exist.
    if (ISD::isBuildVectorAllZeros(N0.getNode()) ||
        ISD::isBuildVectorAllZeros(N1.getNode()))
      return DAG.getConstant(0, DL, VT);
  }

  // fold (mulhs x, 0) -> 0
  if (isNullConstant(N1))
    return N1;
  // fold (mulhs x, 1) -> (sra x, size(x)-1)
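  // The high half of x * 1 at double width is just the sign extension of x,
  // i.e. a broadcast of its sign bit.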
  if (isOneConstant(N1))
    return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0,
                       DAG.getConstant(N0.getScalarValueSizeInBits() - 1, DL,
                                       getShiftAmountTy(N0.getValueType())));

  // fold (mulhs x, undef) -> 0
  if (N0.isUndef() || N1.isUndef())
    return DAG.getConstant(0, DL, VT);

  // If the type twice as wide is legal, transform the mulhs to a wider multiply
  // plus a shift.
  if (VT.isSimple() && !VT.isVector()) {
    MVT Simple = VT.getSimpleVT();
    unsigned SimpleSize = Simple.getSizeInBits();
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
      N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0);
      N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
      N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
      N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
            DAG.getConstant(SimpleSize, DL,
                            getShiftAmountTy(N1.getValueType())));
      return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitMULHU(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  if (VT.isVector()) {
    // fold (mulhu x, 0) -> 0
    // do not return N0/N1, because undef node may exist.
    if (ISD::isBuildVectorAllZeros(N0.getNode()) ||
        ISD::isBuildVectorAllZeros(N1.getNode()))
      return DAG.getConstant(0, DL, VT);
  }

  // fold (mulhu x, 0) -> 0
  if (isNullConstant(N1))
    return N1;
  // fold (mulhu x, 1) -> 0
  if (isOneConstant(N1))
    return DAG.getConstant(0, DL, N0.getValueType());
  // fold (mulhu x, undef) -> 0
  if (N0.isUndef() || N1.isUndef())
    return DAG.getConstant(0, DL, VT);

  // fold (mulhu x, (1 << c)) -> x >> (bitwidth - c)
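  // e.g. for i32, (mulhu x, 1 << 16) -> x >>u 16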
  if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
      DAG.isKnownToBeAPowerOfTwo(N1) && hasOperation(ISD::SRL, VT)) {
    unsigned NumEltBits = VT.getScalarSizeInBits();
    SDValue LogBase2 = BuildLogBase2(N1, DL);
    SDValue SRLAmt = DAG.getNode(
        ISD::SUB, DL, VT, DAG.getConstant(NumEltBits, DL, VT), LogBase2);
    EVT ShiftVT = getShiftAmountTy(N0.getValueType());
    SDValue Trunc = DAG.getZExtOrTrunc(SRLAmt, DL, ShiftVT);
    return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc);
  }

  // If the type twice as wide is legal, transform the mulhu to a wider multiply
  // plus a shift.
  if (VT.isSimple() && !VT.isVector()) {
    MVT Simple = VT.getSimpleVT();
    unsigned SimpleSize = Simple.getSizeInBits();
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
      N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
      N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
      N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
      N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
            DAG.getConstant(SimpleSize, DL,
                            getShiftAmountTy(N1.getValueType())));
      return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
    }
  }

  return SDValue();
}

/// Perform optimizations common to nodes that compute two values. LoOp and HiOp
/// give the opcodes for the two computations that are being performed. Return
/// the simplified value if a simplification was made, or an empty SDValue
/// otherwise.
SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                                unsigned HiOp) {
  // If the high half is not needed, just compute the low half.
  bool HiExists = N->hasAnyUseOfValue(1);
  if (!HiExists && (!LegalOperations ||
                    TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) {
    SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
    return CombineTo(N, Res, Res);
  }

  // If the low half is not needed, just compute the high half.
  bool LoExists = N->hasAnyUseOfValue(0);
  if (!LoExists && (!LegalOperations ||
                    TLI.isOperationLegalOrCustom(HiOp, N->getValueType(1)))) {
    SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
    return CombineTo(N, Res, Res);
  }

  // If both halves are used, return as it is.
  if (LoExists && HiExists)
    return SDValue();

  // If the two computed results can be simplified separately, separate them.
  if (LoExists) {
    SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
    AddToWorklist(Lo.getNode());
    SDValue LoOpt = combine(Lo.getNode());
    if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
        (!LegalOperations ||
         TLI.isOperationLegalOrCustom(LoOpt.getOpcode(), LoOpt.getValueType())))
      return CombineTo(N, LoOpt, LoOpt);
  }

  if (HiExists) {
    SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
    AddToWorklist(Hi.getNode());
    SDValue HiOpt = combine(Hi.getNode());
    if (HiOpt.getNode() && HiOpt != Hi &&
        (!LegalOperations ||
         TLI.isOperationLegalOrCustom(HiOpt.getOpcode(), HiOpt.getValueType())))
      return CombineTo(N, HiOpt, HiOpt);
  }

  return SDValue();
}

SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
  if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS))
    return Res;

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  // If the type twice as wide is legal, transform this into a wider multiply
  // plus a shift.
  if (VT.isSimple() && !VT.isVector()) {
    MVT Simple = VT.getSimpleVT();
    unsigned SimpleSize = Simple.getSizeInBits();
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
      SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
      SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
      Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
      // Compute the high part as N1.
      Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
            DAG.getConstant(SimpleSize, DL,
                            getShiftAmountTy(Lo.getValueType())));
      Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
      // Compute the low part as N0.
      Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
      return CombineTo(N, Lo, Hi);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
  if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU))
    return Res;

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  // (umul_lohi N0, 0) -> (0, 0)
  if (isNullConstant(N->getOperand(1))) {
    SDValue Zero = DAG.getConstant(0, DL, VT);
    return CombineTo(N, Zero, Zero);
  }

  // (umul_lohi N0, 1) -> (N0, 0)
  if (isOneConstant(N->getOperand(1))) {
    SDValue Zero = DAG.getConstant(0, DL, VT);
    return CombineTo(N, N->getOperand(0), Zero);
  }

  // If the type twice as wide is legal, transform this into a wider multiply
  // plus a shift.
  if (VT.isSimple() && !VT.isVector()) {
    MVT Simple = VT.getSimpleVT();
    unsigned SimpleSize = Simple.getSizeInBits();
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
      SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
      SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
      Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
      // Compute the high part as N1.
      Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
            DAG.getConstant(SimpleSize, DL,
                            getShiftAmountTy(Lo.getValueType())));
      Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
      // Compute the low part as N0.
      Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
      return CombineTo(N, Lo, Hi);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitMULO(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  bool IsSigned = (ISD::SMULO == N->getOpcode());

  EVT CarryVT = N->getValueType(1);
  SDLoc DL(N);

  // canonicalize constant to RHS.
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
      !DAG.isConstantIntBuildVectorOrConstantInt(N1))
    return DAG.getNode(N->getOpcode(), DL, N->getVTList(), N1, N0);

  // fold (mulo x, 0) -> 0 + no carry out
  if (isNullOrNullSplat(N1))
    return CombineTo(N, DAG.getConstant(0, DL, VT),
                     DAG.getConstant(0, DL, CarryVT));

  // (mulo x, 2) -> (addo x, x)
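  // x * 2 == x + x, and the overflow condition matches for both the signed
  // and unsigned cases.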
  if (ConstantSDNode *C2 = isConstOrConstSplat(N1))
    if (C2->getAPIntValue() == 2)
      return DAG.getNode(IsSigned ? ISD::SADDO : ISD::UADDO, DL,
                         N->getVTList(), N0, N0);

  return SDValue();
}

SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();

  // fold vector ops
  if (VT.isVector())
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

  // fold operation with constant operands.
  ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
  ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(N->getOpcode(), SDLoc(N), VT, N0C, N1C);

  // canonicalize constant to RHS
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
     !DAG.isConstantIntBuildVectorOrConstantInt(N1))
    return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);

  // If the sign bits are zero, flip between UMIN/UMAX and SMIN/SMAX.
  // Only do this if the current op isn't legal and the flipped is.
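  // For example, for i8 operands 0x12 and 0x34 (both sign bits clear), smax
  // and umax agree and both return 0x34.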
  unsigned Opcode = N->getOpcode();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isOperationLegal(Opcode, VT) &&
      (N0.isUndef() || DAG.SignBitIsZero(N0)) &&
      (N1.isUndef() || DAG.SignBitIsZero(N1))) {
    unsigned AltOpcode;
    switch (Opcode) {
    case ISD::SMIN: AltOpcode = ISD::UMIN; break;
    case ISD::SMAX: AltOpcode = ISD::UMAX; break;
    case ISD::UMIN: AltOpcode = ISD::SMIN; break;
    case ISD::UMAX: AltOpcode = ISD::SMAX; break;
    default: llvm_unreachable("Unknown MINMAX opcode");
    }
    if (TLI.isOperationLegal(AltOpcode, VT))
      return DAG.getNode(AltOpcode, SDLoc(N), VT, N0, N1);
  }

  return SDValue();
}

/// If this is a bitwise logic instruction and both operands have the same
/// opcode, try to sink the other opcode after the logic instruction.
SDValue DAGCombiner::hoistLogicOpWithSameOpcodeHands(SDNode *N) {
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned LogicOpcode = N->getOpcode();
  unsigned HandOpcode = N0.getOpcode();
  assert((LogicOpcode == ISD::AND || LogicOpcode == ISD::OR ||
          LogicOpcode == ISD::XOR) && "Expected logic opcode");
  assert(HandOpcode == N1.getOpcode() && "Bad input!");

  // Bail early if none of these transforms apply.
  if (N0.getNumOperands() == 0)
    return SDValue();

  // FIXME: We should check number of uses of the operands to not increase
  //        the instruction count for all transforms.

  // Handle size-changing casts.
  SDValue X = N0.getOperand(0);
  SDValue Y = N1.getOperand(0);
  EVT XVT = X.getValueType();
  SDLoc DL(N);
  if (HandOpcode == ISD::ANY_EXTEND || HandOpcode == ISD::ZERO_EXTEND ||
      HandOpcode == ISD::SIGN_EXTEND) {
    // If both operands have other uses, this transform would create extra
    // instructions without eliminating anything.
    if (!N0.hasOneUse() && !N1.hasOneUse())
      return SDValue();
    // We need matching integer source types.
    if (XVT != Y.getValueType())
      return SDValue();
    // Don't create an illegal op during or after legalization. Don't ever
    // create an unsupported vector op.
    if ((VT.isVector() || LegalOperations) &&
        !TLI.isOperationLegalOrCustom(LogicOpcode, XVT))
      return SDValue();
    // Avoid infinite looping with PromoteIntBinOp.
    // TODO: Should we apply desirable/legal constraints to all opcodes?
    if (HandOpcode == ISD::ANY_EXTEND && LegalTypes &&
        !TLI.isTypeDesirableForOp(LogicOpcode, XVT))
      return SDValue();
    // logic_op (hand_op X), (hand_op Y) --> hand_op (logic_op X, Y)
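    // (For example, (zext X) ^ (zext Y) == zext (X ^ Y): the extended bits
    // are zero on both sides, so they stay zero through the logic op.)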
    SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
    return DAG.getNode(HandOpcode, DL, VT, Logic);
  }

  // logic_op (truncate x), (truncate y) --> truncate (logic_op x, y)
  if (HandOpcode == ISD::TRUNCATE) {
    // If both operands have other uses, this transform would create extra
    // instructions without eliminating anything.
    if (!N0.hasOneUse() && !N1.hasOneUse())
      return SDValue();
    // We need matching source types.
    if (XVT != Y.getValueType())
      return SDValue();
    // Don't create an illegal op during or after legalization.
    if (LegalOperations && !TLI.isOperationLegal(LogicOpcode, XVT))
      return SDValue();
    // Be extra careful sinking truncate. If it's free, there's no benefit in
    // widening a binop. Also, don't create a logic op on an illegal type.
    if (TLI.isZExtFree(VT, XVT) && TLI.isTruncateFree(XVT, VT))
      return SDValue();
    if (!TLI.isTypeLegal(XVT))
      return SDValue();
    SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
    return DAG.getNode(HandOpcode, DL, VT, Logic);
  }

  // For binops SHL/SRL/SRA/AND:
  //   logic_op (OP x, z), (OP y, z) --> OP (logic_op x, y), z
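  // For example, (srl x, z) ^ (srl y, z) == srl (x ^ y), z: both shifts fill
  // with zeros and move identical bit positions, so the logic op commutes
  // with the shift.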
  if ((HandOpcode == ISD::SHL || HandOpcode == ISD::SRL ||
       HandOpcode == ISD::SRA || HandOpcode == ISD::AND) &&
      N0.getOperand(1) == N1.getOperand(1)) {
    // If either operand has other uses, this transform is not an improvement.
    if (!N0.hasOneUse() || !N1.hasOneUse())
      return SDValue();
    SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
    return DAG.getNode(HandOpcode, DL, VT, Logic, N0.getOperand(1));
  }

  // Unary ops: logic_op (bswap x), (bswap y) --> bswap (logic_op x, y)
  if (HandOpcode == ISD::BSWAP) {
    // If either operand has other uses, this transform is not an improvement.
    if (!N0.hasOneUse() || !N1.hasOneUse())
      return SDValue();
    SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
    return DAG.getNode(HandOpcode, DL, VT, Logic);
  }

  // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
  // Only perform this optimization up until type legalization, before
  // LegalizeVectorOps. LegalizeVectorOps promotes vector operations by
  // adding bitcasts. For example, (xor v4i32) is promoted to (xor v2i64),
  // and we don't want to undo this promotion.
  // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
  // on scalars.
  if ((HandOpcode == ISD::BITCAST || HandOpcode == ISD::SCALAR_TO_VECTOR) &&
       Level <= AfterLegalizeTypes) {
    // Input types must be integer and the same.
    if (XVT.isInteger() && XVT == Y.getValueType() &&
        !(VT.isVector() && TLI.isTypeLegal(VT) &&
          !XVT.isVector() && !TLI.isTypeLegal(XVT))) {
      SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
      return DAG.getNode(HandOpcode, DL, VT, Logic);
    }
  }

  // Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
  // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
  // If both shuffles use the same mask, and both shuffle within a single
  // vector, then it is worthwhile to move the swizzle after the operation.
  // The type-legalizer generates this pattern when loading illegal
  // vector types from memory. In many cases this allows additional shuffle
  // optimizations.
  // There are other cases where moving the shuffle after the xor/and/or
  // is profitable even if shuffles don't perform a swizzle.
  // If both shuffles use the same mask, and both shuffles have the same first
  // or second operand, then it might still be profitable to move the shuffle
  // after the xor/and/or operation.
  if (HandOpcode == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) {
    auto *SVN0 = cast<ShuffleVectorSDNode>(N0);
    auto *SVN1 = cast<ShuffleVectorSDNode>(N1);
    assert(X.getValueType() == Y.getValueType() &&
           "Inputs to shuffles are not the same type");

    // Check that both shuffles use the same mask. The masks are known to be of
    // the same length because the result vector type is the same.
    // Check also that shuffles have only one use to avoid introducing extra
    // instructions.
    if (!SVN0->hasOneUse() || !SVN1->hasOneUse() ||
        !SVN0->getMask().equals(SVN1->getMask()))
      return SDValue();

    // Don't try to fold this node if it requires introducing a
    // build vector of all zeros that might be illegal at this stage.
    SDValue ShOp = N0.getOperand(1);
    if (LogicOpcode == ISD::XOR && !ShOp.isUndef())
      ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);

    // (logic_op (shuf (A, C), shuf (B, C))) --> shuf (logic_op (A, B), C)
    if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) {
      SDValue Logic = DAG.getNode(LogicOpcode, DL, VT,
                                  N0.getOperand(0), N1.getOperand(0));
      return DAG.getVectorShuffle(VT, DL, Logic, ShOp, SVN0->getMask());
    }

    // Don't try to fold this node if it requires introducing a
    // build vector of all zeros that might be illegal at this stage.
    ShOp = N0.getOperand(0);
    if (LogicOpcode == ISD::XOR && !ShOp.isUndef())
      ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);

    // (logic_op (shuf (C, A), shuf (C, B))) --> shuf (C, logic_op (A, B))
    if (N0.getOperand(0) == N1.getOperand(0) && ShOp.getNode()) {
      SDValue Logic = DAG.getNode(LogicOpcode, DL, VT, N0.getOperand(1),
                                  N1.getOperand(1));
      return DAG.getVectorShuffle(VT, DL, ShOp, Logic, SVN0->getMask());
    }
  }

  return SDValue();
}

/// Try to make (and/or setcc (LL, LR), setcc (RL, RR)) more efficient.
SDValue DAGCombiner::foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
                                       const SDLoc &DL) {
  SDValue LL, LR, RL, RR, N0CC, N1CC;
  if (!isSetCCEquivalent(N0, LL, LR, N0CC) ||
      !isSetCCEquivalent(N1, RL, RR, N1CC))
    return SDValue();

  assert(N0.getValueType() == N1.getValueType() &&
         "Unexpected operand types for bitwise logic op");
  assert(LL.getValueType() == LR.getValueType() &&
         RL.getValueType() == RR.getValueType() &&
         "Unexpected operand types for setcc");

  // If we're here post-legalization or the logic op type is not i1, the logic
  // op type must match a setcc result type. Also, all folds require new
  // operations on the left and right operands, so those types must match.
  EVT VT = N0.getValueType();
  EVT OpVT = LL.getValueType();
  if (LegalOperations || VT.getScalarType() != MVT::i1)
    if (VT != getSetCCResultType(OpVT))
      return SDValue();
  if (OpVT != RL.getValueType())
    return SDValue();

  ISD::CondCode CC0 = cast<CondCodeSDNode>(N0CC)->get();
  ISD::CondCode CC1 = cast<CondCodeSDNode>(N1CC)->get();
  bool IsInteger = OpVT.isInteger();
  if (LR == RR && CC0 == CC1 && IsInteger) {
    bool IsZero = isNullOrNullSplat(LR);
    bool IsNeg1 = isAllOnesOrAllOnesSplat(LR);

    // All bits clear?
    bool AndEqZero = IsAnd && CC1 == ISD::SETEQ && IsZero;
    // All sign bits clear?
    bool AndGtNeg1 = IsAnd && CC1 == ISD::SETGT && IsNeg1;
    // Any bits set?
    bool OrNeZero = !IsAnd && CC1 == ISD::SETNE && IsZero;
    // Any sign bits set?
    bool OrLtZero = !IsAnd && CC1 == ISD::SETLT && IsZero;

    // (and (seteq X,  0), (seteq Y,  0)) --> (seteq (or X, Y),  0)
    // (and (setgt X, -1), (setgt Y, -1)) --> (setgt (or X, Y), -1)
    // (or  (setne X,  0), (setne Y,  0)) --> (setne (or X, Y),  0)
    // (or  (setlt X,  0), (setlt Y,  0)) --> (setlt (or X, Y),  0)
    if (AndEqZero || AndGtNeg1 || OrNeZero || OrLtZero) {
      SDValue Or = DAG.getNode(ISD::OR, SDLoc(N0), OpVT, LL, RL);
      AddToWorklist(Or.getNode());
      return DAG.getSetCC(DL, VT, Or, LR, CC1);
    }

    // All bits set?
    bool AndEqNeg1 = IsAnd && CC1 == ISD::SETEQ && IsNeg1;
    // All sign bits set?
    bool AndLtZero = IsAnd && CC1 == ISD::SETLT && IsZero;
    // Any bits clear?
    bool OrNeNeg1 = !IsAnd && CC1 == ISD::SETNE && IsNeg1;
    // Any sign bits clear?
    bool OrGtNeg1 = !IsAnd && CC1 == ISD::SETGT && IsNeg1;

    // (and (seteq X, -1), (seteq Y, -1)) --> (seteq (and X, Y), -1)
    // (and (setlt X,  0), (setlt Y,  0)) --> (setlt (and X, Y),  0)
    // (or  (setne X, -1), (setne Y, -1)) --> (setne (and X, Y), -1)
    // (or  (setgt X, -1), (setgt Y, -1)) --> (setgt (and X, Y), -1)
    if (AndEqNeg1 || AndLtZero || OrNeNeg1 || OrGtNeg1) {
      SDValue And = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, LL, RL);
      AddToWorklist(And.getNode());
      return DAG.getSetCC(DL, VT, And, LR, CC1);
    }
  }

  // TODO: What is the 'or' equivalent of this fold?
  // (and (setne X, 0), (setne X, -1)) --> (setuge (add X, 1), 2)
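  // For example, for i8: (add X, 1) is unsigned-below 2 only for X == 0
  // (giving 1) and X == -1 (giving 0 after wrap), so X != 0 && X != -1 is
  // exactly (add X, 1) >=u 2.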
  if (IsAnd && LL == RL && CC0 == CC1 && OpVT.getScalarSizeInBits() > 1 &&
      IsInteger && CC0 == ISD::SETNE &&
      ((isNullConstant(LR) && isAllOnesConstant(RR)) ||
       (isAllOnesConstant(LR) && isNullConstant(RR)))) {
    SDValue One = DAG.getConstant(1, DL, OpVT);
    SDValue Two = DAG.getConstant(2, DL, OpVT);
    SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N0), OpVT, LL, One);
    AddToWorklist(Add.getNode());
    return DAG.getSetCC(DL, VT, Add, Two, ISD::SETUGE);
  }

  // Try more general transforms if the predicates match and the only user of
  // the compares is the 'and' or 'or'.
  if (IsInteger && TLI.convertSetCCLogicToBitwiseLogic(OpVT) && CC0 == CC1 &&
      N0.hasOneUse() && N1.hasOneUse()) {
    // and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
    // or  (setne A, B), (setne C, D) --> setne (or (xor A, B), (xor C, D)), 0
    if ((IsAnd && CC1 == ISD::SETEQ) || (!IsAnd && CC1 == ISD::SETNE)) {
      SDValue XorL = DAG.getNode(ISD::XOR, SDLoc(N0), OpVT, LL, LR);
      SDValue XorR = DAG.getNode(ISD::XOR, SDLoc(N1), OpVT, RL, RR);
      SDValue Or = DAG.getNode(ISD::OR, DL, OpVT, XorL, XorR);
      SDValue Zero = DAG.getConstant(0, DL, OpVT);
      return DAG.getSetCC(DL, VT, Or, Zero, CC1);
    }

    // Turn compare of constants whose difference is 1 bit into add+and+setcc.
    // TODO - support non-uniform vector amounts.
    if ((IsAnd && CC1 == ISD::SETNE) || (!IsAnd && CC1 == ISD::SETEQ)) {
      // Match a shared variable operand and 2 non-opaque constant operands.
      ConstantSDNode *C0 = isConstOrConstSplat(LR);
      ConstantSDNode *C1 = isConstOrConstSplat(RR);
      if (LL == RL && C0 && C1 && !C0->isOpaque() && !C1->isOpaque()) {
        // Canonicalize larger constant as C0.
        if (C1->getAPIntValue().ugt(C0->getAPIntValue()))
          std::swap(C0, C1);

        // The difference of the constants must be a single bit.
        const APInt &C0Val = C0->getAPIntValue();
        const APInt &C1Val = C1->getAPIntValue();
        if ((C0Val - C1Val).isPowerOf2()) {
          // and/or (setcc X, C0, ne), (setcc X, C1, ne/eq) -->
          // setcc (and (add X, -C1), ~(C0 - C1)), 0, ne/eq
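          // For example, with C0 = 12 and C1 = 4 (difference 8):
          // X != 12 && X != 4 becomes ((X - 4) & ~8) != 0, since
          // (X - 4) & ~8 is zero exactly when X - 4 is 0 or 8.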
          SDValue OffsetC = DAG.getConstant(-C1Val, DL, OpVT);
          SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LL, OffsetC);
          SDValue MaskC = DAG.getConstant(~(C0Val - C1Val), DL, OpVT);
          SDValue And = DAG.getNode(ISD::AND, DL, OpVT, Add, MaskC);
          SDValue Zero = DAG.getConstant(0, DL, OpVT);
          return DAG.getSetCC(DL, VT, And, Zero, CC0);
        }
      }
    }
  }

  // Canonicalize equivalent operands to LL == RL.
  if (LL == RR && LR == RL) {
    CC1 = ISD::getSetCCSwappedOperands(CC1);
    std::swap(RL, RR);
  }

  // (and (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC)
  // (or  (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC)
  if (LL == RL && LR == RR) {
    ISD::CondCode NewCC = IsAnd ? ISD::getSetCCAndOperation(CC0, CC1, IsInteger)
                                : ISD::getSetCCOrOperation(CC0, CC1, IsInteger);
    if (NewCC != ISD::SETCC_INVALID &&
        (!LegalOperations ||
         (TLI.isCondCodeLegal(NewCC, LL.getSimpleValueType()) &&
          TLI.isOperationLegal(ISD::SETCC, OpVT))))
      return DAG.getSetCC(DL, VT, LL, LR, NewCC);
  }

  return SDValue();
}

/// This contains all DAGCombine rules which reduce two values combined by
/// an And operation to a single value. This makes them reusable in the context
/// of visitSELECT(). Rules involving constants are not included as
/// visitSELECT() already handles those cases.
SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) {
  EVT VT = N1.getValueType();
  SDLoc DL(N);

  // fold (and x, undef) -> 0
  if (N0.isUndef() || N1.isUndef())
    return DAG.getConstant(0, DL, VT);

  if (SDValue V = foldLogicOfSetCCs(true, N0, N1, DL))
    return V;

  if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
      VT.getSizeInBits() <= 64) {
    if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
      if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
        // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal
        // immediate for an add, but it is legal if its top c2 bits are set,
        // transform the ADD so the immediate doesn't need to be materialized
        // in a register.
        APInt ADDC = ADDI->getAPIntValue();
        APInt SRLC = SRLI->getAPIntValue();
        if (ADDC.getMinSignedBits() <= 64 &&
            SRLC.ult(VT.getSizeInBits()) &&
            !TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
          APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                             SRLC.getZExtValue());
          if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) {
            ADDC |= Mask;
            if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
              SDLoc DL0(N0);
              SDValue NewAdd =
                DAG.getNode(ISD::ADD, DL0, VT,
                            N0.getOperand(0), DAG.getConstant(ADDC, DL, VT));
              CombineTo(N0.getNode(), NewAdd);
              // Return N so it doesn't get rechecked!
              return SDValue(N, 0);
            }
          }
        }
      }
    }
  }

  // Reduce bit extract of low half of an integer to the narrower type.
  // (and (srl i64:x, K), KMask) ->
  //   (i64 zero_extend (and (srl (i32 (trunc i64:x)), K)), KMask)
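  // For example (illustrative): (and (srl i64:x, 8), 0xff) only reads bits
  // 8..15 of x, which live entirely in the low 32 bits, so the same extract
  // can be done on (trunc x to i32) and then zero-extended back to i64.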
  if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
    if (ConstantSDNode *CAnd = dyn_cast<ConstantSDNode>(N1)) {
      if (ConstantSDNode *CShift = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
        unsigned Size = VT.getSizeInBits();
        const APInt &AndMask = CAnd->getAPIntValue();
        unsigned ShiftBits = CShift->getZExtValue();

        // Bail out, this node will probably disappear anyway.
        if (ShiftBits == 0)
          return SDValue();

        unsigned MaskBits = AndMask.countTrailingOnes();
        EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), Size / 2);

        if (AndMask.isMask() &&
            // Required bits must not span the two halves of the integer and
            // must fit in the half size type.
            (ShiftBits + MaskBits <= Size / 2) &&
            TLI.isNarrowingProfitable(VT, HalfVT) &&
            TLI.isTypeDesirableForOp(ISD::AND, HalfVT) &&
            TLI.isTypeDesirableForOp(ISD::SRL, HalfVT) &&
            TLI.isTruncateFree(VT, HalfVT) &&
            TLI.isZExtFree(HalfVT, VT)) {
          // The isNarrowingProfitable is to avoid regressions on PPC and
          // AArch64 which match a few 64-bit bit insert / bit extract patterns
          // on downstream users of this. Those patterns could probably be
          // extended to handle extensions mixed in.

          SDValue SL(N0);
          assert(MaskBits <= Size);

          // Extracting the highest bit of the low half.
          EVT ShiftVT = TLI.getShiftAmountTy(HalfVT, DAG.getDataLayout());
          SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, HalfVT,
                                      N0.getOperand(0));

          SDValue NewMask = DAG.getConstant(AndMask.trunc(Size / 2), SL, HalfVT);
          SDValue ShiftK = DAG.getConstant(ShiftBits, SL, ShiftVT);
          SDValue Shift = DAG.getNode(ISD::SRL, SL, HalfVT, Trunc, ShiftK);
          SDValue And = DAG.getNode(ISD::AND, SL, HalfVT, Shift, NewMask);
          return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, And);
        }
      }
    }
  }

  return SDValue();
}

bool DAGCombiner::isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
                                   EVT LoadResultTy, EVT &ExtVT) {
  if (!AndC->getAPIntValue().isMask())
    return false;

  unsigned ActiveBits = AndC->getAPIntValue().countTrailingOnes();

  ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
  EVT LoadedVT = LoadN->getMemoryVT();

  if (ExtVT == LoadedVT &&
      (!LegalOperations ||
       TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))) {
    // ZEXTLOAD will match without needing to change the size of the value being
    // loaded.
    return true;
  }

  // Do not change the width of volatile or atomic loads.
  if (!LoadN->isSimple())
    return false;

  // Do not generate loads of non-round integer types since these can
  // be expensive (and would be wrong if the type is not byte sized).
  if (!LoadedVT.bitsGT(ExtVT) || !ExtVT.isRound())
    return false;

  if (LegalOperations &&
      !TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))
    return false;

  if (!TLI.shouldReduceLoadWidth(LoadN, ISD::ZEXTLOAD, ExtVT))
    return false;

  return true;
}

bool DAGCombiner::isLegalNarrowLdSt(LSBaseSDNode *LDST,
                                    ISD::LoadExtType ExtType, EVT &MemVT,
                                    unsigned ShAmt) {
  if (!LDST)
    return false;
  // Only allow byte offsets.
  if (ShAmt % 8)
    return false;

  // Do not generate loads of non-round integer types since these can
  // be expensive (and would be wrong if the type is not byte sized).
  if (!MemVT.isRound())
    return false;

  // Don't change the width of volatile or atomic loads.
  if (!LDST->isSimple())
    return false;

  // Verify that we are actually reducing a load width here.
  if (LDST->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits())
    return false;

  // Ensure that this isn't going to produce an unsupported memory access.
  if (ShAmt &&
      !TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
                              LDST->getAddressSpace(), ShAmt / 8,
                              LDST->getMemOperand()->getFlags()))
    return false;

  // It's not possible to generate a constant of extended or untyped type.
  EVT PtrType = LDST->getBasePtr().getValueType();
  if (PtrType == MVT::Untyped || PtrType.isExtended())
    return false;

  if (isa<LoadSDNode>(LDST)) {
    LoadSDNode *Load = cast<LoadSDNode>(LDST);
    // Don't transform one with multiple uses, this would require adding a new
    // load.
    if (!SDValue(Load, 0).hasOneUse())
      return false;

    if (LegalOperations &&
        !TLI.isLoadExtLegal(ExtType, Load->getValueType(0), MemVT))
      return false;

    // For the transform to be legal, the load must produce only two values
    // (the value loaded and the chain).  Don't transform a pre-increment
    // load, for example, which produces an extra value.  Otherwise the
    // transformation is not equivalent, and the downstream logic to replace
    // uses gets things wrong.
    if (Load->getNumValues() > 2)
      return false;

    // If the load that we're shrinking is an extload and we're not just
    // discarding the extension we can't simply shrink the load. Bail.
    // TODO: It would be possible to merge the extensions in some cases.
    if (Load->getExtensionType() != ISD::NON_EXTLOAD &&
        Load->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits() + ShAmt)
      return false;

    if (!TLI.shouldReduceLoadWidth(Load, ExtType, MemVT))
      return false;
  } else {
    assert(isa<StoreSDNode>(LDST) && "It is not a Load nor a Store SDNode");
    StoreSDNode *Store = cast<StoreSDNode>(LDST);
    // Can't write outside the original store
    if (Store->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits() + ShAmt)
      return false;

    if (LegalOperations &&
        !TLI.isTruncStoreLegal(Store->getValue().getValueType(), MemVT))
      return false;
  }
  return true;
}

bool DAGCombiner::SearchForAndLoads(SDNode *N,
                                    SmallVectorImpl<LoadSDNode*> &Loads,
                                    SmallPtrSetImpl<SDNode*> &NodesWithConsts,
                                    ConstantSDNode *Mask,
                                    SDNode *&NodeToMask) {
  // Recursively search for the operands, looking for loads which can be
  // narrowed.
  for (SDValue Op : N->op_values()) {
    if (Op.getValueType().isVector())
      return false;

    // Some constants may need fixing up later if they are too large.
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if ((N->getOpcode() == ISD::OR || N->getOpcode() == ISD::XOR) &&
          (Mask->getAPIntValue() & C->getAPIntValue()) != C->getAPIntValue())
        NodesWithConsts.insert(N);
      continue;
    }

    if (!Op.hasOneUse())
      return false;

    switch(Op.getOpcode()) {
    case ISD::LOAD: {
      auto *Load = cast<LoadSDNode>(Op);
      EVT ExtVT;
      if (isAndLoadExtLoad(Mask, Load, Load->getValueType(0), ExtVT) &&
          isLegalNarrowLdSt(Load, ISD::ZEXTLOAD, ExtVT)) {

        // ZEXTLOAD is already small enough.
        if (Load->getExtensionType() == ISD::ZEXTLOAD &&
            ExtVT.bitsGE(Load->getMemoryVT()))
          continue;

        // Use LE to convert equal sized loads to zext.
        if (ExtVT.bitsLE(Load->getMemoryVT()))
          Loads.push_back(Load);

        continue;
      }
      return false;
    }
    case ISD::ZERO_EXTEND:
    case ISD::AssertZext: {
      unsigned ActiveBits = Mask->getAPIntValue().countTrailingOnes();
      EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
      EVT VT = Op.getOpcode() == ISD::AssertZext ?
        cast<VTSDNode>(Op.getOperand(1))->getVT() :
        Op.getOperand(0).getValueType();

      // We can accept extending nodes if the mask is wider than or equal in
      // width to the original type.
      if (ExtVT.bitsGE(VT))
        continue;
      break;
    }
    case ISD::OR:
    case ISD::XOR:
    case ISD::AND:
      if (!SearchForAndLoads(Op.getNode(), Loads, NodesWithConsts, Mask,
                             NodeToMask))
        return false;
      continue;
    }

    // Allow one node which will be masked along with any loads found.
    if (NodeToMask)
      return false;

    // Also ensure that the node to be masked only produces one data result.
    NodeToMask = Op.getNode();
    if (NodeToMask->getNumValues() > 1) {
      bool HasValue = false;
      for (unsigned i = 0, e = NodeToMask->getNumValues(); i < e; ++i) {
        MVT VT = SDValue(NodeToMask, i).getSimpleValueType();
        if (VT != MVT::Glue && VT != MVT::Other) {
          if (HasValue) {
            NodeToMask = nullptr;
            return false;
          }
          HasValue = true;
        }
      }
      assert(HasValue && "Node to be masked has no data result?");
    }
  }
  return true;
}

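// If the mask of an AND is a low-bit mask (e.g. 0xFF), try to propagate it
// back through a tree of OR/XOR/AND nodes to any leaf loads, so that each
// load can be narrowed to a zextload and the AND itself becomes redundant.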
bool DAGCombiner::BackwardsPropagateMask(SDNode *N, SelectionDAG &DAG) {
  auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Mask)
    return false;

  if (!Mask->getAPIntValue().isMask())
    return false;

  // No need to do anything if the and directly uses a load.
  if (isa<LoadSDNode>(N->getOperand(0)))
    return false;

  SmallVector<LoadSDNode*, 8> Loads;
  SmallPtrSet<SDNode*, 2> NodesWithConsts;
  SDNode *FixupNode = nullptr;
  if (SearchForAndLoads(N, Loads, NodesWithConsts, Mask, FixupNode)) {
    if (Loads.size() == 0)
      return false;

    LLVM_DEBUG(dbgs() << "Backwards propagate AND: "; N->dump());
    SDValue MaskOp = N->getOperand(1);

    // If it exists, fixup the single node we allow in the tree that needs
    // masking.
    if (FixupNode) {
      LLVM_DEBUG(dbgs() << "First, need to fix up: "; FixupNode->dump());
      SDValue And = DAG.getNode(ISD::AND, SDLoc(FixupNode),
                                FixupNode->getValueType(0),
                                SDValue(FixupNode, 0), MaskOp);
      DAG.ReplaceAllUsesOfValueWith(SDValue(FixupNode, 0), And);
      if (And.getOpcode() == ISD::AND)
        DAG.UpdateNodeOperands(And.getNode(), SDValue(FixupNode, 0), MaskOp);
    }

    // Narrow any constants that need it.
    for (auto *LogicN : NodesWithConsts) {
      SDValue Op0 = LogicN->getOperand(0);
      SDValue Op1 = LogicN->getOperand(1);

      if (isa<ConstantSDNode>(Op0))
          std::swap(Op0, Op1);

      SDValue And = DAG.getNode(ISD::AND, SDLoc(Op1), Op1.getValueType(),
                                Op1, MaskOp);

      DAG.UpdateNodeOperands(LogicN, Op0, And);
    }

    // Create narrow loads.
    for (auto *Load : Loads) {
      LLVM_DEBUG(dbgs() << "Propagate AND back to: "; Load->dump());
      SDValue And = DAG.getNode(ISD::AND, SDLoc(Load), Load->getValueType(0),
                                SDValue(Load, 0), MaskOp);
      DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), And);
      if (And.getOpcode() == ISD::AND)
        And = SDValue(
            DAG.UpdateNodeOperands(And.getNode(), SDValue(Load, 0), MaskOp), 0);
      SDValue NewLoad = ReduceLoadWidth(And.getNode());
      assert(NewLoad &&
             "Shouldn't be masking the load if it can't be narrowed");
      CombineTo(Load, NewLoad, NewLoad.getValue(1));
    }
    DAG.ReplaceAllUsesWith(N, N->getOperand(0).getNode());
    return true;
  }
  return false;
}

// Unfold
//    x &  (-1 'logical shift' y)
// To
//    (x 'opposite logical shift' y) 'logical shift' y
// if it is better for performance.
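// For example, with x = 0b10110110 and y = 3:
//   x & (-1 << 3)   = 0b10110000
//   (x >> 3) << 3   = 0b10110000
// so the mask can be replaced by a shift pair when variable shifts are
// cheaper than materializing the shifted all-ones mask.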
SDValue DAGCombiner::unfoldExtremeBitClearingToShifts(SDNode *N) {
  assert(N->getOpcode() == ISD::AND);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Do we actually prefer shifts over mask?
  if (!TLI.shouldFoldMaskToVariableShiftPair(N0))
    return SDValue();

  // Try to match  (-1 '[outer] logical shift' y)
  unsigned OuterShift;
  unsigned InnerShift; // The opposite direction to the OuterShift.
  SDValue Y;           // Shift amount.
  auto matchMask = [&OuterShift, &InnerShift, &Y](SDValue M) -> bool {
    if (!M.hasOneUse())
      return false;
    OuterShift = M->getOpcode();
    if (OuterShift == ISD::SHL)
      InnerShift = ISD::SRL;
    else if (OuterShift == ISD::SRL)
      InnerShift = ISD::SHL;
    else
      return false;
    if (!isAllOnesConstant(M->getOperand(0)))
      return false;
    Y = M->getOperand(1);
    return true;
  };

  SDValue X;
  if (matchMask(N1))
    X = N0;
  else if (matchMask(N0))
    X = N1;
  else
    return SDValue();

  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  //     tmp = x   'opposite logical shift' y
  SDValue T0 = DAG.getNode(InnerShift, DL, VT, X, Y);
  //     ret = tmp 'logical shift' y
  SDValue T1 = DAG.getNode(OuterShift, DL, VT, T0, Y);

  return T1;
}

/// Try to replace shift/logic that tests if a bit is clear with mask + setcc.
/// For a target with a bit test, this is expected to become test + set and save
/// at least 1 instruction.
static SDValue combineShiftAnd1ToBitTest(SDNode *And, SelectionDAG &DAG) {
  assert(And->getOpcode() == ISD::AND && "Expected an 'and' op");

  // This is probably not worthwhile without a supported type.
  EVT VT = And->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Look through an optional extension and find a 'not'.
  // TODO: Should we favor test+set even without the 'not' op?
  SDValue Not = And->getOperand(0), And1 = And->getOperand(1);
  if (Not.getOpcode() == ISD::ANY_EXTEND)
    Not = Not.getOperand(0);
  if (!isBitwiseNot(Not) || !Not.hasOneUse() || !isOneConstant(And1))
    return SDValue();

  // Look through an optional truncation. The source operand may not be the same
  // type as the original 'and', but that is ok because we are masking off
  // everything but the low bit.
  SDValue Srl = Not.getOperand(0);
  if (Srl.getOpcode() == ISD::TRUNCATE)
    Srl = Srl.getOperand(0);

  // Match a shift-right by constant.
  if (Srl.getOpcode() != ISD::SRL || !Srl.hasOneUse() ||
      !isa<ConstantSDNode>(Srl.getOperand(1)))
    return SDValue();

  // We might have looked through casts that make this transform invalid.
  // TODO: If the source type is wider than the result type, do the mask and
  //       compare in the source type.
  const APInt &ShiftAmt = Srl.getConstantOperandAPInt(1);
  unsigned VTBitWidth = VT.getSizeInBits();
  if (ShiftAmt.uge(VTBitWidth))
    return SDValue();

  // Turn this into a bit-test pattern using mask op + setcc:
  // and (not (srl X, C)), 1 --> (and X, 1<<C) == 0
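  // For example, X = 0b1010, C = 1: the original computes
  // (~(X >> 1)) & 1 = 0 and the new form tests (X & 0b10) == 0, which is
  // also false; the two agree for every X because both isolate bit C.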
  SDLoc DL(And);
  SDValue X = DAG.getZExtOrTrunc(Srl.getOperand(0), DL, VT);
  EVT CCVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue Mask = DAG.getConstant(
      APInt::getOneBitSet(VTBitWidth, ShiftAmt.getZExtValue()), DL, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, Mask);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue Setcc = DAG.getSetCC(DL, CCVT, NewAnd, Zero, ISD::SETEQ);
  return DAG.getZExtOrTrunc(Setcc, DL, VT);
}

SDValue DAGCombiner::visitAND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N1.getValueType();

  // x & x --> x
  if (N0 == N1)
    return N0;

  // fold vector ops
  if (VT.isVector()) {
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

    // fold (and x, 0) -> 0, vector edition
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      // do not return N0, because undef node may exist in N0
      return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()),
                             SDLoc(N), N0.getValueType());
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      // do not return N1, because undef node may exist in N1
      return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()),
                             SDLoc(N), N1.getValueType());

    // fold (and x, -1) -> x, vector edition
    if (ISD::isBuildVectorAllOnes(N0.getNode()))
      return N1;
    if (ISD::isBuildVectorAllOnes(N1.getNode()))
      return N0;
  }

  // fold (and c1, c2) -> c1&c2
  ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
  ConstantSDNode *N1C = isConstOrConstSplat(N1);
  if (N0C && N1C && !N1C->isOpaque())
    return DAG.FoldConstantArithmetic(ISD::AND, SDLoc(N), VT, N0C, N1C);
  // canonicalize constant to RHS
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
      !DAG.isConstantIntBuildVectorOrConstantInt(N1))
    return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0);
  // fold (and x, -1) -> x
  if (isAllOnesConstant(N1))
    return N0;
  // if (and x, c) is known to be zero, return 0
  unsigned BitWidth = VT.getScalarSizeInBits();
  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
                                   APInt::getAllOnesValue(BitWidth)))
    return DAG.getConstant(0, SDLoc(N), VT);

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // reassociate and
  if (SDValue RAND = reassociateOps(ISD::AND, SDLoc(N), N0, N1, N->getFlags()))
    return RAND;

  // Try to convert a constant mask AND into a shuffle clear mask.
  if (VT.isVector())
    if (SDValue Shuffle = XformToShuffleWithZero(N))
      return Shuffle;

  // fold (and (or x, C), D) -> D if (C & D) == D
  auto MatchSubset = [](ConstantSDNode *LHS, ConstantSDNode *RHS) {
    return RHS->getAPIntValue().isSubsetOf(LHS->getAPIntValue());
  };
  if (N0.getOpcode() == ISD::OR &&
      ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchSubset))
    return N1;
  // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
    SDValue N0Op0 = N0.getOperand(0);
    APInt Mask = ~N1C->getAPIntValue();
    Mask = Mask.trunc(N0Op0.getScalarValueSizeInBits());
    if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
      SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
                                 N0.getValueType(), N0Op0);

      // Replace uses of the AND with uses of the Zero extend node.
      CombineTo(N, Zext);

      // We actually want to replace all uses of the any_extend with the
      // zero_extend, to avoid duplicating things.  This will later cause this
      // AND to be folded.
      CombineTo(N0.getNode(), Zext);
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }
  }

  // Similarly, fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) ->
  // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must
  // already be zero by virtue of the width of the base type of the load.
  //
  // The 'X' node here can either be nothing or an extract_vector_elt to catch
  // more cases.
  if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
       N0.getValueSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits() &&
       N0.getOperand(0).getOpcode() == ISD::LOAD &&
       N0.getOperand(0).getResNo() == 0) ||
      (N0.getOpcode() == ISD::LOAD && N0.getResNo() == 0)) {
    LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ?
                                         N0 : N0.getOperand(0) );

    // Get the constant (if applicable) the zero'th operand is being ANDed with.
    // This can be a pure constant or a vector splat, in which case we treat the
    // vector as a scalar and use the splat value.
    APInt Constant = APInt::getNullValue(1);
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      Constant = C->getAPIntValue();
    } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) {
      APInt SplatValue, SplatUndef;
      unsigned SplatBitSize;
      bool HasAnyUndefs;
      bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef,
                                             SplatBitSize, HasAnyUndefs);
      if (IsSplat) {
        // Undef bits can contribute to a possible optimisation if set, so
        // set them.
        SplatValue |= SplatUndef;

        // The splat value may be something like "0x00FFFFFF", which means 0 for
        // the first vector value and FF for the rest, repeating. We need a mask
        // that will apply equally to all members of the vector, so AND all the
        // lanes of the constant together.
        unsigned EltBitWidth = Vector->getValueType(0).getScalarSizeInBits();

        // If the splat value has been compressed to a bitlength lower
        // than the size of the vector lane, we need to re-expand it to
        // the lane size.
        if (EltBitWidth > SplatBitSize)
          for (SplatValue = SplatValue.zextOrTrunc(EltBitWidth);
               SplatBitSize < EltBitWidth; SplatBitSize = SplatBitSize * 2)
            SplatValue |= SplatValue.shl(SplatBitSize);

        // Make sure that variable 'Constant' is only set if 'SplatBitSize' is a
        // multiple of 'EltBitWidth'. Otherwise, we could propagate a wrong value.
        if ((SplatBitSize % EltBitWidth) == 0) {
          Constant = APInt::getAllOnesValue(EltBitWidth);
          for (unsigned i = 0, n = (SplatBitSize / EltBitWidth); i < n; ++i)
            Constant &= SplatValue.extractBits(EltBitWidth, i * EltBitWidth);
        }
      }
    }

    // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is
    // actually legal and isn't going to get expanded, else this is a false
    // optimisation.
    bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD,
                                                    Load->getValueType(0),
                                                    Load->getMemoryVT());

    // Resize the constant to the same size as the original memory access before
    // extension. If it is still the AllOnesValue then this AND is completely
    // unneeded.
    Constant = Constant.zextOrTrunc(Load->getMemoryVT().getScalarSizeInBits());

    bool B;
    switch (Load->getExtensionType()) {
    default: B = false; break;
    case ISD::EXTLOAD: B = CanZextLoadProfitably; break;
    case ISD::ZEXTLOAD:
    case ISD::NON_EXTLOAD: B = true; break;
    }

    if (B && Constant.isAllOnesValue()) {
      // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to
      // preserve semantics once we get rid of the AND.
      SDValue NewLoad(Load, 0);

      // Fold the AND away. NewLoad may get replaced immediately.
      CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0);

      if (Load->getExtensionType() == ISD::EXTLOAD) {
        NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD,
                              Load->getValueType(0), SDLoc(Load),
                              Load->getChain(), Load->getBasePtr(),
                              Load->getOffset(), Load->getMemoryVT(),
                              Load->getMemOperand());
        // Replace uses of the EXTLOAD with the new ZEXTLOAD.
        if (Load->getNumValues() == 3) {
          // PRE/POST_INC loads have 3 values.
          SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1),
                           NewLoad.getValue(2) };
          CombineTo(Load, To, 3, true);
        } else {
          CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1));
        }
      }

      return SDValue(N, 0); // Return N so it doesn't get rechecked!
    }
  }

  // fold (and (load x), 255) -> (zextload x, i8)
  // fold (and (extload x, i16), 255) -> (zextload x, i8)
  // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8)
  if (!VT.isVector() && N1C && (N0.getOpcode() == ISD::LOAD ||
                                (N0.getOpcode() == ISD::ANY_EXTEND &&
                                 N0.getOperand(0).getOpcode() == ISD::LOAD))) {
    if (SDValue Res = ReduceLoadWidth(N)) {
      LoadSDNode *LN0 = N0->getOpcode() == ISD::ANY_EXTEND
        ? cast<LoadSDNode>(N0.getOperand(0)) : cast<LoadSDNode>(N0);
      AddToWorklist(N);
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 0), Res);
      return SDValue(N, 0);
    }
  }

  if (Level >= AfterLegalizeTypes) {
    // Attempt to propagate the AND back up to the leaves which, if they're
    // loads, can be combined to narrow loads and the AND node can be removed.
    // Perform after legalization so that extend nodes will already be
    // combined into the loads.
    if (BackwardsPropagateMask(N, DAG)) {
      return SDValue(N, 0);
    }
  }

  if (SDValue Combined = visitANDLike(N0, N1, N))
    return Combined;

  // Simplify: (and (op x...), (op y...))  -> (op (and x, y))
  if (N0.getOpcode() == N1.getOpcode())
    if (SDValue V = hoistLogicOpWithSameOpcodeHands(N))
      return V;

  // Masking the negated extension of a boolean is just the zero-extended
  // boolean:
  // and (sub 0, zext(bool X)), 1 --> zext(bool X)
  // and (sub 0, sext(bool X)), 1 --> zext(bool X)
  //
  // Note: the SimplifyDemandedBits fold below can make an information-losing
  // transform, and then we have no way to find this better fold.
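  // For example, for X = true: (0 - zext(X)) & 1 == (0 - 1) & 1 == 1 ==
  // zext(X); for X = false both sides are 0. The sext case is identical
  // because only the low bit survives the mask.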
  if (N1C && N1C->isOne() && N0.getOpcode() == ISD::SUB) {
    if (isNullOrNullSplat(N0.getOperand(0))) {
      SDValue SubRHS = N0.getOperand(1);
      if (SubRHS.getOpcode() == ISD::ZERO_EXTEND &&
          SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
        return SubRHS;
      if (SubRHS.getOpcode() == ISD::SIGN_EXTEND &&
          SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
        return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, SubRHS.getOperand(0));
    }
  }

  // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
  // fold (and (sra)) -> (and (srl)) when possible.
  if (SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // fold (zext_inreg (extload x)) -> (zextload x)
  // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
  if (ISD::isUNINDEXEDLoad(N0.getNode()) &&
      (ISD::isEXTLoad(N0.getNode()) ||
       (ISD::isSEXTLoad(N0.getNode()) && N0.hasOneUse()))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    EVT MemVT = LN0->getMemoryVT();
    // If we zero all the possible extended bits, then we can turn this into
    // a zextload if we are running before legalize or the operation is legal.
    unsigned ExtBitSize = N1.getScalarValueSizeInBits();
    unsigned MemBitSize = MemVT.getScalarSizeInBits();
    APInt ExtBits = APInt::getHighBitsSet(ExtBitSize, ExtBitSize - MemBitSize);
    if (DAG.MaskedValueIsZero(N1, ExtBits) &&
        ((!LegalOperations && LN0->isSimple()) ||
         TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) {
      SDValue ExtLoad =
          DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, LN0->getChain(),
                         LN0->getBasePtr(), MemVT, LN0->getMemOperand());
      AddToWorklist(N);
      CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
      return SDValue(N, 0); // Return N so it doesn't get rechecked!
    }
  }

  // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const)
  if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) {
    if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
                                           N0.getOperand(1), false))
      return BSwap;
  }

  if (SDValue Shifts = unfoldExtremeBitClearingToShifts(N))
    return Shifts;

  if (TLI.hasBitTest(N0, N1))
    if (SDValue V = combineShiftAnd1ToBitTest(N, DAG))
      return V;

  return SDValue();
}

/// Match (a >> 8) | (a << 8) as (bswap a) >> 16.
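/// For example, for i16 a = 0x1234: (a >> 8) | (a << 8) =
/// 0x0012 | 0x3400 = 0x3412 = bswap(a). For wider types the masked variants
/// put the swapped halfword in the low 16 bits, matching
/// (bswap a) >> (bitwidth - 16).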
SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
                                        bool DemandHighBits) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
    return SDValue();
  if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT))
    return SDValue();

  // Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff)
  bool LookPassAnd0 = false;
  bool LookPassAnd1 = false;
  if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
      std::swap(N0, N1);
  if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
      std::swap(N0, N1);
  if (N0.getOpcode() == ISD::AND) {
    if (!N0.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    // Also handle 0xffff since the LHS is guaranteed to have zeros there.
    // This is needed for X86.
    if (!N01C || (N01C->getZExtValue() != 0xFF00 &&
                  N01C->getZExtValue() != 0xFFFF))
      return SDValue();
    N0 = N0.getOperand(0);
    LookPassAnd0 = true;
  }

  if (N1.getOpcode() == ISD::AND) {
    if (!N1.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C || N11C->getZExtValue() != 0xFF)
      return SDValue();
    N1 = N1.getOperand(0);
    LookPassAnd1 = true;
  }

  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.getNode()->hasOneUse() || !N1.getNode()->hasOneUse())
    return SDValue();

  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
  if (!N01C || !N11C)
    return SDValue();
  if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8)
    return SDValue();

  // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
  SDValue N00 = N0->getOperand(0);
  if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
    if (!N00.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
    if (!N001C || N001C->getZExtValue() != 0xFF)
      return SDValue();
    N00 = N00.getOperand(0);
    LookPassAnd0 = true;
  }

  SDValue N10 = N1->getOperand(0);
  if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
    if (!N10.getNode()->hasOneUse())
      return SDValue();
    ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
    // Also allow 0xFFFF since the bits will be shifted out. This is needed
    // for X86.
    if (!N101C || (N101C->getZExtValue() != 0xFF00 &&
                   N101C->getZExtValue() != 0xFFFF))
      return SDValue();
    N10 = N10.getOperand(0);
    LookPassAnd1 = true;
  }

  if (N00 != N10)
    return SDValue();

  // Make sure everything beyond the low halfword gets set to zero since the
  // SRL by (width - 16) will clear the top bits.
  unsigned OpSizeInBits = VT.getSizeInBits();
  if (DemandHighBits && OpSizeInBits > 16) {
    // If the left-shift isn't masked out then the only way this is a bswap is
    // if all bits beyond the low 8 are 0. In that case the entire pattern
    // reduces to a left shift anyway: leave it for other parts of the combiner.
    if (!LookPassAnd0)
      return SDValue();

    // However, if the right shift isn't masked out then it might be because
    // it's not needed. See if we can spot that too.
    if (!LookPassAnd1 &&
        !DAG.MaskedValueIsZero(
            N10, APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - 16)))
      return SDValue();
  }

  SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00);
  if (OpSizeInBits > 16) {
    SDLoc DL(N);
    Res = DAG.getNode(ISD::SRL, DL, VT, Res,
                      DAG.getConstant(OpSizeInBits - 16, DL,
                                      getShiftAmountTy(VT)));
  }
  return Res;
}

/// Return true if the specified node is an element that makes up a 32-bit
/// packed halfword byteswap.
/// ((x & 0x000000ff) << 8) |
/// ((x & 0x0000ff00) >> 8) |
/// ((x & 0x00ff0000) << 8) |
/// ((x & 0xff000000) >> 8)
static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) {
  if (!N.getNode()->hasOneUse())
    return false;

  unsigned Opc = N.getOpcode();
  if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL)
    return false;

  SDValue N0 = N.getOperand(0);
  unsigned Opc0 = N0.getOpcode();
  if (Opc0 != ISD::AND && Opc0 != ISD::SHL && Opc0 != ISD::SRL)
    return false;

  ConstantSDNode *N1C = nullptr;
  // SHL or SRL: look upstream for AND mask operand
  if (Opc == ISD::AND)
    N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
  else if (Opc0 == ISD::AND)
    N1C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!N1C)
    return false;

  unsigned MaskByteOffset;
  switch (N1C->getZExtValue()) {
  default:
    return false;
  case 0xFF:       MaskByteOffset = 0; break;
  case 0xFF00:     MaskByteOffset = 1; break;
  case 0xFFFF:
    // In case demanded bits didn't clear the bits that will be shifted out.
    // This is needed for X86.
    if (Opc == ISD::SRL || (Opc == ISD::AND && Opc0 == ISD::SHL)) {
      MaskByteOffset = 1;
      break;
    }
    return false;
  case 0xFF0000:   MaskByteOffset = 2; break;
  case 0xFF000000: MaskByteOffset = 3; break;
  }

  // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00).
  if (Opc == ISD::AND) {
    if (MaskByteOffset == 0 || MaskByteOffset == 2) {
      // (x >> 8) & 0xff
      // (x >> 8) & 0xff0000
      if (Opc0 != ISD::SRL)
        return false;
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C || C->getZExtValue() != 8)
        return false;
    } else {
      // (x << 8) & 0xff00
      // (x << 8) & 0xff000000
      if (Opc0 != ISD::SHL)
        return false;
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C || C->getZExtValue() != 8)
        return false;
    }
  } else if (Opc == ISD::SHL) {
    // (x & 0xff) << 8
    // (x & 0xff0000) << 8
    if (MaskByteOffset != 0 && MaskByteOffset != 2)
      return false;
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!C || C->getZExtValue() != 8)
      return false;
  } else { // Opc == ISD::SRL
    // (x & 0xff00) >> 8
    // (x & 0xff000000) >> 8
    if (MaskByteOffset != 1 && MaskByteOffset != 3)
      return false;
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!C || C->getZExtValue() != 8)
      return false;
  }

  if (Parts[MaskByteOffset])
    return false;

  Parts[MaskByteOffset] = N0.getOperand(0).getNode();
  return true;
}

// Match 2 elements of a packed halfword bswap.
static bool isBSwapHWordPair(SDValue N, MutableArrayRef<SDNode *> Parts) {
  if (N.getOpcode() == ISD::OR)
    return isBSwapHWordElement(N.getOperand(0), Parts) &&
           isBSwapHWordElement(N.getOperand(1), Parts);

  if (N.getOpcode() == ISD::SRL && N.getOperand(0).getOpcode() == ISD::BSWAP) {
    ConstantSDNode *C = isConstOrConstSplat(N.getOperand(1));
    if (!C || C->getAPIntValue() != 16)
      return false;
    Parts[0] = Parts[1] = N.getOperand(0).getOperand(0).getNode();
    return true;
  }

  return false;
}

/// Match a 32-bit packed halfword bswap. That is
/// ((x & 0x000000ff) << 8) |
/// ((x & 0x0000ff00) >> 8) |
/// ((x & 0x00ff0000) << 8) |
/// ((x & 0xff000000) >> 8)
/// => (rotl (bswap x), 16)
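/// For example, x = 0xAABBCCDD gives
/// 0x0000DD00 | 0x000000CC | 0xBB000000 | 0x00AA0000 = 0xBBAADDCC,
/// which is rotl(bswap(x) = 0xDDCCBBAA, 16).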
SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32)
    return SDValue();
  if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT))
    return SDValue();

  // Look for either
  // (or (bswaphpair), (bswaphpair))
  // (or (or (bswaphpair), (and)), (and))
  // (or (or (and), (bswaphpair)), (and))
  SDNode *Parts[4] = {};

  if (isBSwapHWordPair(N0, Parts)) {
    // (or (or (and), (and)), (or (and), (and)))
    if (!isBSwapHWordPair(N1, Parts))
      return SDValue();
  } else if (N0.getOpcode() == ISD::OR) {
    // (or (or (or (and), (and)), (and)), (and))
    if (!isBSwapHWordElement(N1, Parts))
      return SDValue();
    SDValue N00 = N0.getOperand(0);
    SDValue N01 = N0.getOperand(1);
    if (!(isBSwapHWordElement(N01, Parts) && isBSwapHWordPair(N00, Parts)) &&
        !(isBSwapHWordElement(N00, Parts) && isBSwapHWordPair(N01, Parts)))
      return SDValue();
  } else
    return SDValue();

  // Make sure the parts are all coming from the same node.
  if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3])
    return SDValue();

  SDLoc DL(N);
  SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT,
                              SDValue(Parts[0], 0));

  // Result of the bswap should be rotated by 16. If that's not legal, then
  // do (x << 16) | (x >> 16).
  SDValue ShAmt = DAG.getConstant(16, DL, getShiftAmountTy(VT));
  if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
    return DAG.getNode(ISD::ROTL, DL, VT, BSwap, ShAmt);
  if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
    return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt);
  return DAG.getNode(ISD::OR, DL, VT,
                     DAG.getNode(ISD::SHL, DL, VT, BSwap, ShAmt),
                     DAG.getNode(ISD::SRL, DL, VT, BSwap, ShAmt));
}

/// This contains all DAGCombine rules which reduce two values combined by
/// an Or operation to a single value \see visitANDLike().
SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, SDNode *N) {
  EVT VT = N1.getValueType();
  SDLoc DL(N);

  // fold (or x, undef) -> -1
  if (!LegalOperations && (N0.isUndef() || N1.isUndef()))
    return DAG.getAllOnesConstant(DL, VT);

  if (SDValue V = foldLogicOfSetCCs(false, N0, N1, DL))
    return V;

  // (or (and X, C1), (and Y, C2))  -> (and (or X, Y), C3) if possible.
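  // For example, (or (and X, 0xF0), (and Y, 0x0F)) becomes
  // (and (or X, Y), 0xFF) when the low 4 bits of X and bits 4..7 of Y are
  // known zero, so neither operand can leak bits into the other's mask.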
  if (N0.getOpcode() == ISD::AND && N1.getOpcode() == ISD::AND &&
      // Don't increase # computations.
      (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
    // We can only do this xform if we know that bits from X that are set in C2
    // but not in C1 are already zero.  Likewise for Y.
    if (const ConstantSDNode *N0O1C =
        getAsNonOpaqueConstant(N0.getOperand(1))) {
      if (const ConstantSDNode *N1O1C =
          getAsNonOpaqueConstant(N1.getOperand(1))) {
        const APInt &LHSMask = N0O1C->getAPIntValue();
        const APInt &RHSMask = N1O1C->getAPIntValue();

        if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
            DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
          SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
                                  N0.getOperand(0), N1.getOperand(0));
          return DAG.getNode(ISD::AND, DL, VT, X,
                             DAG.getConstant(LHSMask | RHSMask, DL, VT));
        }
      }
    }
  }

  // (or (and X, M), (and X, N)) -> (and X, (or M, N))
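  // (AND distributes over OR: (X & M) | (X & N) == X & (M | N).)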
  if (N0.getOpcode() == ISD::AND &&
      N1.getOpcode() == ISD::AND &&
      N0.getOperand(0) == N1.getOperand(0) &&
      // Don't increase # computations.
      (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
    SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
                            N0.getOperand(1), N1.getOperand(1));
    return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), X);
  }

  return SDValue();
}

/// OR combines for which the commuted variant will be tried as well.
static SDValue visitORCommutative(
    SelectionDAG &DAG, SDValue N0, SDValue N1, SDNode *N) {
  EVT VT = N0.getValueType();
  if (N0.getOpcode() == ISD::AND) {
    // fold (or (and X, (xor Y, -1)), Y) -> (or X, Y)
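    // ((X & ~Y) | Y == X | Y by absorption, so the 'not' and the 'and'
    // both drop out.)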
    if (isBitwiseNot(N0.getOperand(1)) && N0.getOperand(1).getOperand(0) == N1)
      return DAG.getNode(ISD::OR, SDLoc(N), VT, N0.getOperand(0), N1);

    // fold (or (and (xor Y, -1), X), Y) -> (or X, Y)
    if (isBitwiseNot(N0.getOperand(0)) && N0.getOperand(0).getOperand(0) == N1)
      return DAG.getNode(ISD::OR, SDLoc(N), VT, N0.getOperand(1), N1);
  }

  return SDValue();
}

SDValue DAGCombiner::visitOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N1.getValueType();

  // x | x --> x
  if (N0 == N1)
    return N0;

  // fold vector ops
  if (VT.isVector()) {
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

    // fold (or x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      return N1;
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;

    // fold (or x, -1) -> -1, vector edition
    if (ISD::isBuildVectorAllOnes(N0.getNode()))
      // do not return N0, because an undef element may exist in N0
      return DAG.getAllOnesConstant(SDLoc(N), N0.getValueType());
    if (ISD::isBuildVectorAllOnes(N1.getNode()))
      // do not return N1, because an undef element may exist in N1
      return DAG.getAllOnesConstant(SDLoc(N), N1.getValueType());

    // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask)
    // Do this only if the resulting shuffle is legal.
    if (isa<ShuffleVectorSDNode>(N0) &&
        isa<ShuffleVectorSDNode>(N1) &&
        // Avoid folding a node with illegal type.
        TLI.isTypeLegal(VT)) {
      bool ZeroN00 = ISD::isBuildVectorAllZeros(N0.getOperand(0).getNode());
      bool ZeroN01 = ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode());
      bool ZeroN10 = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
      bool ZeroN11 = ISD::isBuildVectorAllZeros(N1.getOperand(1).getNode());
      // Ensure both shuffles have a zero input.
      if ((ZeroN00 != ZeroN01) && (ZeroN10 != ZeroN11)) {
        assert((!ZeroN00 || !ZeroN01) && "Both inputs zero!");
        assert((!ZeroN10 || !ZeroN11) && "Both inputs zero!");
        const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
        const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
        bool CanFold = true;
        int NumElts = VT.getVectorNumElements();
        SmallVector<int, 4> Mask(NumElts);

        for (int i = 0; i != NumElts; ++i) {
          int M0 = SV0->getMaskElt(i);
          int M1 = SV1->getMaskElt(i);

          // Determine if either index is pointing to a zero vector.
          bool M0Zero = M0 < 0 || (ZeroN00 == (M0 < NumElts));
          bool M1Zero = M1 < 0 || (ZeroN10 == (M1 < NumElts));

          // If one element is zero and the other side is undef, keep undef.
          // This also handles the case where both are undef.
          if ((M0Zero && M1 < 0) || (M1Zero && M0 < 0)) {
            Mask[i] = -1;
            continue;
          }

          // Make sure only one of the elements is zero.
          if (M0Zero == M1Zero) {
            CanFold = false;
            break;
          }

          assert((M0 >= 0 || M1 >= 0) && "Undef index!");

          // We have a zero and non-zero element. If the non-zero came from
          // SV0 make the index a LHS index. If it came from SV1, make it
          // a RHS index. We need to mod by NumElts because we don't care
          // which operand it came from in the original shuffles.
          Mask[i] = M1Zero ? M0 % NumElts : (M1 % NumElts) + NumElts;
        }

        if (CanFold) {
          SDValue NewLHS = ZeroN00 ? N0.getOperand(1) : N0.getOperand(0);
          SDValue NewRHS = ZeroN10 ? N1.getOperand(1) : N1.getOperand(0);

          SDValue LegalShuffle =
              TLI.buildLegalVectorShuffle(VT, SDLoc(N), NewLHS, NewRHS,
                                          Mask, DAG);
          if (LegalShuffle)
            return LegalShuffle;
        }
      }
    }
  }

  // fold (or c1, c2) -> c1|c2
  ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N0C && N1C && !N1C->isOpaque())
    return DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N), VT, N0C, N1C);
  // canonicalize constant to RHS
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
     !DAG.isConstantIntBuildVectorOrConstantInt(N1))
    return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
  // fold (or x, 0) -> x
  if (isNullConstant(N1))
    return N0;
  // fold (or x, -1) -> -1
  if (isAllOnesConstant(N1))
    return N1;

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // fold (or x, c) -> c iff (x & ~c) == 0
  if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
    return N1;

  if (SDValue Combined = visitORLike(N0, N1, N))
    return Combined;

  // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
  if (SDValue BSwap = MatchBSwapHWord(N, N0, N1))
    return BSwap;
  if (SDValue BSwap = MatchBSwapHWordLow(N, N0, N1))
    return BSwap;

  // reassociate or
  if (SDValue ROR = reassociateOps(ISD::OR, SDLoc(N), N0, N1, N->getFlags()))
    return ROR;

  // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
  // iff (c1 & c2) != 0 or c1/c2 are undef.
  auto MatchIntersect = [](ConstantSDNode *C1, ConstantSDNode *C2) {
    return !C1 || !C2 || C1->getAPIntValue().intersects(C2->getAPIntValue());
  };
  if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
      ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchIntersect, true)) {
    if (SDValue COR = DAG.FoldConstantArithmetic(
            ISD::OR, SDLoc(N1), VT, N1.getNode(), N0.getOperand(1).getNode())) {
      SDValue IOR = DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1);
      AddToWorklist(IOR.getNode());
      return DAG.getNode(ISD::AND, SDLoc(N), VT, COR, IOR);
    }
  }

  if (SDValue Combined = visitORCommutative(DAG, N0, N1, N))
    return Combined;
  if (SDValue Combined = visitORCommutative(DAG, N1, N0, N))
    return Combined;

  // Simplify: (or (op x...), (op y...))  -> (op (or x, y))
  if (N0.getOpcode() == N1.getOpcode())
    if (SDValue V = hoistLogicOpWithSameOpcodeHands(N))
      return V;

  // See if this is some rotate idiom.
  if (SDValue Rot = MatchRotate(N0, N1, SDLoc(N)))
    return Rot;

  if (SDValue Load = MatchLoadCombine(N))
    return Load;

  // Simplify the operands using demanded-bits information.
  if (SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // If OR can be rewritten into ADD, try combines based on ADD.
  if ((!LegalOperations || TLI.isOperationLegal(ISD::ADD, VT)) &&
      DAG.haveNoCommonBitsSet(N0, N1))
    if (SDValue Combined = visitADDLike(N))
      return Combined;

  return SDValue();
}

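/// If \p Op is (and X, C) for a constant (or splat constant) C, return X and
/// set \p Mask to the mask operand; otherwise return \p Op unchanged.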
static SDValue stripConstantMask(SelectionDAG &DAG, SDValue Op, SDValue &Mask) {
  if (Op.getOpcode() == ISD::AND &&
      DAG.isConstantIntBuildVectorOrConstantInt(Op.getOperand(1))) {
    Mask = Op.getOperand(1);
    return Op.getOperand(0);
  }
  return Op;
}

/// Match "(X shl/srl V1) & V2" where V2 may not be present.
static bool matchRotateHalf(SelectionDAG &DAG, SDValue Op, SDValue &Shift,
                            SDValue &Mask) {
  Op = stripConstantMask(DAG, Op, Mask);
  if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
    Shift = Op;
    return true;
  }
  return false;
}

/// Helper function for visitOR to extract the needed side of a rotate idiom
/// from a shl/srl/mul/udiv.  This is meant to handle cases where
/// InstCombine merged some outside op with one of the shifts from
/// the rotate pattern.
/// \returns An empty \c SDValue if the needed shift couldn't be extracted.
/// Otherwise, returns an expansion of \p ExtractFrom based on the following
/// patterns:
///
///   (or (add v v) (shrl v bitwidth-1)):
///     expands (add v v) -> (shl v 1)
///
///   (or (mul v c0) (shrl (mul v c1) c2)):
///     expands (mul v c0) -> (shl (mul v c1) c3)
///
///   (or (udiv v c0) (shl (udiv v c1) c2)):
///     expands (udiv v c0) -> (shrl (udiv v c1) c3)
///
///   (or (shl v c0) (shrl (shl v c1) c2)):
///     expands (shl v c0) -> (shl (shl v c1) c3)
///
///   (or (shrl v c0) (shl (shrl v c1) c2)):
///     expands (shrl v c0) -> (shrl (shrl v c1) c3)
///
/// Such that in all cases, c3+c2==bitwidth(op v c1).
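///
/// For example (illustrative, i32): in (or (mul v 24) (shrl (mul v 3) 29)),
/// (mul v 24) expands to (shl (mul v 3) 3), since 24 == 3 << 3 and
/// 3 + 29 == 32, exposing a rotate of (mul v 3) by 3/29 bits.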
static SDValue extractShiftForRotate(SelectionDAG &DAG, SDValue OppShift,
                                     SDValue ExtractFrom, SDValue &Mask,
                                     const SDLoc &DL) {
  assert(OppShift && ExtractFrom && "Empty SDValue");
  assert(
      (OppShift.getOpcode() == ISD::SHL || OppShift.getOpcode() == ISD::SRL) &&
      "Existing shift must be valid as a rotate half");

  ExtractFrom = stripConstantMask(DAG, ExtractFrom, Mask);

  // Value and Type of the shift.
  SDValue OppShiftLHS = OppShift.getOperand(0);
  EVT ShiftedVT = OppShiftLHS.getValueType();

  // Amount of the existing shift.
  ConstantSDNode *OppShiftCst = isConstOrConstSplat(OppShift.getOperand(1));

  // (add v v) -> (shl v 1)
  if (OppShift.getOpcode() == ISD::SRL && OppShiftCst &&
      ExtractFrom.getOpcode() == ISD::ADD &&
      ExtractFrom.getOperand(0) == ExtractFrom.getOperand(1) &&
      ExtractFrom.getOperand(0) == OppShiftLHS &&
      OppShiftCst->getAPIntValue() == ShiftedVT.getScalarSizeInBits() - 1)
    return DAG.getNode(ISD::SHL, DL, ShiftedVT, OppShiftLHS,
                       DAG.getShiftAmountConstant(1, ShiftedVT, DL));

  // Preconditions:
  //    (or (op0 v c0) (shiftl/r (op0 v c1) c2))
  //
  // Find opcode of the needed shift to be extracted from (op0 v c0).
  unsigned Opcode = ISD::DELETED_NODE;
  bool IsMulOrDiv = false;
  // Set Opcode and IsMulOrDiv if the extract opcode matches the needed shift
  // opcode or its arithmetic (mul or udiv) variant.
  auto SelectOpcode = [&](unsigned NeededShift, unsigned MulOrDivVariant) {
    IsMulOrDiv = ExtractFrom.getOpcode() == MulOrDivVariant;
    if (!IsMulOrDiv && ExtractFrom.getOpcode() != NeededShift)
      return false;
    Opcode = NeededShift;
    return true;
  };
  // op0 must be either the needed shift opcode or the mul/udiv equivalent
  // that the needed shift can be extracted from.
  if ((OppShift.getOpcode() != ISD::SRL || !SelectOpcode(ISD::SHL, ISD::MUL)) &&
      (OppShift.getOpcode() != ISD::SHL || !SelectOpcode(ISD::SRL, ISD::UDIV)))
    return SDValue();

  // op0 must be the same opcode on both sides, have the same LHS argument,
  // and produce the same value type.
  if (OppShiftLHS.getOpcode() != ExtractFrom.getOpcode() ||
      OppShiftLHS.getOperand(0) != ExtractFrom.getOperand(0) ||
      ShiftedVT != ExtractFrom.getValueType())
    return SDValue();

  // Constant mul/udiv/shift amount from the RHS of the shift's LHS op.
  ConstantSDNode *OppLHSCst = isConstOrConstSplat(OppShiftLHS.getOperand(1));
  // Constant mul/udiv/shift amount from the RHS of the ExtractFrom op.
  ConstantSDNode *ExtractFromCst =
      isConstOrConstSplat(ExtractFrom.getOperand(1));
  // TODO: We should be able to handle non-uniform constant vectors for these
  // values.
  // Check that we have constant values.
  if (!OppShiftCst || !OppShiftCst->getAPIntValue() ||
      !OppLHSCst || !OppLHSCst->getAPIntValue() ||
      !ExtractFromCst || !ExtractFromCst->getAPIntValue())
    return SDValue();

  // Compute the shift amount we need to extract to complete the rotate.
  const unsigned VTWidth = ShiftedVT.getScalarSizeInBits();
  if (OppShiftCst->getAPIntValue().ugt(VTWidth))
    return SDValue();
  APInt NeededShiftAmt = VTWidth - OppShiftCst->getAPIntValue();
  // Normalize the bitwidth of the two mul/udiv/shift constant operands.
  APInt ExtractFromAmt = ExtractFromCst->getAPIntValue();
  APInt OppLHSAmt = OppLHSCst->getAPIntValue();
  zeroExtendToMatch(ExtractFromAmt, OppLHSAmt);

  // Now try extract the needed shift from the ExtractFrom op and see if the
  // result matches up with the existing shift's LHS op.
  if (IsMulOrDiv) {
    // Op to extract from is a mul or udiv by a constant.
    // Check:
    //     c2 / (1 << (bitwidth(op0 v c0) - c1)) == c0
    //     c2 % (1 << (bitwidth(op0 v c0) - c1)) == 0
    const APInt ExtractDiv = APInt::getOneBitSet(ExtractFromAmt.getBitWidth(),
                                                 NeededShiftAmt.getZExtValue());
    APInt ResultAmt;
    APInt Rem;
    APInt::udivrem(ExtractFromAmt, ExtractDiv, ResultAmt, Rem);
    if (Rem != 0 || ResultAmt != OppLHSAmt)
      return SDValue();
  } else {
    // Op to extract from is a shift by a constant.
    // Check:
    //      c2 - (bitwidth(op0 v c0) - c1) == c0
    if (OppLHSAmt != ExtractFromAmt - NeededShiftAmt.zextOrTrunc(
                                          ExtractFromAmt.getBitWidth()))
      return SDValue();
  }

  // Return the expanded shift op that should allow a rotate to be formed.
  EVT ShiftVT = OppShift.getOperand(1).getValueType();
  EVT ResVT = ExtractFrom.getValueType();
  SDValue NewShiftNode = DAG.getConstant(NeededShiftAmt, DL, ShiftVT);
  return DAG.getNode(Opcode, DL, ResVT, OppShiftLHS, NewShiftNode);
}

// Return true if we can prove that, whenever Neg and Pos are both in the
// range [0, EltSize), Neg == (Pos == 0 ? 0 : EltSize - Pos).  This means that
// for two opposing shifts shift1 and shift2 and a value X with OpBits bits:
//
//     (or (shift1 X, Neg), (shift2 X, Pos))
//
// reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
// in direction shift1 by Neg.  The range [0, EltSize) means that we only need
// to consider shift amounts with defined behavior.
static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
                           SelectionDAG &DAG) {
  // If EltSize is a power of 2 then:
  //
  //  (a) (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1)
  //  (b) Neg == Neg & (EltSize - 1) whenever Neg is in [0, EltSize).
  //
  // So if EltSize is a power of 2 and Neg is (and Neg', EltSize-1), we check
  // for the stronger condition:
  //
  //     Neg & (EltSize - 1) == (EltSize - Pos) & (EltSize - 1)    [A]
  //
  // for all Neg and Pos.  Since Neg & (EltSize - 1) == Neg' & (EltSize - 1)
  // we can just replace Neg with Neg' for the rest of the function.
  //
  // In other cases we check for the even stronger condition:
  //
  //     Neg == EltSize - Pos                                    [B]
  //
  // for all Neg and Pos.  Note that the (or ...) then invokes undefined
  // behavior if Pos == 0 (and consequently Neg == EltSize).
  //
  // We could actually use [A] whenever EltSize is a power of 2, but the
  // only extra cases that it would match are those uninteresting ones
  // where Neg and Pos are never in range at the same time.  E.g. for
  // EltSize == 32, using [A] would allow a Neg of the form (sub 64, Pos)
  // as well as (sub 32, Pos), but:
  //
  //     (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos))
  //
  // always invokes undefined behavior for 32-bit X.
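  //
  // For example (illustrative): with EltSize == 32, Pos == y, and
  // Neg == (and (sub 32, y), 31), the mask is stripped (MaskLoBits == 5) and
  // the remaining check is that NegC == 32 has no bits set below bit 5,
  // which holds.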
  //
  // Below, Mask == EltSize - 1 when using [A] and is all-ones otherwise.
  unsigned MaskLoBits = 0;
  if (Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) {
    if (ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(1))) {
      KnownBits Known = DAG.computeKnownBits(Neg.getOperand(0));
      unsigned Bits = Log2_64(EltSize);
      if (NegC->getAPIntValue().getActiveBits() <= Bits &&
          ((NegC->getAPIntValue() | Known.Zero).countTrailingOnes() >= Bits)) {
        Neg = Neg.getOperand(0);
        MaskLoBits = Bits;
      }
    }
  }

  // Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1.
  if (Neg.getOpcode() != ISD::SUB)
    return false;
  ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(0));
  if (!NegC)
    return false;
  SDValue NegOp1 = Neg.getOperand(1);

  // On the RHS of [A], if Pos is Pos' & (EltSize - 1), just replace Pos with
  // Pos'.  The truncation is redundant for the purpose of the equality.
  if (MaskLoBits && Pos.getOpcode() == ISD::AND) {
    if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) {
      KnownBits Known = DAG.computeKnownBits(Pos.getOperand(0));
      if (PosC->getAPIntValue().getActiveBits() <= MaskLoBits &&
          ((PosC->getAPIntValue() | Known.Zero).countTrailingOnes() >=
           MaskLoBits))
        Pos = Pos.getOperand(0);
    }
  }

  // The condition we need is now:
  //
  //     (NegC - NegOp1) & Mask == (EltSize - Pos) & Mask
  //
  // If NegOp1 == Pos then we need:
  //
  //              EltSize & Mask == NegC & Mask
  //
  // (because "x & Mask" is a truncation and distributes through subtraction).
  APInt Width;
  if (Pos == NegOp1)
    Width = NegC->getAPIntValue();

  // Check for cases where Pos has the form (add NegOp1, PosC) for some PosC.
  // Then the condition we want to prove becomes:
  //
  //     (NegC - NegOp1) & Mask == (EltSize - (NegOp1 + PosC)) & Mask
  //
  // which, again because "x & Mask" is a truncation, becomes:
  //
  //                NegC & Mask == (EltSize - PosC) & Mask
  //             EltSize & Mask == (NegC + PosC) & Mask
  else if (Pos.getOpcode() == ISD::ADD && Pos.getOperand(0) == NegOp1) {
    if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1)))
      Width = PosC->getAPIntValue() + NegC->getAPIntValue();
    else
      return false;
  } else
    return false;

  // Now we just need to check that EltSize & Mask == Width & Mask.
  if (MaskLoBits)
    // EltSize & Mask is 0 since Mask is EltSize - 1.
    return Width.getLoBits(MaskLoBits) == 0;
  return Width == EltSize;
}

// A subroutine of MatchRotate used once we have found an OR of two opposite
// shifts of Shifted.  If Neg == <operand size> - Pos then the OR reduces
// to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the
// former being preferred if supported.  InnerPos and InnerNeg are Pos and
// Neg with outer conversions stripped away.
SDValue DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
                                       SDValue Neg, SDValue InnerPos,
                                       SDValue InnerNeg, unsigned PosOpcode,
                                       unsigned NegOpcode, const SDLoc &DL) {
  // fold (or (shl x, (*ext y)),
  //          (srl x, (*ext (sub 32, y)))) ->
  //   (rotl x, y) or (rotr x, (sub 32, y))
  //
  // fold (or (shl x, (*ext (sub 32, y))),
  //          (srl x, (*ext y))) ->
  //   (rotr x, y) or (rotl x, (sub 32, y))
  EVT VT = Shifted.getValueType();
  if (matchRotateSub(InnerPos, InnerNeg, VT.getScalarSizeInBits(), DAG)) {
    bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
    return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted,
                       HasPos ? Pos : Neg);
  }

  return SDValue();
}

// MatchRotate - Handle an 'or' of two operands.  If this is one of the many
// idioms for rotate, and if the target supports rotation instructions, generate
// a rot[lr].
SDValue DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) {
  // Must be a legal type. Expanded and promoted values won't work with rotates.
  EVT VT = LHS.getValueType();
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // The target must have at least one rotate flavor.
  bool HasROTL = hasOperation(ISD::ROTL, VT);
  bool HasROTR = hasOperation(ISD::ROTR, VT);
  if (!HasROTL && !HasROTR)
    return SDValue();

  // Check for truncated rotate.
  if (LHS.getOpcode() == ISD::TRUNCATE && RHS.getOpcode() == ISD::TRUNCATE &&
      LHS.getOperand(0).getValueType() == RHS.getOperand(0).getValueType()) {
    assert(LHS.getValueType() == RHS.getValueType());
    if (SDValue Rot = MatchRotate(LHS.getOperand(0), RHS.getOperand(0), DL)) {
      return DAG.getNode(ISD::TRUNCATE, SDLoc(LHS), LHS.getValueType(), Rot);
    }
  }

  // Match "(X shl/srl V1) & V2" where V2 may not be present.
  SDValue LHSShift;   // The shift.
  SDValue LHSMask;    // AND value if any.
  matchRotateHalf(DAG, LHS, LHSShift, LHSMask);

  SDValue RHSShift;   // The shift.
  SDValue RHSMask;    // AND value if any.
  matchRotateHalf(DAG, RHS, RHSShift, RHSMask);

  // If neither side matched a rotate half, bail
  if (!LHSShift && !RHSShift)
    return SDValue();

  // InstCombine may have combined a constant shl, srl, mul, or udiv with one
  // side of the rotate, so try to handle that here. In all cases we need to
  // pass the matched shift from the opposite side to compute the opcode and
  // needed shift amount to extract.  We still want to do this if both sides
  // matched a rotate half because one half may be a potential overshift that
  // can be broken down (i.e. if InstCombine merged two shl or srl ops into a
  // single one).

  // Have LHS side of the rotate, try to extract the needed shift from the RHS.
  if (LHSShift)
    if (SDValue NewRHSShift =
            extractShiftForRotate(DAG, LHSShift, RHS, RHSMask, DL))
      RHSShift = NewRHSShift;
  // Have RHS side of the rotate, try to extract the needed shift from the LHS.
  if (RHSShift)
    if (SDValue NewLHSShift =
            extractShiftForRotate(DAG, RHSShift, LHS, LHSMask, DL))
      LHSShift = NewLHSShift;

  // If a side is still missing, nothing else we can do.
  if (!RHSShift || !LHSShift)
    return SDValue();

  // At this point we've matched or extracted a shift op on each side.

  if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
    return SDValue(); // Not shifting the same value.

  if (LHSShift.getOpcode() == RHSShift.getOpcode())
    return SDValue(); // Shifts must disagree.

  // Canonicalize shl to left side in a shl/srl pair.
  if (RHSShift.getOpcode() == ISD::SHL) {
    std::swap(LHS, RHS);
    std::swap(LHSShift, RHSShift);
    std::swap(LHSMask, RHSMask);
  }

  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  SDValue LHSShiftArg = LHSShift.getOperand(0);
  SDValue LHSShiftAmt = LHSShift.getOperand(1);
  SDValue RHSShiftArg = RHSShift.getOperand(0);
  SDValue RHSShiftAmt = RHSShift.getOperand(1);

  // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
  // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
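  // For example (illustrative, i32): (or (shl x, 8), (srl x, 24)) becomes
  // (rotl x, 8) or (rotr x, 24), depending on which rotate is legal.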
  auto MatchRotateSum = [EltSizeInBits](ConstantSDNode *LHS,
                                        ConstantSDNode *RHS) {
    return (LHS->getAPIntValue() + RHS->getAPIntValue()) == EltSizeInBits;
  };
  if (ISD::matchBinaryPredicate(LHSShiftAmt, RHSShiftAmt, MatchRotateSum)) {
    SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
                              LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt);

    // If there is an AND of either shifted operand, apply it to the result.
    if (LHSMask.getNode() || RHSMask.getNode()) {
      SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
      SDValue Mask = AllOnes;

      if (LHSMask.getNode()) {
        SDValue RHSBits = DAG.getNode(ISD::SRL, DL, VT, AllOnes, RHSShiftAmt);
        Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
                           DAG.getNode(ISD::OR, DL, VT, LHSMask, RHSBits));
      }
      if (RHSMask.getNode()) {
        SDValue LHSBits = DAG.getNode(ISD::SHL, DL, VT, AllOnes, LHSShiftAmt);
        Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
                           DAG.getNode(ISD::OR, DL, VT, RHSMask, LHSBits));
      }

      Rot = DAG.getNode(ISD::AND, DL, VT, Rot, Mask);
    }

    return Rot;
  }

  // If there is a mask here, and we have a variable shift, we can't be sure
  // that we're masking out the right bits.
  if (LHSMask.getNode() || RHSMask.getNode())
    return SDValue();

  // If the shift amount is sign/zext/any-extended just peel it off.
  SDValue LExtOp0 = LHSShiftAmt;
  SDValue RExtOp0 = RHSShiftAmt;
  if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
       LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
       LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
       LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
      (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
       RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
       RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
       RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
    LExtOp0 = LHSShiftAmt.getOperand(0);
    RExtOp0 = RHSShiftAmt.getOperand(0);
  }

  SDValue TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt,
                                   LExtOp0, RExtOp0, ISD::ROTL, ISD::ROTR, DL);
  if (TryL)
    return TryL;

  SDValue TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt,
                                   RExtOp0, LExtOp0, ISD::ROTR, ISD::ROTL, DL);
  if (TryR)
    return TryR;

  return SDValue();
}

namespace {

/// Represents known origin of an individual byte in load combine pattern. The
/// value of the byte is either constant zero or comes from memory.
struct ByteProvider {
  // For constant zero providers Load is set to nullptr. For memory providers
  // Load represents the node which loads the byte from memory.
  // ByteOffset is the offset of the byte in the value produced by the load.
  LoadSDNode *Load = nullptr;
  unsigned ByteOffset = 0;

  ByteProvider() = default;

  static ByteProvider getMemory(LoadSDNode *Load, unsigned ByteOffset) {
    return ByteProvider(Load, ByteOffset);
  }

  static ByteProvider getConstantZero() { return ByteProvider(nullptr, 0); }

  bool isConstantZero() const { return !Load; }
  bool isMemory() const { return Load; }

  bool operator==(const ByteProvider &Other) const {
    return Other.Load == Load && Other.ByteOffset == ByteOffset;
  }

private:
  ByteProvider(LoadSDNode *Load, unsigned ByteOffset)
      : Load(Load), ByteOffset(ByteOffset) {}
};

} // end anonymous namespace

/// Recursively traverses the expression calculating the origin of the requested
/// byte of the given value. Returns None if the provider can't be calculated.
///
/// For all values except the root of the expression, verifies that the value
/// has exactly one use, returning None otherwise. This way, if the origin of
/// the byte is returned, it is guaranteed that the values which contribute to
/// the byte are not used outside of this expression.
///
/// Because the parts of the expression are not allowed to have more than one
/// use this function iterates over trees, not DAGs. So it never visits the same
/// node more than once.
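///
/// For example (illustrative): for an i16 value
/// (or (zero_extend (load p)), (shl (zero_extend (load q)), 8)), byte 0 is
/// provided by the load of p and byte 1 by the load of q, each at
/// ByteOffset 0.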
static const Optional<ByteProvider>
calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
                      bool Root = false) {
  // A typical i64-by-i8 pattern requires recursion up to a depth of 8 calls
  if (Depth == 10)
    return None;

  if (!Root && !Op.hasOneUse())
    return None;

  assert(Op.getValueType().isScalarInteger() && "can't handle other types");
  unsigned BitWidth = Op.getValueSizeInBits();
  if (BitWidth % 8 != 0)
    return None;
  unsigned ByteWidth = BitWidth / 8;
  assert(Index < ByteWidth && "invalid index requested");
  (void) ByteWidth;

  switch (Op.getOpcode()) {
  case ISD::OR: {
    auto LHS = calculateByteProvider(Op->getOperand(0), Index, Depth + 1);
    if (!LHS)
      return None;
    auto RHS = calculateByteProvider(Op->getOperand(1), Index, Depth + 1);
    if (!RHS)
      return None;

    if (LHS->isConstantZero())
      return RHS;
    if (RHS->isConstantZero())
      return LHS;
    return None;
  }
  case ISD::SHL: {
    auto ShiftOp = dyn_cast<ConstantSDNode>(Op->getOperand(1));
    if (!ShiftOp)
      return None;

    uint64_t BitShift = ShiftOp->getZExtValue();
    if (BitShift % 8 != 0)
      return None;
    uint64_t ByteShift = BitShift / 8;

    return Index < ByteShift
               ? ByteProvider::getConstantZero()
               : calculateByteProvider(Op->getOperand(0), Index - ByteShift,
                                       Depth + 1);
  }
  case ISD::ANY_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND: {
    SDValue NarrowOp = Op->getOperand(0);
    unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits();
    if (NarrowBitWidth % 8 != 0)
      return None;
    uint64_t NarrowByteWidth = NarrowBitWidth / 8;

    if (Index >= NarrowByteWidth)
      return Op.getOpcode() == ISD::ZERO_EXTEND
                 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
                 : None;
    return calculateByteProvider(NarrowOp, Index, Depth + 1);
  }
  case ISD::BSWAP:
    return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1,
                                 Depth + 1);
  case ISD::LOAD: {
    auto L = cast<LoadSDNode>(Op.getNode());
    if (!L->isSimple() || L->isIndexed())
      return None;

    unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits();
    if (NarrowBitWidth % 8 != 0)
      return None;
    uint64_t NarrowByteWidth = NarrowBitWidth / 8;

    if (Index >= NarrowByteWidth)
      return L->getExtensionType() == ISD::ZEXTLOAD
                 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
                 : None;
    return ByteProvider::getMemory(L, Index);
  }
  }

  return None;
}

static unsigned LittleEndianByteAt(unsigned BW, unsigned i) {
  return i;
}

static unsigned BigEndianByteAt(unsigned BW, unsigned i) {
  return BW - i - 1;
}

// Check if the byte offsets we are looking at match either a big endian or a
// little endian value load. Return true for big endian, false for little
// endian, and None if the match failed.
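//
// For example (illustrative): offsets {0, 1, 2, 3} relative to FirstOffset
// match a little endian load (returning false), while {3, 2, 1, 0} match a
// big endian load (returning true).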
static Optional<bool> isBigEndian(const SmallVector<int64_t, 4> &ByteOffsets,
                                  int64_t FirstOffset) {
  // Endianness can only be determined when there are at least 2 bytes.
  unsigned Width = ByteOffsets.size();
  if (Width < 2)
    return None;

  bool BigEndian = true, LittleEndian = true;
  for (unsigned i = 0; i < Width; i++) {
    int64_t CurrentByteOffset = ByteOffsets[i] - FirstOffset;
    LittleEndian &= CurrentByteOffset == LittleEndianByteAt(Width, i);
    BigEndian &= CurrentByteOffset == BigEndianByteAt(Width, i);
    if (!BigEndian && !LittleEndian)
      return None;
  }

  assert((BigEndian != LittleEndian) && "It should be either big endian or "
                                        "little endian");
  return BigEndian;
}

static SDValue stripTruncAndExt(SDValue Value) {
  switch (Value.getOpcode()) {
  case ISD::TRUNCATE:
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ANY_EXTEND:
    return stripTruncAndExt(Value.getOperand(0));
  }
  return Value;
}

/// Match a pattern where a wide type scalar value is stored by several narrow
/// stores. Fold it into a single store or a BSWAP and a store if the target
/// supports it.
///
/// Assuming little endian target:
///  i8 *p = ...
///  i32 val = ...
///  p[0] = (val >> 0) & 0xFF;
///  p[1] = (val >> 8) & 0xFF;
///  p[2] = (val >> 16) & 0xFF;
///  p[3] = (val >> 24) & 0xFF;
/// =>
///  *((i32)p) = val;
///
///  i8 *p = ...
///  i32 val = ...
///  p[0] = (val >> 24) & 0xFF;
///  p[1] = (val >> 16) & 0xFF;
///  p[2] = (val >> 8) & 0xFF;
///  p[3] = (val >> 0) & 0xFF;
/// =>
///  *((i32)p) = BSWAP(val);
SDValue DAGCombiner::MatchStoreCombine(StoreSDNode *N) {
  // Collect all the stores in the chain.
  SDValue Chain;
  SmallVector<StoreSDNode *, 8> Stores;
  for (StoreSDNode *Store = N; Store; Store = dyn_cast<StoreSDNode>(Chain)) {
    // TODO: Allow unordered atomics when wider type is legal (see D66309)
    if (Store->getMemoryVT() != MVT::i8 ||
        !Store->isSimple() || Store->isIndexed())
      return SDValue();
    Stores.push_back(Store);
    Chain = Store->getChain();
  }
  // Handle simple types only.
  unsigned Width = Stores.size();
  EVT VT = EVT::getIntegerVT(
    *DAG.getContext(), Width * N->getMemoryVT().getSizeInBits());
  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (LegalOperations && !TLI.isOperationLegal(ISD::STORE, VT))
    return SDValue();

  // Check if all the bytes of the combined value we are looking at are stored
  // to the same base address. Collect byte offsets from the base address into
  // ByteOffsets.
  SDValue CombinedValue;
  SmallVector<int64_t, 4> ByteOffsets(Width, INT64_MAX);
  int64_t FirstOffset = INT64_MAX;
  StoreSDNode *FirstStore = nullptr;
  Optional<BaseIndexOffset> Base;
  for (auto Store : Stores) {
    // All the stores store a different byte of the CombinedValue. A truncate
    // is required to get that byte's value.
    SDValue Trunc = Store->getValue();
    if (Trunc.getOpcode() != ISD::TRUNCATE)
      return SDValue();
    // A shift operation is required to get the right byte offset, except for
    // the first byte.
    int64_t Offset = 0;
    SDValue Value = Trunc.getOperand(0);
    if (Value.getOpcode() == ISD::SRL ||
        Value.getOpcode() == ISD::SRA) {
      ConstantSDNode *ShiftOffset =
        dyn_cast<ConstantSDNode>(Value.getOperand(1));
      // Trying to match the following pattern. The shift offset must be
      // a constant and a multiple of 8. It is the byte offset in "y".
      //
      // x = srl y, offset
      // i8 z = trunc x
      // store z, ...
      if (!ShiftOffset || (ShiftOffset->getSExtValue() % 8))
        return SDValue();

      Offset = ShiftOffset->getSExtValue() / 8;
      Value = Value.getOperand(0);
    }

    // Stores must share the same combined value with different offsets.
    if (!CombinedValue)
      CombinedValue = Value;
    else if (stripTruncAndExt(CombinedValue) != stripTruncAndExt(Value))
      return SDValue();

    // The trunc and all the extend operations should be stripped to get the
    // real value being stored.
    else if (CombinedValue.getValueType() != VT) {
      if (Value.getValueType() == VT ||
          Value.getValueSizeInBits() > CombinedValue.getValueSizeInBits())
        CombinedValue = Value;
      // Give up if the combined value type is smaller than the store size.
      if (CombinedValue.getValueSizeInBits() < VT.getSizeInBits())
        return SDValue();
    }

    // Stores must share the same base address
    BaseIndexOffset Ptr = BaseIndexOffset::match(Store, DAG);
    int64_t ByteOffsetFromBase = 0;
    if (!Base)
      Base = Ptr;
    else if (!Base->equalBaseIndex(Ptr, DAG, ByteOffsetFromBase))
      return SDValue();

    // Remember the first byte store
    if (ByteOffsetFromBase < FirstOffset) {
      FirstStore = Store;
      FirstOffset = ByteOffsetFromBase;
    }
    // Map the offset in the store and the offset in the combined value, and
    // early return if it has been set before.
    if (Offset < 0 || Offset >= Width || ByteOffsets[Offset] != INT64_MAX)
      return SDValue();
    ByteOffsets[Offset] = ByteOffsetFromBase;
  }

  assert(FirstOffset != INT64_MAX && "First byte offset must be set");
  assert(FirstStore && "First store must be set");

  // Check if the bytes of the combined value we are looking at match either a
  // big endian or a little endian value store.
  Optional<bool> IsBigEndian = isBigEndian(ByteOffsets, FirstOffset);
  if (!IsBigEndian.hasValue())
    return SDValue();

  // The node we are looking at matches with the pattern, check if we can
  // replace it with a single bswap if needed and store.

  // If the store needs byte swap check if the target supports it
  bool NeedsBswap = DAG.getDataLayout().isBigEndian() != *IsBigEndian;

  // Before legalize we can introduce illegal bswaps which will be later
  // converted to an explicit bswap sequence. This way we end up with a single
  // store and byte shuffling instead of several stores and byte shuffling.
  if (NeedsBswap && LegalOperations && !TLI.isOperationLegal(ISD::BSWAP, VT))
    return SDValue();

  // Check that a store of the wide type is both allowed and fast on the target
  bool Fast = false;
  bool Allowed =
      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                             *FirstStore->getMemOperand(), &Fast);
  if (!Allowed || !Fast)
    return SDValue();

  if (VT != CombinedValue.getValueType()) {
    assert(CombinedValue.getValueType().getSizeInBits() > VT.getSizeInBits() &&
           "Get unexpected store value to combine");
    CombinedValue = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, CombinedValue);
  }

  if (NeedsBswap)
    CombinedValue = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, CombinedValue);

  SDValue NewStore =
    DAG.getStore(Chain, SDLoc(N), CombinedValue, FirstStore->getBasePtr(),
                 FirstStore->getPointerInfo(), FirstStore->getAlignment());

  // Rely on other DAG combine rules to remove the other individual stores.
  DAG.ReplaceAllUsesWith(N, NewStore.getNode());
  return NewStore;
}

/// Match a pattern where a wide type scalar value is loaded by several narrow
/// loads and combined by shifts and ors. Fold it into a single load or a load
/// and a BSWAP if the target supports it.
///
/// Assuming little endian target:
///  i8 *a = ...
///  i32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
/// =>
///  i32 val = *((i32)a)
///
///  i8 *a = ...
///  i32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
/// =>
///  i32 val = BSWAP(*((i32)a))
///
/// TODO: This rule matches complex patterns with OR node roots and doesn't
/// interact well with the worklist mechanism. When a part of the pattern is
/// updated (e.g. one of the loads) its direct users are put into the worklist,
/// but the root node of the pattern which triggers the load combine is not
/// necessarily a direct user of the changed node. For example, once the address
/// of t28 load is reassociated load combine won't be triggered:
///             t25: i32 = add t4, Constant:i32<2>
///           t26: i64 = sign_extend t25
///        t27: i64 = add t2, t26
///       t28: i8,ch = load<LD1[%tmp9]> t0, t27, undef:i64
///     t29: i32 = zero_extend t28
///   t32: i32 = shl t29, Constant:i8<8>
/// t33: i32 = or t23, t32
/// As a possible fix visitLoad can check if the load can be a part of a load
/// combine pattern and add corresponding OR roots to the worklist.
SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
  assert(N->getOpcode() == ISD::OR &&
         "Can only match load combining against OR nodes");

  // Handles simple types only
  EVT VT = N->getValueType(0);
  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();
  unsigned ByteWidth = VT.getSizeInBits() / 8;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // Before legalize we can introduce too wide illegal loads which will be later
  // split into legal sized loads. This enables us to combine i64 load by i8
  // patterns to a couple of i32 loads on 32 bit targets.
  if (LegalOperations && !TLI.isOperationLegal(ISD::LOAD, VT))
    return SDValue();

  bool IsBigEndianTarget = DAG.getDataLayout().isBigEndian();
  auto MemoryByteOffset = [&] (ByteProvider P) {
    assert(P.isMemory() && "Must be a memory byte provider");
    unsigned LoadBitWidth = P.Load->getMemoryVT().getSizeInBits();
    assert(LoadBitWidth % 8 == 0 &&
           "can only analyze providers for individual bytes, not bits");
    unsigned LoadByteWidth = LoadBitWidth / 8;
    return IsBigEndianTarget
            ? BigEndianByteAt(LoadByteWidth, P.ByteOffset)
            : LittleEndianByteAt(LoadByteWidth, P.ByteOffset);
  };

  Optional<BaseIndexOffset> Base;
  SDValue Chain;

  SmallPtrSet<LoadSDNode *, 8> Loads;
  Optional<ByteProvider> FirstByteProvider;
  int64_t FirstOffset = INT64_MAX;

  // Check if all the bytes of the OR we are looking at are loaded from the same
  // base address. Collect bytes offsets from Base address in ByteOffsets.
  SmallVector<int64_t, 4> ByteOffsets(ByteWidth);
  for (unsigned i = 0; i < ByteWidth; i++) {
    auto P = calculateByteProvider(SDValue(N, 0), i, 0, /*Root=*/true);
    if (!P || !P->isMemory()) // All the bytes must be loaded from memory
      return SDValue();

    LoadSDNode *L = P->Load;
    assert(L->hasNUsesOfValue(1, 0) && L->isSimple() &&
           !L->isIndexed() &&
           "Must be enforced by calculateByteProvider");
    assert(L->getOffset().isUndef() && "Unindexed load must have undef offset");

    // All loads must share the same chain
    SDValue LChain = L->getChain();
    if (!Chain)
      Chain = LChain;
    else if (Chain != LChain)
      return SDValue();

    // Loads must share the same base address
    BaseIndexOffset Ptr = BaseIndexOffset::match(L, DAG);
    int64_t ByteOffsetFromBase = 0;
    if (!Base)
      Base = Ptr;
    else if (!Base->equalBaseIndex(Ptr, DAG, ByteOffsetFromBase))
      return SDValue();

    // Calculate the offset of the current byte from the base address
    ByteOffsetFromBase += MemoryByteOffset(*P);
    ByteOffsets[i] = ByteOffsetFromBase;

    // Remember the first byte load
    if (ByteOffsetFromBase < FirstOffset) {
      FirstByteProvider = P;
      FirstOffset = ByteOffsetFromBase;
    }

    Loads.insert(L);
  }
  assert(!Loads.empty() && "All the bytes of the value must be loaded from "
         "memory, so there must be at least one load which produces the value");
  assert(Base && "Base address of the accessed memory location must be set");
  assert(FirstOffset != INT64_MAX && "First byte offset must be set");

  // Check if the bytes of the OR we are looking at match with either big or
  // little endian value load
  Optional<bool> IsBigEndian = isBigEndian(ByteOffsets, FirstOffset);
  if (!IsBigEndian.hasValue())
    return SDValue();

  assert(FirstByteProvider && "must be set");

  // Ensure that the first byte is loaded from zero offset of the first load.
  // So the combined value can be loaded from the first load address.
  if (MemoryByteOffset(*FirstByteProvider) != 0)
    return SDValue();
  LoadSDNode *FirstLoad = FirstByteProvider->Load;

  // The node we are looking at matches with the pattern, check if we can
  // replace it with a single load and bswap if needed.

  // If the load needs byte swap check if the target supports it
  bool NeedsBswap = IsBigEndianTarget != *IsBigEndian;

  // Before legalize we can introduce illegal bswaps which will be later
  // converted to an explicit bswap sequence. This way we end up with a single
  // load and byte shuffling instead of several loads and byte shuffling.
  if (NeedsBswap && LegalOperations && !TLI.isOperationLegal(ISD::BSWAP, VT))
    return SDValue();

  // Check that a load of the wide type is both allowed and fast on the target
  bool Fast = false;
  bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
                                        VT, *FirstLoad->getMemOperand(), &Fast);
  if (!Allowed || !Fast)
    return SDValue();

  SDValue NewLoad =
      DAG.getLoad(VT, SDLoc(N), Chain, FirstLoad->getBasePtr(),
                  FirstLoad->getPointerInfo(), FirstLoad->getAlignment());

  // Transfer chain users from old loads to the new load.
  for (LoadSDNode *L : Loads)
    DAG.ReplaceAllUsesOfValueWith(SDValue(L, 1), SDValue(NewLoad.getNode(), 1));

  return NeedsBswap ? DAG.getNode(ISD::BSWAP, SDLoc(N), VT, NewLoad) : NewLoad;
}

// If the target has andn, bsl, or a similar bit-select instruction,
// we want to unfold masked merge, with canonical pattern of:
//   |        A  |  |B|
//   ((x ^ y) & m) ^ y
//    |  D  |
// Into:
//   (x & m) | (y & ~m)
// If y is a constant, and the 'andn' does not work with immediates,
// we unfold into a different pattern:
//   ~(~x & m) & (m | y)
// NOTE: we don't unfold the pattern if 'xor' is actually a 'not', because at
//       the very least that breaks andnpd / andnps patterns, and because those
//       patterns are simplified in IR and shouldn't be created in the DAG.
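//
// A worked example (illustrative, 4-bit values): with x = 0b1100, y = 0b1010,
// and m = 0b0110, ((x ^ y) & m) ^ y evaluates to 0b1100, and so does
// (x & m) | (y & ~m) == 0b0100 | 0b1000.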
SDValue DAGCombiner::unfoldMaskedMerge(SDNode *N) {
  assert(N->getOpcode() == ISD::XOR);

  // Don't touch 'not' (i.e. where y = -1).
  if (isAllOnesOrAllOnesSplat(N->getOperand(1)))
    return SDValue();

  EVT VT = N->getValueType(0);

  // There are 3 commutable operators in the pattern,
  // so we have to deal with 8 possible variants of the basic pattern.
  SDValue X, Y, M;
  auto matchAndXor = [&X, &Y, &M](SDValue And, unsigned XorIdx, SDValue Other) {
    if (And.getOpcode() != ISD::AND || !And.hasOneUse())
      return false;
    SDValue Xor = And.getOperand(XorIdx);
    if (Xor.getOpcode() != ISD::XOR || !Xor.hasOneUse())
      return false;
    SDValue Xor0 = Xor.getOperand(0);
    SDValue Xor1 = Xor.getOperand(1);
    // Don't touch 'not' (i.e. where y = -1).
    if (isAllOnesOrAllOnesSplat(Xor1))
      return false;
    if (Other == Xor0)
      std::swap(Xor0, Xor1);
    if (Other != Xor1)
      return false;
    X = Xor0;
    Y = Xor1;
    M = And.getOperand(XorIdx ? 0 : 1);
    return true;
  };

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (!matchAndXor(N0, 0, N1) && !matchAndXor(N0, 1, N1) &&
      !matchAndXor(N1, 0, N0) && !matchAndXor(N1, 1, N0))
    return SDValue();

  // Don't do anything if the mask is constant. This should not be reachable.
  // InstCombine should have already unfolded this pattern, and DAGCombiner
  // probably shouldn't produce it, too.
  if (isa<ConstantSDNode>(M.getNode()))
    return SDValue();

  // We can transform if the target has AndNot
  if (!TLI.hasAndNot(M))
    return SDValue();

  SDLoc DL(N);

  // If Y is a constant, check that 'andn' works with immediates.
  if (!TLI.hasAndNot(Y)) {
    assert(TLI.hasAndNot(X) && "Only mask is a variable? Unreachable.");
    // If not, we need to do a bit more work to make sure andn is still used.
    SDValue NotX = DAG.getNOT(DL, X, VT);
    SDValue LHS = DAG.getNode(ISD::AND, DL, VT, NotX, M);
    SDValue NotLHS = DAG.getNOT(DL, LHS, VT);
    SDValue RHS = DAG.getNode(ISD::OR, DL, VT, M, Y);
    return DAG.getNode(ISD::AND, DL, VT, NotLHS, RHS);
  }

  SDValue LHS = DAG.getNode(ISD::AND, DL, VT, X, M);
  SDValue NotM = DAG.getNOT(DL, M, VT);
  SDValue RHS = DAG.getNode(ISD::AND, DL, VT, Y, NotM);

  return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
}

SDValue DAGCombiner::visitXOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();

  // fold vector ops
  if (VT.isVector()) {
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

    // fold (xor x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      return N1;
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;
  }

  // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
  SDLoc DL(N);
  if (N0.isUndef() && N1.isUndef())
    return DAG.getConstant(0, DL, VT);
  // fold (xor x, undef) -> undef
  if (N0.isUndef())
    return N0;
  if (N1.isUndef())
    return N1;
  // fold (xor c1, c2) -> c1^c2
  ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
  ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::XOR, DL, VT, N0C, N1C);
  // canonicalize constant to RHS
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
     !DAG.isConstantIntBuildVectorOrConstantInt(N1))
    return DAG.getNode(ISD::XOR, DL, VT, N1, N0);
  // fold (xor x, 0) -> x
  if (isNullConstant(N1))
    return N0;

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // reassociate xor
  if (SDValue RXOR = reassociateOps(ISD::XOR, DL, N0, N1, N->getFlags()))
    return RXOR;

  // fold !(x cc y) -> (x !cc y)
  unsigned N0Opcode = N0.getOpcode();
  SDValue LHS, RHS, CC;
  if (TLI.isConstTrueVal(N1.getNode()) && isSetCCEquivalent(N0, LHS, RHS, CC)) {
    ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                                               LHS.getValueType().isInteger());
    if (!LegalOperations ||
        TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) {
      switch (N0Opcode) {
      default:
        llvm_unreachable("Unhandled SetCC Equivalent!");
      case ISD::SETCC:
        return DAG.getSetCC(SDLoc(N0), VT, LHS, RHS, NotCC);
      case ISD::SELECT_CC:
        return DAG.getSelectCC(SDLoc(N0), LHS, RHS, N0.getOperand(2),
                               N0.getOperand(3), NotCC);
      }
    }
  }

  // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
  if (isOneConstant(N1) && N0Opcode == ISD::ZERO_EXTEND && N0.hasOneUse() &&
      isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
    SDValue V = N0.getOperand(0);
    SDLoc DL0(N0);
    V = DAG.getNode(ISD::XOR, DL0, V.getValueType(), V,
                    DAG.getConstant(1, DL0, V.getValueType()));
    AddToWorklist(V.getNode());
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, V);
  }

  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
  if (isOneConstant(N1) && VT == MVT::i1 && N0.hasOneUse() &&
      (N0Opcode == ISD::OR || N0Opcode == ISD::AND)) {
    SDValue N00 = N0.getOperand(0), N01 = N0.getOperand(1);
    if (isOneUseSetCC(N01) || isOneUseSetCC(N00)) {
      unsigned NewOpcode = N0Opcode == ISD::AND ? ISD::OR : ISD::AND;
      N00 = DAG.getNode(ISD::XOR, SDLoc(N00), VT, N00, N1); // N00 = ~N00
      N01 = DAG.getNode(ISD::XOR, SDLoc(N01), VT, N01, N1); // N01 = ~N01
      AddToWorklist(N00.getNode()); AddToWorklist(N01.getNode());
      return DAG.getNode(NewOpcode, DL, VT, N00, N01);
    }
  }
  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
  if (isAllOnesConstant(N1) && N0.hasOneUse() &&
      (N0Opcode == ISD::OR || N0Opcode == ISD::AND)) {
    SDValue N00 = N0.getOperand(0), N01 = N0.getOperand(1);
    if (isa<ConstantSDNode>(N01) || isa<ConstantSDNode>(N00)) {
      unsigned NewOpcode = N0Opcode == ISD::AND ? ISD::OR : ISD::AND;
      N00 = DAG.getNode(ISD::XOR, SDLoc(N00), VT, N00, N1); // N00 = ~N00
      N01 = DAG.getNode(ISD::XOR, SDLoc(N01), VT, N01, N1); // N01 = ~N01
      AddToWorklist(N00.getNode()); AddToWorklist(N01.getNode());
      return DAG.getNode(NewOpcode, DL, VT, N00, N01);
    }
  }

  // fold (not (neg x)) -> (add X, -1)
  // FIXME: This can be generalized to (not (sub Y, X)) -> (add X, ~Y) if
  // Y is a constant or the subtract has a single use.
  if (isAllOnesConstant(N1) && N0.getOpcode() == ISD::SUB &&
      isNullConstant(N0.getOperand(0))) {
    return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(1),
                       DAG.getAllOnesConstant(DL, VT));
  }

  // fold (xor (and x, y), y) -> (and (not x), y)
  if (N0Opcode == ISD::AND && N0.hasOneUse() && N0->getOperand(1) == N1) {
    SDValue X = N0.getOperand(0);
    SDValue NotX = DAG.getNOT(SDLoc(X), X, VT);
    AddToWorklist(NotX.getNode());
    return DAG.getNode(ISD::AND, DL, VT, NotX, N1);
  }

  if ((N0Opcode == ISD::SRL || N0Opcode == ISD::SHL) && N0.hasOneUse()) {
    ConstantSDNode *XorC = isConstOrConstSplat(N1);
    ConstantSDNode *ShiftC = isConstOrConstSplat(N0.getOperand(1));
    unsigned BitWidth = VT.getScalarSizeInBits();
    if (XorC && ShiftC) {
      // Don't crash on an oversized shift. We cannot guarantee that a bogus
      // shift has been simplified to undef.
      uint64_t ShiftAmt = ShiftC->getLimitedValue();
      if (ShiftAmt < BitWidth) {
        APInt Ones = APInt::getAllOnesValue(BitWidth);
        Ones = N0Opcode == ISD::SHL ? Ones.shl(ShiftAmt) : Ones.lshr(ShiftAmt);
        if (XorC->getAPIntValue() == Ones) {
          // If the xor constant is a shifted -1, do a 'not' before the shift:
          // xor (X << ShiftC), XorC --> (not X) << ShiftC
          // xor (X >> ShiftC), XorC --> (not X) >> ShiftC
          SDValue Not = DAG.getNOT(DL, N0.getOperand(0), VT);
          return DAG.getNode(N0Opcode, DL, VT, Not, N0.getOperand(1));
        }
      }
    }
  }

  // fold Y = sra (X, size(X)-1); xor (add (X, Y), Y) -> (abs X)
  if (TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
    SDValue A = N0Opcode == ISD::ADD ? N0 : N1;
    SDValue S = N0Opcode == ISD::SRA ? N0 : N1;
    if (A.getOpcode() == ISD::ADD && S.getOpcode() == ISD::SRA) {
      SDValue A0 = A.getOperand(0), A1 = A.getOperand(1);
      SDValue S0 = S.getOperand(0);
      if ((A0 == S && A1 == S0) || (A1 == S && A0 == S0)) {
        unsigned OpSizeInBits = VT.getScalarSizeInBits();
        if (ConstantSDNode *C = isConstOrConstSplat(S.getOperand(1)))
          if (C->getAPIntValue() == (OpSizeInBits - 1))
            return DAG.getNode(ISD::ABS, DL, VT, S0);
      }
    }
  }

  // fold (xor x, x) -> 0
  if (N0 == N1)
    return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);

  // fold (xor (shl 1, x), -1) -> (rotl ~1, x)
  // Here is a concrete example of this equivalence:
  // i16   x ==  14
  // i16 shl ==   1 << 14  == 16384 == 0b0100000000000000
  // i16 xor == ~(1 << 14) == 49151 == 0b1011111111111111
  //
  // =>
  //
  // i16     ~1      == 0b1111111111111110
  // i16 rol(~1, 14) == 0b1011111111111111
  //
  // Some additional tips to help conceptualize this transform:
  // - Try to see the operation as placing a single zero in a value of all ones.
  // - There exists no value for x which would allow the result to contain zero.
  // - Values of x larger than the bitwidth are undefined and do not require a
  //   consistent result.
  // - Pushing the zero left requires shifting one-bits in from the right.
  // A rotate left of ~1 is a nice way of achieving the desired result.
  if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT) && N0Opcode == ISD::SHL &&
      isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0))) {
    return DAG.getNode(ISD::ROTL, DL, VT, DAG.getConstant(~1, DL, VT),
                       N0.getOperand(1));
  }

  // Simplify: xor (op x...), (op y...)  -> (op (xor x, y))
  if (N0Opcode == N1.getOpcode())
    if (SDValue V = hoistLogicOpWithSameOpcodeHands(N))
      return V;

  // Unfold  ((x ^ y) & m) ^ y  into  (x & m) | (y & ~m)  if profitable
  if (SDValue MM = unfoldMaskedMerge(N))
    return MM;

  // Simplify the expression using non-local knowledge.
  if (SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

/// If we have a shift-by-constant of a bitwise logic op that itself has a
/// shift-by-constant operand with identical opcode, we may be able to convert
/// that into 2 independent shifts followed by the logic op. This is a
/// throughput improvement.
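///
/// For example (illustrative): shl (and (shl X, 2), Y), 3 becomes
/// and (shl X, 5), (shl Y, 3).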
static SDValue combineShiftOfShiftedLogic(SDNode *Shift, SelectionDAG &DAG) {
  // Match a one-use bitwise logic op.
  SDValue LogicOp = Shift->getOperand(0);
  if (!LogicOp.hasOneUse())
    return SDValue();

  unsigned LogicOpcode = LogicOp.getOpcode();
  if (LogicOpcode != ISD::AND && LogicOpcode != ISD::OR &&
      LogicOpcode != ISD::XOR)
    return SDValue();

  // Find a matching one-use shift by constant.
  unsigned ShiftOpcode = Shift->getOpcode();
  SDValue C1 = Shift->getOperand(1);
  ConstantSDNode *C1Node = isConstOrConstSplat(C1);
  assert(C1Node && "Expected a shift with constant operand");
  const APInt &C1Val = C1Node->getAPIntValue();
  auto matchFirstShift = [&](SDValue V, SDValue &ShiftOp,
                             const APInt *&ShiftAmtVal) {
    if (V.getOpcode() != ShiftOpcode || !V.hasOneUse())
      return false;

    ConstantSDNode *ShiftCNode = isConstOrConstSplat(V.getOperand(1));
    if (!ShiftCNode)
      return false;

    // Capture the shifted operand and shift amount value.
    ShiftOp = V.getOperand(0);
    ShiftAmtVal = &ShiftCNode->getAPIntValue();

    // Shift amount types do not have to match their operand type, so check that
    // the constants are the same width.
    if (ShiftAmtVal->getBitWidth() != C1Val.getBitWidth())
      return false;

    // The fold is not valid if the sum of the shift values exceeds bitwidth.
    if ((*ShiftAmtVal + C1Val).uge(V.getScalarValueSizeInBits()))
      return false;

    return true;
  };

  // Logic ops are commutative, so check each operand for a match.
  SDValue X, Y;
  const APInt *C0Val;
  if (matchFirstShift(LogicOp.getOperand(0), X, C0Val))
    Y = LogicOp.getOperand(1);
  else if (matchFirstShift(LogicOp.getOperand(1), X, C0Val))
    Y = LogicOp.getOperand(0);
  else
    return SDValue();

  // shift (logic (shift X, C0), Y), C1 -> logic (shift X, C0+C1), (shift Y, C1)
  SDLoc DL(Shift);
  EVT VT = Shift->getValueType(0);
  EVT ShiftAmtVT = Shift->getOperand(1).getValueType();
  SDValue ShiftSumC = DAG.getConstant(*C0Val + C1Val, DL, ShiftAmtVT);
  SDValue NewShift1 = DAG.getNode(ShiftOpcode, DL, VT, X, ShiftSumC);
  SDValue NewShift2 = DAG.getNode(ShiftOpcode, DL, VT, Y, C1);
  return DAG.getNode(LogicOpcode, DL, VT, NewShift1, NewShift2);
}

/// Handle transforms common to the three shifts, when the shift amount is a
/// constant.
/// We are looking for: (shift being one of shl/sra/srl)
///   shift (binop X, C0), C1
/// And want to transform into:
///   binop (shift X, C1), (shift C0, C1)
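///
/// For example (illustrative, assuming the profitability checks below pass):
/// shl (add X, 5), 2 becomes add (shl X, 2), 20.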
SDValue DAGCombiner::visitShiftByConstant(SDNode *N) {
  assert(isConstOrConstSplat(N->getOperand(1)) && "Expected constant operand");

  // Do not turn a 'not' into a regular xor.
  if (isBitwiseNot(N->getOperand(0)))
    return SDValue();

  // The inner binop must be one-use, since we want to replace it.
  SDValue LHS = N->getOperand(0);
  if (!LHS.hasOneUse() || !TLI.isDesirableToCommuteWithShift(N, Level))
    return SDValue();

  // TODO: This is limited to early combining because it may reveal regressions
  //       otherwise. But since we just checked a target hook to see if this is
  //       desirable, that should have filtered out cases where this interferes
  //       with some other pattern matching.
  if (!LegalTypes)
    if (SDValue R = combineShiftOfShiftedLogic(N, DAG))
      return R;

  // We want to pull some binops through shifts, so that we have (and (shift))
  // instead of (shift (and)), likewise for add, or, xor, etc.  This sort of
  // thing happens with address calculations, so it's important to canonicalize
  // it.
  switch (LHS.getOpcode()) {
  default:
    return SDValue();
  case ISD::OR:
  case ISD::XOR:
  case ISD::AND:
    break;
  case ISD::ADD:
    if (N->getOpcode() != ISD::SHL)
      return SDValue(); // only shl(add) not sr[al](add).
    break;
  }

  // We require the RHS of the binop to be a constant and not opaque as well.
  ConstantSDNode *BinOpCst = getAsNonOpaqueConstant(LHS.getOperand(1));
  if (!BinOpCst)
    return SDValue();

  // FIXME: disable this unless the input to the binop is a shift by a constant
  // or is copy/select. Enable this in other cases when we figure out that it
  // is exactly profitable.
  SDValue BinOpLHSVal = LHS.getOperand(0);
  bool IsShiftByConstant = (BinOpLHSVal.getOpcode() == ISD::SHL ||
                            BinOpLHSVal.getOpcode() == ISD::SRA ||
                            BinOpLHSVal.getOpcode() == ISD::SRL) &&
                           isa<ConstantSDNode>(BinOpLHSVal.getOperand(1));
  bool IsCopyOrSelect = BinOpLHSVal.getOpcode() == ISD::CopyFromReg ||
                        BinOpLHSVal.getOpcode() == ISD::SELECT;

  if (!IsShiftByConstant && !IsCopyOrSelect)
    return SDValue();

  if (IsCopyOrSelect && N->hasOneUse())
    return SDValue();

  // Fold the constants, shifting the binop RHS by the shift amount.
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SDValue NewRHS = DAG.getNode(N->getOpcode(), DL, VT, LHS.getOperand(1),
                               N->getOperand(1));
  assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");

  SDValue NewShift = DAG.getNode(N->getOpcode(), DL, VT, LHS.getOperand(0),
                                 N->getOperand(1));
  return DAG.getNode(LHS.getOpcode(), DL, VT, NewShift, NewRHS);
}

SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
  assert(N->getOpcode() == ISD::TRUNCATE);
  assert(N->getOperand(0).getOpcode() == ISD::AND);

  // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC)
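  // e.g. (truncate:i8 (and X:i32, 0x0F)) -> (and (truncate:i8 X), 0x0F).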
  EVT TruncVT = N->getValueType(0);
  if (N->hasOneUse() && N->getOperand(0).hasOneUse() &&
      TLI.isTypeDesirableForOp(ISD::AND, TruncVT)) {
    SDValue N01 = N->getOperand(0).getOperand(1);
    if (isConstantOrConstantVector(N01, /* NoOpaques */ true)) {
      SDLoc DL(N);
      SDValue N00 = N->getOperand(0).getOperand(0);
      SDValue Trunc00 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00);
      SDValue Trunc01 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N01);
      AddToWorklist(Trunc00.getNode());
      AddToWorklist(Trunc01.getNode());
      return DAG.getNode(ISD::AND, DL, TruncVT, Trunc00, Trunc01);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitRotate(SDNode *N) {
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  unsigned Bitsize = VT.getScalarSizeInBits();

  // fold (rot x, 0) -> x
  if (isNullOrNullSplat(N1))
    return N0;

  // fold (rot x, c) -> x iff (c % BitSize) == 0
  if (isPowerOf2_32(Bitsize) && Bitsize > 1) {
    APInt ModuloMask(N1.getScalarValueSizeInBits(), Bitsize - 1);
    if (DAG.MaskedValueIsZero(N1, ModuloMask))
      return N0;
  }

  // fold (rot x, c) -> (rot x, c % BitSize)
  // TODO - support non-uniform vector amounts.
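  // e.g. (rotl x:i32, 37) --> (rotl x:i32, 5).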
  if (ConstantSDNode *Cst = isConstOrConstSplat(N1)) {
    if (Cst->getAPIntValue().uge(Bitsize)) {
      uint64_t RotAmt = Cst->getAPIntValue().urem(Bitsize);
      return DAG.getNode(N->getOpcode(), dl, VT, N0,
                         DAG.getConstant(RotAmt, dl, N1.getValueType()));
    }
  }

  // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND) {
    if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
      return DAG.getNode(N->getOpcode(), dl, VT, N0, NewOp1);
  }

  unsigned NextOp = N0.getOpcode();
  // fold (rot* (rot* x, c2), c1) -> (rot* x, (c1 +- c2) % bitsize)
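  // e.g. rotl (rotl x:i32, 5), 7 --> rotl x, 12, and
  //      rotl (rotr x:i32, 5), 7 --> rotl x, 2.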
  if (NextOp == ISD::ROTL || NextOp == ISD::ROTR) {
    SDNode *C1 = DAG.isConstantIntBuildVectorOrConstantInt(N1);
    SDNode *C2 = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1));
    if (C1 && C2 && C1->getValueType(0) == C2->getValueType(0)) {
      EVT ShiftVT = C1->getValueType(0);
      bool SameSide = (N->getOpcode() == NextOp);
      unsigned CombineOp = SameSide ? ISD::ADD : ISD::SUB;
      if (SDValue CombinedShift =
              DAG.FoldConstantArithmetic(CombineOp, dl, ShiftVT, C1, C2)) {
        SDValue BitsizeC = DAG.getConstant(Bitsize, dl, ShiftVT);
        SDValue CombinedShiftNorm = DAG.FoldConstantArithmetic(
            ISD::SREM, dl, ShiftVT, CombinedShift.getNode(),
            BitsizeC.getNode());
        return DAG.getNode(N->getOpcode(), dl, VT, N0->getOperand(0),
                           CombinedShiftNorm);
      }
    }
  }
  return SDValue();
}

SDValue DAGCombiner::visitSHL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (SDValue V = DAG.simplifyShift(N0, N1))
    return V;

  EVT VT = N0.getValueType();
  EVT ShiftVT = N1.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();

  // fold vector ops
  if (VT.isVector()) {
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

    BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1);
    // If setcc produces an all-ones true value then:
    // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV)
    if (N1CV && N1CV->isConstant()) {
      if (N0.getOpcode() == ISD::AND) {
        SDValue N00 = N0->getOperand(0);
        SDValue N01 = N0->getOperand(1);
        BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01);

        if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC &&
            TLI.getBooleanContents(N00.getOperand(0).getValueType()) ==
                TargetLowering::ZeroOrNegativeOneBooleanContent) {
          if (SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT,
                                                     N01CV, N1CV))
            return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C);
        }
      }
    }
  }

  ConstantSDNode *N1C = isConstOrConstSplat(N1);

  // fold (shl c1, c2) -> c1<<c2
  // TODO - support non-uniform vector shift amounts.
  ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
  if (N0C && N1C && !N1C->isOpaque())
    return DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, N0C, N1C);

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // if (shl x, c) is known to be zero, return 0
  if (DAG.MaskedValueIsZero(SDValue(N, 0),
                            APInt::getAllOnesValue(OpSizeInBits)))
    return DAG.getConstant(0, SDLoc(N), VT);

  // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND) {
    if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
      return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1);
  }

  // TODO - support non-uniform vector shift amounts.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
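  // e.g. on i32: shl (shl x, 10), 6 --> shl x, 16, and
  //              shl (shl x, 20), 15 --> 0.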
  if (N0.getOpcode() == ISD::SHL) {
    auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
                                          ConstantSDNode *RHS) {
      APInt c1 = LHS->getAPIntValue();
      APInt c2 = RHS->getAPIntValue();
      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
      return (c1 + c2).uge(OpSizeInBits);
    };
    if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
      return DAG.getConstant(0, SDLoc(N), VT);

    auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
                                       ConstantSDNode *RHS) {
      APInt c1 = LHS->getAPIntValue();
      APInt c2 = RHS->getAPIntValue();
      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
      return (c1 + c2).ult(OpSizeInBits);
    };
    if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
      SDLoc DL(N);
      SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
      return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), Sum);
    }
  }

  // fold (shl (ext (shl x, c1)), c2) -> (shl (ext x), (add c1, c2))
  // For this to be valid, the second form must not preserve any of the bits
  // that are shifted out by the inner shift in the first form.  This means
  // the outer shift size must be >= the number of bits added by the ext.
  // As a corollary, we don't care what kind of ext it is.
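  // e.g. shl (zext (shl x:i16, 3) to i32), 17 --> shl (zext x to i32), 20,
  // since 17 >= (32 - 16) and 3 + 17 < 32.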
  if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
       N0.getOpcode() == ISD::ANY_EXTEND ||
       N0.getOpcode() == ISD::SIGN_EXTEND) &&
      N0.getOperand(0).getOpcode() == ISD::SHL) {
    SDValue N0Op0 = N0.getOperand(0);
    SDValue InnerShiftAmt = N0Op0.getOperand(1);
    EVT InnerVT = N0Op0.getValueType();
    uint64_t InnerBitwidth = InnerVT.getScalarSizeInBits();

    auto MatchOutOfRange = [OpSizeInBits, InnerBitwidth](ConstantSDNode *LHS,
                                                         ConstantSDNode *RHS) {
      APInt c1 = LHS->getAPIntValue();
      APInt c2 = RHS->getAPIntValue();
      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
      return c2.uge(OpSizeInBits - InnerBitwidth) &&
             (c1 + c2).uge(OpSizeInBits);
    };
    if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchOutOfRange,
                                  /*AllowUndefs*/ false,
                                  /*AllowTypeMismatch*/ true))
      return DAG.getConstant(0, SDLoc(N), VT);

    auto MatchInRange = [OpSizeInBits, InnerBitwidth](ConstantSDNode *LHS,
                                                      ConstantSDNode *RHS) {
      APInt c1 = LHS->getAPIntValue();
      APInt c2 = RHS->getAPIntValue();
      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
      return c2.uge(OpSizeInBits - InnerBitwidth) &&
             (c1 + c2).ult(OpSizeInBits);
    };
    if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchInRange,
                                  /*AllowUndefs*/ false,
                                  /*AllowTypeMismatch*/ true)) {
      SDLoc DL(N);
      SDValue Ext = DAG.getNode(N0.getOpcode(), DL, VT, N0Op0.getOperand(0));
      SDValue Sum = DAG.getZExtOrTrunc(InnerShiftAmt, DL, ShiftVT);
      Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, Sum, N1);
      return DAG.getNode(ISD::SHL, DL, VT, Ext, Sum);
    }
  }

  // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
  // Only fold this if the inner zext has no other uses to avoid increasing
  // the total number of instructions.
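  // e.g. shl (zext (srl x:i16, 4) to i32), 4 --> zext (shl (srl x, 4), 4).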
  if (N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
      N0.getOperand(0).getOpcode() == ISD::SRL) {
    SDValue N0Op0 = N0.getOperand(0);
    SDValue InnerShiftAmt = N0Op0.getOperand(1);

    auto MatchEqual = [VT](ConstantSDNode *LHS, ConstantSDNode *RHS) {
      APInt c1 = LHS->getAPIntValue();
      APInt c2 = RHS->getAPIntValue();
      zeroExtendToMatch(c1, c2);
      return c1.ult(VT.getScalarSizeInBits()) && (c1 == c2);
    };
    if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchEqual,
                                  /*AllowUndefs*/ false,
                                  /*AllowTypeMismatch*/ true)) {
      SDLoc DL(N);
      EVT InnerShiftAmtVT = N0Op0.getOperand(1).getValueType();
      SDValue NewSHL = DAG.getZExtOrTrunc(N1, DL, InnerShiftAmtVT);
      NewSHL = DAG.getNode(ISD::SHL, DL, N0Op0.getValueType(), N0Op0, NewSHL);
      AddToWorklist(NewSHL.getNode());
      return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
    }
  }

  // fold (shl (sr[la] exact X,  C1), C2) -> (shl    X, (C2-C1)) if C1 <= C2
  // fold (shl (sr[la] exact X,  C1), C2) -> (sr[la] X, (C2-C1)) if C1  > C2
  // TODO - support non-uniform vector shift amounts.
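  // e.g. shl (srl exact X, 3), 5 --> shl X, 2, and
  //      shl (srl exact X, 5), 3 --> srl X, 2.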
  if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) &&
      N0->getFlags().hasExact()) {
    if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
      uint64_t C1 = N0C1->getZExtValue();
      uint64_t C2 = N1C->getZExtValue();
      SDLoc DL(N);
      if (C1 <= C2)
        return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
                           DAG.getConstant(C2 - C1, DL, ShiftVT));
      return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0),
                         DAG.getConstant(C1 - C2, DL, ShiftVT));
    }
  }

  // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
  //                               (and (srl x, (sub c1, c2)), MASK)
  // Only fold this if the inner shift has no other uses -- if it does, folding
  // this will increase the total number of instructions.
  // TODO - drop hasOneUse requirement if c1 == c2?
  // TODO - support non-uniform vector shift amounts.
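  // e.g. on i8: shl (srl x, 3), 5 --> and (shl x, 2), 0xE0, and
  //             shl (srl x, 5), 3 --> and (srl x, 2), 0x38.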
  if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse() &&
      TLI.shouldFoldConstantShiftPairToMask(N, Level)) {
    if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
      if (N0C1->getAPIntValue().ult(OpSizeInBits)) {
        uint64_t c1 = N0C1->getZExtValue();
        uint64_t c2 = N1C->getZExtValue();
        APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
        SDValue Shift;
        if (c2 > c1) {
          Mask <<= c2 - c1;
          SDLoc DL(N);
          Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
                              DAG.getConstant(c2 - c1, DL, ShiftVT));
        } else {
          Mask.lshrInPlace(c1 - c2);
          SDLoc DL(N);
          Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
                              DAG.getConstant(c1 - c2, DL, ShiftVT));
        }
        SDLoc DL(N0);
        return DAG.getNode(ISD::AND, DL, VT, Shift,
                           DAG.getConstant(Mask, DL, VT));
      }
    }
  }

  // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
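  // e.g. shl (sra x:i32, 5), 5 --> and x, 0xFFFFFFE0.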
  if (N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1) &&
      isConstantOrConstantVector(N1, /* No Opaques */ true)) {
    SDLoc DL(N);
    SDValue AllBits = DAG.getAllOnesConstant(DL, VT);
    SDValue HiBitsMask = DAG.getNode(ISD::SHL, DL, VT, AllBits, N1);
    return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), HiBitsMask);
  }

  // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  // Variant of version done on multiply, except mul by a power of 2 is turned
  // into a shift.
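  // e.g. shl (add x, 3), 2 --> add (shl x, 2), 12.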
  if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR) &&
      N0.getNode()->hasOneUse() &&
      isConstantOrConstantVector(N1, /* No Opaques */ true) &&
      isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true) &&
      TLI.isDesirableToCommuteWithShift(N, Level)) {
    SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
    SDValue Shl1 = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
    AddToWorklist(Shl0.getNode());
    AddToWorklist(Shl1.getNode());
    return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, Shl0, Shl1);
  }

  // fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
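  // e.g. shl (mul x, 3), 2 --> mul x, 12.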
  if (N0.getOpcode() == ISD::MUL && N0.getNode()->hasOneUse() &&
      isConstantOrConstantVector(N1, /* No Opaques */ true) &&
      isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) {
    SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
    if (isConstantOrConstantVector(Shl))
      return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), Shl);
  }

  if (N1C && !N1C->isOpaque())
    if (SDValue NewSHL = visitShiftByConstant(N))
      return NewSHL;

  return SDValue();
}

SDValue DAGCombiner::visitSRA(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (SDValue V = DAG.simplifyShift(N0, N1))
    return V;

  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();

  // Arithmetic shifting an all-sign-bit value is a no-op.
  // fold (sra 0, x) -> 0
  // fold (sra -1, x) -> -1
  if (DAG.ComputeNumSignBits(N0) == OpSizeInBits)
    return N0;

  // fold vector ops
  if (VT.isVector())
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

  ConstantSDNode *N1C = isConstOrConstSplat(N1);

  // fold (sra c1, c2) -> c1 >>s c2
  // TODO - support non-uniform vector shift amounts.
  ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
  if (N0C && N1C && !N1C->isOpaque())
    return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C);

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
  // sext_inreg.
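  // e.g. sra (shl x:i32, 24), 24 --> sext_inreg x, i8.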
  if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
    unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
    EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
    if (VT.isVector())
      ExtVT = EVT::getVectorVT(*DAG.getContext(),
                               ExtVT, VT.getVectorNumElements());
    if ((!LegalOperations ||
         TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
                         N0.getOperand(0), DAG.getValueType(ExtVT));
  }

  // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
  // clamp (add c1, c2) to max shift.
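  // e.g. on i32: sra (sra x, 10), 6 --> sra x, 16, and
  //              sra (sra x, 20), 20 --> sra x, 31.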
  if (N0.getOpcode() == ISD::SRA) {
    SDLoc DL(N);
    EVT ShiftVT = N1.getValueType();
    EVT ShiftSVT = ShiftVT.getScalarType();
    SmallVector<SDValue, 16> ShiftValues;

    auto SumOfShifts = [&](ConstantSDNode *LHS, ConstantSDNode *RHS) {
      APInt c1 = LHS->getAPIntValue();
      APInt c2 = RHS->getAPIntValue();
      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
      APInt Sum = c1 + c2;
      unsigned ShiftSum =
          Sum.uge(OpSizeInBits) ? (OpSizeInBits - 1) : Sum.getZExtValue();
      ShiftValues.push_back(DAG.getConstant(ShiftSum, DL, ShiftSVT));
      return true;
    };
    if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), SumOfShifts)) {
      SDValue ShiftValue;
      if (VT.isVector())
        ShiftValue = DAG.getBuildVector(ShiftVT, DL, ShiftValues);
      else
        ShiftValue = ShiftValues[0];
      return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0), ShiftValue);
    }
  }

  // fold (sra (shl X, m), (sub result_size, n))
  // -> (sign_extend (trunc (srl X, (sub (sub result_size, n), m)))) for
  // result_size - n != m.
  // If truncate is free for the target, sext(trunc(srl)) is likely to result
  // in better code.
  if (N0.getOpcode() == ISD::SHL && N1C) {
    // Get the two constants of the shifts: N01C = m and N1C = result_size - n.
    const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
    if (N01C) {
      LLVMContext &Ctx = *DAG.getContext();
      // Determine what the truncate's result bitsize and type would be.
      EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());

      if (VT.isVector())
        TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());

      // Determine the residual right-shift amount.
      int ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();

      // If the shift is not a no-op (in which case this should be just a sign
      // extend already), the type to truncate to is legal, sign_extend is
      // legal on that type, and the truncate to that type is both legal and
      // free, perform the transform.
      if ((ShiftAmt > 0) &&
          TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
          TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
          TLI.isTruncateFree(VT, TruncVT)) {
        SDLoc DL(N);
        SDValue Amt = DAG.getConstant(ShiftAmt, DL,
            getShiftAmountTy(N0.getOperand(0).getValueType()));
        SDValue Shift = DAG.getNode(ISD::SRL, DL, VT,
                                    N0.getOperand(0), Amt);
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT,
                                    Shift);
        return DAG.getNode(ISD::SIGN_EXTEND, DL,
                           N->getValueType(0), Trunc);
      }
    }
  }

  // We convert trunc/ext to opposing shifts in IR, but casts may be cheaper.
  //   sra (add (shl X, N1C), AddC), N1C -->
  //   sext (add (trunc X to (width - N1C)), AddC')
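  // e.g. on i32 with N1C == 16:
  //   sra (add (shl X, 16), AddC), 16 -->
  //   sext (add (trunc X to i16), trunc (AddC >> 16))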
  if (!LegalTypes && N0.getOpcode() == ISD::ADD && N0.hasOneUse() && N1C &&
      N0.getOperand(0).getOpcode() == ISD::SHL &&
      N0.getOperand(0).getOperand(1) == N1 && N0.getOperand(0).hasOneUse()) {
    if (ConstantSDNode *AddC = isConstOrConstSplat(N0.getOperand(1))) {
      SDValue Shl = N0.getOperand(0);
      // Determine what the truncate's type would be and ask the target if that
      // is a free operation.
      LLVMContext &Ctx = *DAG.getContext();
      unsigned ShiftAmt = N1C->getZExtValue();
      EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - ShiftAmt);
      if (VT.isVector())
        TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());

      // TODO: The simple type check probably belongs in the default hook
      //       implementation and/or target-specific overrides (because
      //       non-simple types likely require masking when legalized), but that
      //       restriction may conflict with other transforms.
      if (TruncVT.isSimple() && TLI.isTruncateFree(VT, TruncVT)) {
        SDLoc DL(N);
        SDValue Trunc = DAG.getZExtOrTrunc(Shl.getOperand(0), DL, TruncVT);
        SDValue ShiftC = DAG.getConstant(AddC->getAPIntValue().lshr(ShiftAmt).
                             trunc(TruncVT.getScalarSizeInBits()), DL, TruncVT);
        SDValue Add = DAG.getNode(ISD::ADD, DL, TruncVT, Trunc, ShiftC);
        return DAG.getSExtOrTrunc(Add, DL, VT);
      }
    }
  }

  // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND) {
    if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
      return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1);
  }

  // fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2))
  // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
  //      if c1 is equal to the number of bits the trunc removes
  // TODO - support non-uniform vector shift amounts.
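  // e.g. sra (trunc:i32 (sra x:i64, 32)), 5 --> trunc:i32 (sra x:i64, 37).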
  if (N0.getOpcode() == ISD::TRUNCATE &&
      (N0.getOperand(0).getOpcode() == ISD::SRL ||
       N0.getOperand(0).getOpcode() == ISD::SRA) &&
      N0.getOperand(0).hasOneUse() &&
      N0.getOperand(0).getOperand(1).hasOneUse() && N1C) {
    SDValue N0Op0 = N0.getOperand(0);
    if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) {
      EVT LargeVT = N0Op0.getValueType();
      unsigned TruncBits = LargeVT.getScalarSizeInBits() - OpSizeInBits;
      if (LargeShift->getAPIntValue() == TruncBits) {
        SDLoc DL(N);
        SDValue Amt = DAG.getConstant(N1C->getZExtValue() + TruncBits, DL,
                                      getShiftAmountTy(LargeVT));
        SDValue SRA =
            DAG.getNode(ISD::SRA, DL, LargeVT, N0Op0.getOperand(0), Amt);
        return DAG.getNode(ISD::TRUNCATE, DL, VT, SRA);
      }
    }
  }

  // Simplify, based on bits shifted out of the LHS.
  // TODO - support non-uniform vector shift amounts.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // If the sign bit is known to be zero, switch this to a SRL.
  if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1);

  if (N1C && !N1C->isOpaque())
    if (SDValue NewSRA = visitShiftByConstant(N))
      return NewSRA;

  return SDValue();
}

SDValue DAGCombiner::visitSRL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (SDValue V = DAG.simplifyShift(N0, N1))
    return V;

  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();

  // fold vector ops
  if (VT.isVector())
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

  ConstantSDNode *N1C = isConstOrConstSplat(N1);

  // fold (srl c1, c2) -> c1 >>u c2
  // TODO - support non-uniform vector shift amounts.
  ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
  if (N0C && N1C && !N1C->isOpaque())
    return DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, N0C, N1C);

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // if (srl x, c) is known to be zero, return 0
  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
                                   APInt::getAllOnesValue(OpSizeInBits)))
    return DAG.getConstant(0, SDLoc(N), VT);

  // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
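  // e.g. on i32: srl (srl x, 10), 6 --> srl x, 16, and
  //              srl (srl x, 20), 15 --> 0.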
  if (N0.getOpcode() == ISD::SRL) {
    auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
                                          ConstantSDNode *RHS) {
      APInt c1 = LHS->getAPIntValue();
      APInt c2 = RHS->getAPIntValue();
      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
      return (c1 + c2).uge(OpSizeInBits);
    };
    if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
      return DAG.getConstant(0, SDLoc(N), VT);

    auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
                                       ConstantSDNode *RHS) {
      APInt c1 = LHS->getAPIntValue();
      APInt c2 = RHS->getAPIntValue();
      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
      return (c1 + c2).ult(OpSizeInBits);
    };
    if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
      SDLoc DL(N);
      EVT ShiftVT = N1.getValueType();
      SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
      return DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), Sum);
    }
  }

  // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
  // TODO - support non-uniform vector shift amounts.
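  // e.g. srl (trunc:i32 (srl x:i64, 32)), 5 --> trunc:i32 (srl x:i64, 37).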
  if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
      N0.getOperand(0).getOpcode() == ISD::SRL) {
    if (auto N001C = isConstOrConstSplat(N0.getOperand(0).getOperand(1))) {
      uint64_t c1 = N001C->getZExtValue();
      uint64_t c2 = N1C->getZExtValue();
      EVT InnerShiftVT = N0.getOperand(0).getValueType();
      EVT ShiftCountVT = N0.getOperand(0).getOperand(1).getValueType();
      uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
      // This is only valid if OpSizeInBits + c1 equals the size of the inner
      // shift.
      if (c1 + OpSizeInBits == InnerShiftSize) {
        SDLoc DL(N0);
        if (c1 + c2 >= InnerShiftSize)
          return DAG.getConstant(0, DL, VT);
        return DAG.getNode(ISD::TRUNCATE, DL, VT,
                           DAG.getNode(ISD::SRL, DL, InnerShiftVT,
                                       N0.getOperand(0).getOperand(0),
                                       DAG.getConstant(c1 + c2, DL,
                                                       ShiftCountVT)));
      }
    }
  }

  // fold (srl (shl x, c), c) -> (and x, cst2)
  // TODO - (srl (shl x, c1), c2).
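  // e.g. srl (shl x:i32, 5), 5 --> and x, 0x07FFFFFF.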
  if (N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
      isConstantOrConstantVector(N1, /* NoOpaques */ true)) {
    SDLoc DL(N);
    SDValue Mask =
        DAG.getNode(ISD::SRL, DL, VT, DAG.getAllOnesConstant(DL, VT), N1);
    AddToWorklist(Mask.getNode());
    return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), Mask);
  }

  // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
  // TODO - support non-uniform vector shift amounts.
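  // e.g. srl (anyext x:i16 to i32), 4 --> and (anyext (srl x, 4)), 0x0FFFFFFF.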
  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
    // Shifting in all undef bits?
    EVT SmallVT = N0.getOperand(0).getValueType();
    unsigned BitSize = SmallVT.getScalarSizeInBits();
    if (N1C->getAPIntValue().uge(BitSize))
      return DAG.getUNDEF(VT);

    if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
      uint64_t ShiftAmt = N1C->getZExtValue();
      SDLoc DL0(N0);
      SDValue SmallShift = DAG.getNode(ISD::SRL, DL0, SmallVT,
                                       N0.getOperand(0),
                          DAG.getConstant(ShiftAmt, DL0,
                                          getShiftAmountTy(SmallVT)));
      AddToWorklist(SmallShift.getNode());
      APInt Mask = APInt::getLowBitsSet(OpSizeInBits, OpSizeInBits - ShiftAmt);
      SDLoc DL(N);
      return DAG.getNode(ISD::AND, DL, VT,
                         DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift),
                         DAG.getConstant(Mask, DL, VT));
    }
  }

  // fold (srl (sra X, Y), 31) -> (srl X, 31).  This srl only looks at the sign
  // bit, which is unmodified by sra.
  if (N1C && N1C->getAPIntValue() == (OpSizeInBits - 1)) {
    if (N0.getOpcode() == ISD::SRA)
      return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1);
  }

  // fold (srl (ctlz x), log2(bitwidth)) -> x iff x has one bit set (the low
  // bit); the shift amount is 5 for i32.
  if (N1C && N0.getOpcode() == ISD::CTLZ &&
      N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
    KnownBits Known = DAG.computeKnownBits(N0.getOperand(0));

    // If any of the input bits are KnownOne, then the input couldn't be all
    // zeros, thus the result of the srl will always be zero.
    if (Known.One.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT);

    // If all of the bits input to the ctlz node are known to be zero, then
    // the result of the ctlz is the bitwidth ("32" here) and the result of
    // the shift is one.
    APInt UnknownBits = ~Known.Zero;
    if (UnknownBits == 0) return DAG.getConstant(1, SDLoc(N0), VT);

    // Otherwise, check to see if there is exactly one bit input to the ctlz.
    if (UnknownBits.isPowerOf2()) {
      // Okay, we know that only the single bit specified by UnknownBits could
      // be set on input to the CTLZ node. If this bit is set, the SRL will
      // return 0; if it is clear, it returns 1. Change the CTLZ/SRL pair to an
      // SRL/XOR pair, which is likely to simplify more.
      unsigned ShAmt = UnknownBits.countTrailingZeros();
      SDValue Op = N0.getOperand(0);

      if (ShAmt) {
        SDLoc DL(N0);
        Op = DAG.getNode(ISD::SRL, DL, VT, Op,
                  DAG.getConstant(ShAmt, DL,
                                  getShiftAmountTy(Op.getValueType())));
        AddToWorklist(Op.getNode());
      }

      SDLoc DL(N);
      return DAG.getNode(ISD::XOR, DL, VT,
                         Op, DAG.getConstant(1, DL, VT));
    }
  }

  // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND) {
    if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
      return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
  }

  // fold operands of srl based on knowledge that the low bits are not
  // demanded.
  // TODO - support non-uniform vector shift amounts.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  if (N1C && !N1C->isOpaque())
    if (SDValue NewSRL = visitShiftByConstant(N))
      return NewSRL;

  // Attempt to convert a srl of a load into a narrower zero-extending load.
  if (SDValue NarrowLoad = ReduceLoadWidth(N))
    return NarrowLoad;

  // Here is a common situation. We want to optimize:
  //
  //   %a = ...
  //   %b = and i32 %a, 2
  //   %c = srl i32 %b, 1
  //   brcond i32 %c ...
  //
  // into
  //
  //   %a = ...
  //   %b = and %a, 2
  //   %c = setcc eq %b, 0
  //   brcond %c ...
  //
  // However, after the source operand of the SRL is optimized into an AND, the
  // SRL itself may not be optimized further. Look for the BRCOND use and add
  // it to the worklist.
  if (N->hasOneUse()) {
    SDNode *Use = *N->use_begin();
    if (Use->getOpcode() == ISD::BRCOND)
      AddToWorklist(Use);
    else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
      // Also look past the truncate.
      Use = *Use->use_begin();
      if (Use->getOpcode() == ISD::BRCOND)
        AddToWorklist(Use);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  bool IsFSHL = N->getOpcode() == ISD::FSHL;
  unsigned BitWidth = VT.getScalarSizeInBits();

  // fold (fshl N0, N1, 0) -> N0
  // fold (fshr N0, N1, 0) -> N1
  if (isPowerOf2_32(BitWidth))
    if (DAG.MaskedValueIsZero(
            N2, APInt(N2.getScalarValueSizeInBits(), BitWidth - 1)))
      return IsFSHL ? N0 : N1;

  auto IsUndefOrZero = [](SDValue V) {
    return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
  };

  // TODO - support non-uniform vector shift amounts.
  if (ConstantSDNode *Cst = isConstOrConstSplat(N2)) {
    EVT ShAmtTy = N2.getValueType();

    // fold (fsh* N0, N1, c) -> (fsh* N0, N1, c % BitWidth)
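    // e.g. fshl N0, N1:i32, 37 --> fshl N0, N1, 5.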
    if (Cst->getAPIntValue().uge(BitWidth)) {
      uint64_t RotAmt = Cst->getAPIntValue().urem(BitWidth);
      return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N0, N1,
                         DAG.getConstant(RotAmt, SDLoc(N), ShAmtTy));
    }

    unsigned ShAmt = Cst->getZExtValue();
    if (ShAmt == 0)
      return IsFSHL ? N0 : N1;

    // fold fshl(undef_or_zero, N1, C) -> lshr(N1, BW-C)
    // fold fshr(undef_or_zero, N1, C) -> lshr(N1, C)
    // fold fshl(N0, undef_or_zero, C) -> shl(N0, C)
    // fold fshr(N0, undef_or_zero, C) -> shl(N0, BW-C)
    if (IsUndefOrZero(N0))
      return DAG.getNode(ISD::SRL, SDLoc(N), VT, N1,
                         DAG.getConstant(IsFSHL ? BitWidth - ShAmt : ShAmt,
                                         SDLoc(N), ShAmtTy));
    if (IsUndefOrZero(N1))
      return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0,
                         DAG.getConstant(IsFSHL ? ShAmt : BitWidth - ShAmt,
                                         SDLoc(N), ShAmtTy));
  }

  // fold fshr(undef_or_zero, N1, N2) -> lshr(N1, N2)
  // fold fshl(N0, undef_or_zero, N2) -> shl(N0, N2)
  // iff we know the shift amount is in range.
  // TODO: when is it worth doing SUB(BW, N2) as well?
  if (isPowerOf2_32(BitWidth)) {
    APInt ModuloBits(N2.getScalarValueSizeInBits(), BitWidth - 1);
    if (IsUndefOrZero(N0) && !IsFSHL && DAG.MaskedValueIsZero(N2, ~ModuloBits))
      return DAG.getNode(ISD::SRL, SDLoc(N), VT, N1, N2);
    if (IsUndefOrZero(N1) && IsFSHL && DAG.MaskedValueIsZero(N2, ~ModuloBits))
      return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, N2);
  }

  // fold (fshl N0, N0, N2) -> (rotl N0, N2)
  // fold (fshr N0, N0, N2) -> (rotr N0, N2)
  // TODO: Investigate flipping this rotate if only one is legal; if the funnel
  // shift is legal as well, we might be better off avoiding the non-constant
  // (BW - N2).
  unsigned RotOpc = IsFSHL ? ISD::ROTL : ISD::ROTR;
  if (N0 == N1 && hasOperation(RotOpc, VT))
    return DAG.getNode(RotOpc, SDLoc(N), VT, N0, N2);

  // Simplify, based on bits shifted out of N0/N1.
  if (SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

SDValue DAGCombiner::visitABS(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (abs c1) -> c2
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
    return DAG.getNode(ISD::ABS, SDLoc(N), VT, N0);
  // fold (abs (abs x)) -> (abs x)
  if (N0.getOpcode() == ISD::ABS)
    return N0;
  // fold (abs x) -> x iff not-negative
  if (DAG.SignBitIsZero(N0))
    return N0;
  return SDValue();
}

SDValue DAGCombiner::visitBSWAP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (bswap c1) -> c2
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
    return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0);
  // fold (bswap (bswap x)) -> x
  if (N0.getOpcode() == ISD::BSWAP)
    return N0->getOperand(0);
  return SDValue();
}

SDValue DAGCombiner::visitBITREVERSE(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (bitreverse c1) -> c2
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
    return DAG.getNode(ISD::BITREVERSE, SDLoc(N), VT, N0);
  // fold (bitreverse (bitreverse x)) -> x
  if (N0.getOpcode() == ISD::BITREVERSE)
    return N0.getOperand(0);
  return SDValue();
}

SDValue DAGCombiner::visitCTLZ(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctlz c1) -> c2
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
    return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0);

  // If the value is known never to be zero, switch to the undef version.
  if (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ_ZERO_UNDEF, VT)) {
    if (DAG.isKnownNeverZero(N0))
      return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
  }

  return SDValue();
}

SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctlz_zero_undef c1) -> c2
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
    return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitCTTZ(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (cttz c1) -> c2
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
    return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0);

  // If the value is known never to be zero, switch to the undef version.
  if (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ_ZERO_UNDEF, VT)) {
    if (DAG.isKnownNeverZero(N0))
      return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
  }

  return SDValue();
}

SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (cttz_zero_undef c1) -> c2
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
    return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitCTPOP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctpop c1) -> c2
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
    return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0);
  return SDValue();
}

// FIXME: This should be checking for no signed zeros on individual operands,
// as well as no NaNs.
static bool isLegalToCombineMinNumMaxNum(SelectionDAG &DAG, SDValue LHS,
                                         SDValue RHS,
                                         const TargetLowering &TLI) {
  const TargetOptions &Options = DAG.getTarget().Options;
  EVT VT = LHS.getValueType();

  return Options.NoSignedZerosFPMath && VT.isFloatingPoint() &&
         TLI.isProfitableToCombineMinNumMaxNum(VT) &&
         DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS);
}

/// Generate Min/Max node
static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS,
                                   SDValue RHS, SDValue True, SDValue False,
                                   ISD::CondCode CC, const TargetLowering &TLI,
                                   SelectionDAG &DAG) {
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  EVT TransformVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
  switch (CC) {
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETULT:
  case ISD::SETULE: {
    // Since it's known never NaN to get here already, either fminnum or
    // fminnum_ieee is OK. Try the IEEE version first, since fminnum is
    // expanded in terms of it.
    unsigned IEEEOpcode = (LHS == True) ? ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
    if (TLI.isOperationLegalOrCustom(IEEEOpcode, VT))
      return DAG.getNode(IEEEOpcode, DL, VT, LHS, RHS);

    unsigned Opcode = (LHS == True) ? ISD::FMINNUM : ISD::FMAXNUM;
    if (TLI.isOperationLegalOrCustom(Opcode, TransformVT))
      return DAG.getNode(Opcode, DL, VT, LHS, RHS);
    return SDValue();
  }
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGT:
  case ISD::SETUGE: {
    unsigned IEEEOpcode = (LHS == True) ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
    if (TLI.isOperationLegalOrCustom(IEEEOpcode, VT))
      return DAG.getNode(IEEEOpcode, DL, VT, LHS, RHS);

    unsigned Opcode = (LHS == True) ? ISD::FMAXNUM : ISD::FMINNUM;
    if (TLI.isOperationLegalOrCustom(Opcode, TransformVT))
      return DAG.getNode(Opcode, DL, VT, LHS, RHS);
    return SDValue();
  }
  default:
    return SDValue();
  }
}

/// If a (v)select has a condition value that is a sign-bit test, try to smear
/// the condition operand sign-bit across the value width and use it as a mask.
static SDValue foldSelectOfConstantsUsingSra(SDNode *N, SelectionDAG &DAG) {
  SDValue Cond = N->getOperand(0);
  SDValue C1 = N->getOperand(1);
  SDValue C2 = N->getOperand(2);
  assert(isConstantOrConstantVector(C1) && isConstantOrConstantVector(C2) &&
         "Expected select-of-constants");

  EVT VT = N->getValueType(0);
  if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse() ||
      VT != Cond.getOperand(0).getValueType())
    return SDValue();

  // The inverted-condition + commuted-select variants of these patterns are
  // canonicalized to these forms in IR.
  SDValue X = Cond.getOperand(0);
  SDValue CondC = Cond.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
  if (CC == ISD::SETGT && isAllOnesOrAllOnesSplat(CondC) &&
      isAllOnesOrAllOnesSplat(C2)) {
    // i32 X > -1 ? C1 : -1 --> (X >>s 31) | C1
    SDLoc DL(N);
    SDValue ShAmtC = DAG.getConstant(X.getScalarValueSizeInBits() - 1, DL, VT);
    SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShAmtC);
    return DAG.getNode(ISD::OR, DL, VT, Sra, C1);
  }
  if (CC == ISD::SETLT && isNullOrNullSplat(CondC) && isNullOrNullSplat(C2)) {
    // i8 X < 0 ? C1 : 0 --> (X >>s 7) & C1
    SDLoc DL(N);
    SDValue ShAmtC = DAG.getConstant(X.getScalarValueSizeInBits() - 1, DL, VT);
    SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShAmtC);
    return DAG.getNode(ISD::AND, DL, VT, Sra, C1);
  }
  return SDValue();
}

SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) {
  SDValue Cond = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  EVT VT = N->getValueType(0);
  EVT CondVT = Cond.getValueType();
  SDLoc DL(N);

  if (!VT.isInteger())
    return SDValue();

  auto *C1 = dyn_cast<ConstantSDNode>(N1);
  auto *C2 = dyn_cast<ConstantSDNode>(N2);
  if (!C1 || !C2)
    return SDValue();

  // Only do this before legalization to avoid conflicting with target-specific
  // transforms in the other direction (create a select from a zext/sext). There
  // is also a target-independent combine here in DAGCombiner in the other
  // direction for (select Cond, -1, 0) when the condition is not i1.
  if (CondVT == MVT::i1 && !LegalOperations) {
    if (C1->isNullValue() && C2->isOne()) {
      // select Cond, 0, 1 --> zext (!Cond)
      SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
      if (VT != MVT::i1)
        NotCond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, NotCond);
      return NotCond;
    }
    if (C1->isNullValue() && C2->isAllOnesValue()) {
      // select Cond, 0, -1 --> sext (!Cond)
      SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
      if (VT != MVT::i1)
        NotCond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NotCond);
      return NotCond;
    }
    if (C1->isOne() && C2->isNullValue()) {
      // select Cond, 1, 0 --> zext (Cond)
      if (VT != MVT::i1)
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
      return Cond;
    }
    if (C1->isAllOnesValue() && C2->isNullValue()) {
      // select Cond, -1, 0 --> sext (Cond)
      if (VT != MVT::i1)
        Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
      return Cond;
    }

    // Use a target hook because some targets may prefer to transform in the
    // other direction.
    if (TLI.convertSelectOfConstantsToMath(VT)) {
      // For any constants that differ by 1, we can transform the select into an
      // extend and add.
      const APInt &C1Val = C1->getAPIntValue();
      const APInt &C2Val = C2->getAPIntValue();
      if (C1Val - 1 == C2Val) {
        // select Cond, C1, C1-1 --> add (zext Cond), C1-1
        if (VT != MVT::i1)
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
        return DAG.getNode(ISD::ADD, DL, VT, Cond, N2);
      }
      if (C1Val + 1 == C2Val) {
        // select Cond, C1, C1+1 --> add (sext Cond), C1+1
        if (VT != MVT::i1)
          Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
        return DAG.getNode(ISD::ADD, DL, VT, Cond, N2);
      }

      // select Cond, Pow2, 0 --> (zext Cond) << log2(Pow2)
      if (C1Val.isPowerOf2() && C2Val.isNullValue()) {
        if (VT != MVT::i1)
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
        SDValue ShAmtC = DAG.getConstant(C1Val.exactLogBase2(), DL, VT);
        return DAG.getNode(ISD::SHL, DL, VT, Cond, ShAmtC);
      }

      if (SDValue V = foldSelectOfConstantsUsingSra(N, DAG))
        return V;
    }

    return SDValue();
  }

  // fold (select Cond, 0, 1) -> (xor Cond, 1)
  // We can't do this reliably if integer-based booleans have different contents
  // to floating-point-based booleans. This is because we can't tell whether we
  // have an integer-based boolean or a floating-point-based boolean unless we
  // can find the SETCC that produced it and inspect its operands. This is
  // fairly easy if C is the SETCC node, but it can potentially be
  // undiscoverable (or not reasonably discoverable). For example, it could be
  // in another basic block or it could require searching a complicated
  // expression.
  if (CondVT.isInteger() &&
      TLI.getBooleanContents(/*isVec*/false, /*isFloat*/true) ==
          TargetLowering::ZeroOrOneBooleanContent &&
      TLI.getBooleanContents(/*isVec*/false, /*isFloat*/false) ==
          TargetLowering::ZeroOrOneBooleanContent &&
      C1->isNullValue() && C2->isOne()) {
    SDValue NotCond =
        DAG.getNode(ISD::XOR, DL, CondVT, Cond, DAG.getConstant(1, DL, CondVT));
    if (VT.bitsEq(CondVT))
      return NotCond;
    return DAG.getZExtOrTrunc(NotCond, DL, VT);
  }

  return SDValue();
}

SDValue DAGCombiner::visitSELECT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  EVT VT = N->getValueType(0);
  EVT VT0 = N0.getValueType();
  SDLoc DL(N);
  SDNodeFlags Flags = N->getFlags();

  if (SDValue V = DAG.simplifySelect(N0, N1, N2))
    return V;

  // fold (select X, X, Y) -> (or X, Y)
  // fold (select X, 1, Y) -> (or C, Y)
  if (VT == VT0 && VT == MVT::i1 && (N0 == N1 || isOneConstant(N1)))
    return DAG.getNode(ISD::OR, DL, VT, N0, N2);

  if (SDValue V = foldSelectOfConstants(N))
    return V;

  // fold (select C, 0, X) -> (and (not C), X)
  if (VT == VT0 && VT == MVT::i1 && isNullConstant(N1)) {
    SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
    AddToWorklist(NOTNode.getNode());
    return DAG.getNode(ISD::AND, DL, VT, NOTNode, N2);
  }
  // fold (select C, X, 1) -> (or (not C), X)
  if (VT == VT0 && VT == MVT::i1 && isOneConstant(N2)) {
    SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
    AddToWorklist(NOTNode.getNode());
    return DAG.getNode(ISD::OR, DL, VT, NOTNode, N1);
  }
  // fold (select X, Y, X) -> (and X, Y)
  // fold (select X, Y, 0) -> (and X, Y)
  if (VT == VT0 && VT == MVT::i1 && (N0 == N2 || isNullConstant(N2)))
    return DAG.getNode(ISD::AND, DL, VT, N0, N1);

  // If we can fold this based on the true/false value, do so.
  if (SimplifySelectOps(N, N1, N2))
    return SDValue(N, 0); // Don't revisit N.

  if (VT0 == MVT::i1) {
    // The code in this block deals with the following 2 equivalences:
    //    select(C0|C1, x, y) <=> select(C0, x, select(C1, x, y))
    //    select(C0&C1, x, y) <=> select(C0, select(C1, x, y), y)
    // The target can specify its preferred form with the
    // shouldNormalizeToSelectSequence() callback. However, we always transform
    // to the right-hand form if the inner select already exists in the DAG,
    // and we always transform to the left-hand form if we know that we can
    // further optimize the combination of the conditions.
    bool normalizeToSequence =
        TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT);
    // select (and Cond0, Cond1), X, Y
    //   -> select Cond0, (select Cond1, X, Y), Y
    if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) {
      SDValue Cond0 = N0->getOperand(0);
      SDValue Cond1 = N0->getOperand(1);
      SDValue InnerSelect =
          DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond1, N1, N2, Flags);
      if (normalizeToSequence || !InnerSelect.use_empty())
        return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0,
                           InnerSelect, N2, Flags);
      // Cleanup on failure.
      if (InnerSelect.use_empty())
        recursivelyDeleteUnusedNodes(InnerSelect.getNode());
    }
    // select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y)
    if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) {
      SDValue Cond0 = N0->getOperand(0);
      SDValue Cond1 = N0->getOperand(1);
      SDValue InnerSelect = DAG.getNode(ISD::SELECT, DL, N1.getValueType(),
                                        Cond1, N1, N2, Flags);
      if (normalizeToSequence || !InnerSelect.use_empty())
        return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0, N1,
                           InnerSelect, Flags);
      // Cleanup on failure.
      if (InnerSelect.use_empty())
        recursivelyDeleteUnusedNodes(InnerSelect.getNode());
    }

    // select Cond0, (select Cond1, X, Y), Y -> select (and Cond0, Cond1), X, Y
    if (N1->getOpcode() == ISD::SELECT && N1->hasOneUse()) {
      SDValue N1_0 = N1->getOperand(0);
      SDValue N1_1 = N1->getOperand(1);
      SDValue N1_2 = N1->getOperand(2);
      if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) {
        // Create the actual and node if we can generate good code for it.
        if (!normalizeToSequence) {
          SDValue And = DAG.getNode(ISD::AND, DL, N0.getValueType(), N0, N1_0);
          return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), And, N1_1,
                             N2, Flags);
        }
        // Otherwise see if we can optimize the "and" to a better pattern.
        if (SDValue Combined = visitANDLike(N0, N1_0, N)) {
          return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1_1,
                             N2, Flags);
        }
      }
    }
    // select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y
    if (N2->getOpcode() == ISD::SELECT && N2->hasOneUse()) {
      SDValue N2_0 = N2->getOperand(0);
      SDValue N2_1 = N2->getOperand(1);
      SDValue N2_2 = N2->getOperand(2);
      if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) {
        // Create the actual or node if we can generate good code for it.
        if (!normalizeToSequence) {
          SDValue Or = DAG.getNode(ISD::OR, DL, N0.getValueType(), N0, N2_0);
          return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Or, N1,
                             N2_2, Flags);
        }
        // Otherwise see if we can optimize to a better pattern.
        if (SDValue Combined = visitORLike(N0, N2_0, N))
          return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1,
                             N2_2, Flags);
      }
    }
  }

  // select (not Cond), N1, N2 -> select Cond, N2, N1
  if (SDValue F = extractBooleanFlip(N0, DAG, TLI, false)) {
    SDValue SelectOp = DAG.getSelect(DL, VT, F, N2, N1);
    SelectOp->setFlags(Flags);
    return SelectOp;
  }

  // Fold selects based on a setcc into other things, such as min/max/abs.
  if (N0.getOpcode() == ISD::SETCC) {
    SDValue Cond0 = N0.getOperand(0), Cond1 = N0.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();

    // select (fcmp lt x, y), x, y -> fminnum x, y
    // select (fcmp gt x, y), x, y -> fmaxnum x, y
    //
    // This is OK if we don't care what happens if either operand is a NaN.
    if (N0.hasOneUse() && isLegalToCombineMinNumMaxNum(DAG, N1, N2, TLI))
      if (SDValue FMinMax = combineMinNumMaxNum(DL, VT, Cond0, Cond1, N1, N2,
                                                CC, TLI, DAG))
        return FMinMax;

    // Use 'unsigned add with overflow' to optimize an unsigned saturating add.
    // This is conservatively limited to pre-legal-operations to give targets
    // a chance to reverse the transform if they want to do that. Also, it is
    // unlikely that the pattern would be formed late, so it's probably not
    // worth going through the other checks.
    if (!LegalOperations && TLI.isOperationLegalOrCustom(ISD::UADDO, VT) &&
        CC == ISD::SETUGT && N0.hasOneUse() && isAllOnesConstant(N1) &&
        N2.getOpcode() == ISD::ADD && Cond0 == N2.getOperand(0)) {
      auto *C = dyn_cast<ConstantSDNode>(N2.getOperand(1));
      auto *NotC = dyn_cast<ConstantSDNode>(Cond1);
      if (C && NotC && C->getAPIntValue() == ~NotC->getAPIntValue()) {
        // select (setcc Cond0, ~C, ugt), -1, (add Cond0, C) -->
        // uaddo Cond0, C; select uaddo.1, -1, uaddo.0
        //
        // The IR equivalent of this transform would have this form:
        //   %a = add %x, C
        //   %c = icmp ugt %x, ~C
        //   %r = select %c, -1, %a
        //   =>
        //   %u = call {iN,i1} llvm.uadd.with.overflow(%x, C)
        //   %u0 = extractvalue %u, 0
        //   %u1 = extractvalue %u, 1
        //   %r = select %u1, -1, %u0
        SDVTList VTs = DAG.getVTList(VT, VT0);
        SDValue UAO = DAG.getNode(ISD::UADDO, DL, VTs, Cond0, N2.getOperand(1));
        return DAG.getSelect(DL, VT, UAO.getValue(1), N1, UAO.getValue(0));
      }
    }

    if (TLI.isOperationLegal(ISD::SELECT_CC, VT) ||
        (!LegalOperations &&
         TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))) {
      // Any flags available in a select/setcc fold will be on the setcc as they
      // migrated from fcmp
      Flags = N0.getNode()->getFlags();
      SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, VT, Cond0, Cond1, N1,
                                       N2, N0.getOperand(2));
      SelectNode->setFlags(Flags);
      return SelectNode;
    }

    return SimplifySelect(DL, N0, N1, N2);
  }

  return SDValue();
}

// This function assumes all the vselect's arguments are CONCAT_VECTOR
// nodes and that the condition is a BV of ConstantSDNodes (or undefs).
static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  EVT VT = N->getValueType(0);
  int NumElems = VT.getVectorNumElements();
  assert(LHS.getOpcode() == ISD::CONCAT_VECTORS &&
         RHS.getOpcode() == ISD::CONCAT_VECTORS &&
         Cond.getOpcode() == ISD::BUILD_VECTOR);

  // CONCAT_VECTORS can take an arbitrary number of arguments. We only care
  // about binary ones here.
  if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2)
    return SDValue();

  // We're sure we have an even number of elements due to the
  // concat_vectors we have as arguments to vselect.
  // Skip BV elements until we find one that's not an UNDEF. After we find a
  // non-UNDEF element, keep looping until we get to half the length of the BV
  // and check that all the non-undef elements are the same.
  ConstantSDNode *BottomHalf = nullptr;
  for (int i = 0; i < NumElems / 2; ++i) {
    if (Cond->getOperand(i)->isUndef())
      continue;

    if (BottomHalf == nullptr)
      BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i));
    else if (Cond->getOperand(i).getNode() != BottomHalf)
      return SDValue();
  }

  // Do the same for the second half of the BuildVector
  ConstantSDNode *TopHalf = nullptr;
  for (int i = NumElems / 2; i < NumElems; ++i) {
    if (Cond->getOperand(i)->isUndef())
      continue;

    if (TopHalf == nullptr)
      TopHalf = cast<ConstantSDNode>(Cond.getOperand(i));
    else if (Cond->getOperand(i).getNode() != TopHalf)
      return SDValue();
  }

  assert(TopHalf && BottomHalf &&
         "One half of the selector was all UNDEFs and the other was all the "
         "same value. This should have been addressed before this function.");
  return DAG.getNode(
      ISD::CONCAT_VECTORS, DL, VT,
      BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0),
      TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
}

SDValue DAGCombiner::visitMSCATTER(SDNode *N) {
  MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N);
  SDValue Mask = MSC->getMask();
  SDValue Chain = MSC->getChain();
  SDLoc DL(N);

  // Zap scatters with a zero mask.
  if (ISD::isBuildVectorAllZeros(Mask.getNode()))
    return Chain;

  return SDValue();
}

SDValue DAGCombiner::visitMSTORE(SDNode *N) {
  MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
  SDValue Mask = MST->getMask();
  SDValue Chain = MST->getChain();
  SDLoc DL(N);

  // Zap masked stores with a zero mask.
  if (ISD::isBuildVectorAllZeros(Mask.getNode()))
    return Chain;

  return SDValue();
}

SDValue DAGCombiner::visitMGATHER(SDNode *N) {
  MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(N);
  SDValue Mask = MGT->getMask();
  SDLoc DL(N);

  // Zap gathers with a zero mask.
  if (ISD::isBuildVectorAllZeros(Mask.getNode()))
    return CombineTo(N, MGT->getPassThru(), MGT->getChain());

  return SDValue();
}

SDValue DAGCombiner::visitMLOAD(SDNode *N) {
  MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
  SDValue Mask = MLD->getMask();
  SDLoc DL(N);

  // Zap masked loads with a zero mask.
  if (ISD::isBuildVectorAllZeros(Mask.getNode()))
    return CombineTo(N, MLD->getPassThru(), MLD->getChain());

  return SDValue();
}

/// A vector select of 2 constant vectors can be simplified to math/logic to
/// avoid a variable select instruction and possibly avoid constant loads.
SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
  SDValue Cond = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  EVT VT = N->getValueType(0);
  if (!Cond.hasOneUse() || Cond.getScalarValueSizeInBits() != 1 ||
      !TLI.convertSelectOfConstantsToMath(VT) ||
      !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()) ||
      !ISD::isBuildVectorOfConstantSDNodes(N2.getNode()))
    return SDValue();

  // Check if we can use the condition value to increment/decrement a single
  // constant value. This simplifies a select to an add and removes a constant
  // load/materialization from the general case.
  bool AllAddOne = true;
  bool AllSubOne = true;
  unsigned Elts = VT.getVectorNumElements();
  for (unsigned i = 0; i != Elts; ++i) {
    SDValue N1Elt = N1.getOperand(i);
    SDValue N2Elt = N2.getOperand(i);
    if (N1Elt.isUndef() || N2Elt.isUndef())
      continue;

    const APInt &C1 = cast<ConstantSDNode>(N1Elt)->getAPIntValue();
    const APInt &C2 = cast<ConstantSDNode>(N2Elt)->getAPIntValue();
    if (C1 != C2 + 1)
      AllAddOne = false;
    if (C1 != C2 - 1)
      AllSubOne = false;
  }

  // Further simplifications for the extra-special cases where the constants are
  // all 0 or all -1 should be implemented as folds of these patterns.
  SDLoc DL(N);
  if (AllAddOne || AllSubOne) {
    // vselect <N x i1> Cond, C+1, C --> add (zext Cond), C
    // vselect <N x i1> Cond, C-1, C --> add (sext Cond), C
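    // E.g. (illustrative): vselect <4 x i1> %c, <5,5,5,5>, <4,4,4,4>
    //   --> add (zext %c to v4i32), <4,4,4,4>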
    auto ExtendOpcode = AllAddOne ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
    SDValue ExtendedCond = DAG.getNode(ExtendOpcode, DL, VT, Cond);
    return DAG.getNode(ISD::ADD, DL, VT, ExtendedCond, N2);
  }

  // select Cond, Pow2C, 0 --> (zext Cond) << log2(Pow2C)
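  // E.g. (illustrative): vselect <4 x i1> %c, <8,8,8,8>, zeroinitializer
  //   --> shl (zext %c to v4i32), <3,3,3,3>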
  APInt Pow2C;
  if (ISD::isConstantSplatVector(N1.getNode(), Pow2C) && Pow2C.isPowerOf2() &&
      isNullOrNullSplat(N2)) {
    SDValue ZextCond = DAG.getZExtOrTrunc(Cond, DL, VT);
    SDValue ShAmtC = DAG.getConstant(Pow2C.exactLogBase2(), DL, VT);
    return DAG.getNode(ISD::SHL, DL, VT, ZextCond, ShAmtC);
  }

  if (SDValue V = foldSelectOfConstantsUsingSra(N, DAG))
    return V;

  // The general case for select-of-constants:
  // vselect <N x i1> Cond, C1, C2 --> xor (and (sext Cond), (C1^C2)), C2
  // ...but that only makes sense if a vselect is slower than 2 logic ops, so
  // leave that to a machine-specific pass.
  return SDValue();
}

SDValue DAGCombiner::visitVSELECT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  if (SDValue V = DAG.simplifySelect(N0, N1, N2))
    return V;

  // vselect (not Cond), N1, N2 -> vselect Cond, N2, N1
  if (SDValue F = extractBooleanFlip(N0, DAG, TLI, false))
    return DAG.getSelect(DL, VT, F, N2, N1);

  // Canonicalize integer abs.
  // vselect (setg[te] X,  0),  X, -X ->
  // vselect (setgt    X, -1),  X, -X ->
  // vselect (setl[te] X,  0), -X,  X ->
  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
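  // E.g. for v4i32 (illustrative): vselect (setgt X, <-1,...>), X, (sub 0, X)
  //   --> Y = sra X, <31,...>; xor (add X, Y), Y   (when ISD::ABS is illegal)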
  if (N0.getOpcode() == ISD::SETCC) {
    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
    bool isAbs = false;
    bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());

    if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
         (ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) &&
        N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1))
      isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode());
    else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) &&
             N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1))
      isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());

    if (isAbs) {
      if (TLI.isOperationLegalOrCustom(ISD::ABS, VT))
        return DAG.getNode(ISD::ABS, DL, VT, LHS);

      SDValue Shift = DAG.getNode(ISD::SRA, DL, VT, LHS,
                                  DAG.getConstant(VT.getScalarSizeInBits() - 1,
                                                  DL, getShiftAmountTy(VT)));
      SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift);
      AddToWorklist(Shift.getNode());
      AddToWorklist(Add.getNode());
      return DAG.getNode(ISD::XOR, DL, VT, Add, Shift);
    }

    // vselect x, y (fcmp lt x, y) -> fminnum x, y
    // vselect x, y (fcmp gt x, y) -> fmaxnum x, y
    //
    // This is OK if we don't care about what happens if either operand is a
    // NaN.
    //
    if (N0.hasOneUse() && isLegalToCombineMinNumMaxNum(DAG, LHS, RHS, TLI)) {
      if (SDValue FMinMax =
              combineMinNumMaxNum(DL, VT, LHS, RHS, N1, N2, CC, TLI, DAG))
        return FMinMax;
    }

    // If this select has a condition (setcc) with narrower operands than the
    // select, try to widen the compare to match the select width.
    // TODO: This should be extended to handle any constant.
    // TODO: This could be extended to handle non-loading patterns, but that
    //       requires thorough testing to avoid regressions.
    if (isNullOrNullSplat(RHS)) {
      EVT NarrowVT = LHS.getValueType();
      EVT WideVT = N1.getValueType().changeVectorElementTypeToInteger();
      EVT SetCCVT = getSetCCResultType(LHS.getValueType());
      unsigned SetCCWidth = SetCCVT.getScalarSizeInBits();
      unsigned WideWidth = WideVT.getScalarSizeInBits();
      bool IsSigned = isSignedIntSetCC(CC);
      auto LoadExtOpcode = IsSigned ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
      if (LHS.getOpcode() == ISD::LOAD && LHS.hasOneUse() &&
          SetCCWidth != 1 && SetCCWidth < WideWidth &&
          TLI.isLoadExtLegalOrCustom(LoadExtOpcode, WideVT, NarrowVT) &&
          TLI.isOperationLegalOrCustom(ISD::SETCC, WideVT)) {
        // Both compare operands can be widened for free. The LHS can use an
        // extended load, and the RHS is a constant:
        //   vselect (ext (setcc load(X), C)), N1, N2 -->
        //   vselect (setcc extload(X), C'), N1, N2
        auto ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
        SDValue WideLHS = DAG.getNode(ExtOpcode, DL, WideVT, LHS);
        SDValue WideRHS = DAG.getNode(ExtOpcode, DL, WideVT, RHS);
        EVT WideSetCCVT = getSetCCResultType(WideVT);
        SDValue WideSetCC = DAG.getSetCC(DL, WideSetCCVT, WideLHS, WideRHS, CC);
        return DAG.getSelect(DL, N1.getValueType(), WideSetCC, N1, N2);
      }
    }
  }

  if (SimplifySelectOps(N, N1, N2))
    return SDValue(N, 0);  // Don't revisit N.

  // Fold (vselect (build_vector all_ones), N1, N2) -> N1
  if (ISD::isBuildVectorAllOnes(N0.getNode()))
    return N1;
  // Fold (vselect (build_vector all_zeros), N1, N2) -> N2
  if (ISD::isBuildVectorAllZeros(N0.getNode()))
    return N2;

  // ConvertSelectToConcatVector assumes that both of the above checks for
  // (vselect (build_vector all{ones,zeros}), ...) have already been made
  // and addressed.
  if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
      N2.getOpcode() == ISD::CONCAT_VECTORS &&
      ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
    if (SDValue CV = ConvertSelectToConcatVector(N, DAG))
      return CV;
  }

  if (SDValue V = foldVSelectOfConstants(N))
    return V;

  return SDValue();
}

SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  SDValue N3 = N->getOperand(3);
  SDValue N4 = N->getOperand(4);
  ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();

  // fold select_cc lhs, rhs, x, x, cc -> x
  if (N2 == N3)
    return N2;

  // Determine if the condition we're dealing with is constant
  if (SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), N0, N1,
                                  CC, SDLoc(N), false)) {
    AddToWorklist(SCC.getNode());

    if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
      if (!SCCC->isNullValue())
        return N2;    // cond always true -> true val
      else
        return N3;    // cond always false -> false val
    } else if (SCC->isUndef()) {
      // When the condition is UNDEF, just return the first operand. This is
      // consistent with DAG creation: no setcc node is created in this case.
      return N2;
    } else if (SCC.getOpcode() == ISD::SETCC) {
      // Fold to a simpler select_cc
      SDValue SelectOp = DAG.getNode(
          ISD::SELECT_CC, SDLoc(N), N2.getValueType(), SCC.getOperand(0),
          SCC.getOperand(1), N2, N3, SCC.getOperand(2));
      SelectOp->setFlags(SCC->getFlags());
      return SelectOp;
    }
  }

  // If we can fold this based on the true/false value, do so.
  if (SimplifySelectOps(N, N2, N3))
    return SDValue(N, 0);  // Don't revisit N.

  // fold select_cc into other things, such as min/max/abs
  return SimplifySelectCC(SDLoc(N), N0, N1, N2, N3, CC);
}

SDValue DAGCombiner::visitSETCC(SDNode *N) {
  // setcc is very commonly used as an argument to brcond. This pattern
  // also lends itself to numerous combines and, as a result, it is desirable
  // to keep the argument to a brcond as a setcc for as long as possible.
  bool PreferSetCC =
      N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BRCOND;

  SDValue Combined = SimplifySetCC(
      N->getValueType(0), N->getOperand(0), N->getOperand(1),
      cast<CondCodeSDNode>(N->getOperand(2))->get(), SDLoc(N), !PreferSetCC);

  if (!Combined)
    return SDValue();

  // If we prefer to have a setcc, and we don't have one, try our best to
  // recreate one using rebuildSetCC.
  if (PreferSetCC && Combined.getOpcode() != ISD::SETCC) {
    SDValue NewSetCC = rebuildSetCC(Combined);

    // We don't have anything interesting to combine to.
    if (NewSetCC.getNode() == N)
      return SDValue();

    if (NewSetCC)
      return NewSetCC;
  }

  return Combined;
}

SDValue DAGCombiner::visitSETCCCARRY(SDNode *N) {
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Carry = N->getOperand(2);
  SDValue Cond = N->getOperand(3);

  // If Carry is false, fold to a regular SETCC.
  if (isNullConstant(Carry))
    return DAG.getNode(ISD::SETCC, SDLoc(N), N->getVTList(), LHS, RHS, Cond);

  return SDValue();
}

/// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
/// a build_vector of constants.
/// This function is called by the DAGCombiner when visiting sext/zext/aext
/// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
/// Vector extends are not folded if operations are legal; this is to
/// avoid introducing illegal build_vector dag nodes.
static SDValue tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
                                         SelectionDAG &DAG, bool LegalTypes) {
  unsigned Opcode = N->getOpcode();
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
         Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
         Opcode == ISD::ZERO_EXTEND_VECTOR_INREG)
         && "Expected EXTEND dag node in input!");

  // fold (sext c1) -> c1
  // fold (zext c1) -> c1
  // fold (aext c1) -> c1
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(Opcode, DL, VT, N0);

  // fold (sext (select cond, c1, c2)) -> (select cond, sext c1, sext c2)
  // fold (zext (select cond, c1, c2)) -> (select cond, zext c1, zext c2)
  // fold (aext (select cond, c1, c2)) -> (select cond, sext c1, sext c2)
  if (N0->getOpcode() == ISD::SELECT) {
    SDValue Op1 = N0->getOperand(1);
    SDValue Op2 = N0->getOperand(2);
    if (isa<ConstantSDNode>(Op1) && isa<ConstantSDNode>(Op2) &&
        (Opcode != ISD::ZERO_EXTEND || !TLI.isZExtFree(N0.getValueType(), VT))) {
      // For any_extend, choose sign extension of the constants to allow a
      // possible further transform to sign_extend_inreg, i.e.:
      //
      // t1: i8 = select t0, Constant:i8<-1>, Constant:i8<0>
      // t2: i64 = any_extend t1
      // -->
      // t3: i64 = select t0, Constant:i64<-1>, Constant:i64<0>
      // -->
      // t4: i64 = sign_extend_inreg t3
      unsigned FoldOpc = Opcode;
      if (FoldOpc == ISD::ANY_EXTEND)
        FoldOpc = ISD::SIGN_EXTEND;
      return DAG.getSelect(DL, VT, N0->getOperand(0),
                           DAG.getNode(FoldOpc, DL, VT, Op1),
                           DAG.getNode(FoldOpc, DL, VT, Op2));
    }
  }

  // fold (sext (build_vector AllConstants) -> (build_vector AllConstants)
  // fold (zext (build_vector AllConstants) -> (build_vector AllConstants)
  // fold (aext (build_vector AllConstants) -> (build_vector AllConstants)
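  // E.g. (illustrative):
  //   v4i32 = zext (v4i16 build_vector <1, 2, undef, 4>)
  //   --> v4i32 build_vector <1, 2, 0, 4>   (undef becomes 0 for zext below)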
  EVT SVT = VT.getScalarType();
  if (!(VT.isVector() && (!LegalTypes || TLI.isTypeLegal(SVT)) &&
      ISD::isBuildVectorOfConstantSDNodes(N0.getNode())))
    return SDValue();

  // We can fold this node into a build_vector.
  unsigned VTBits = SVT.getSizeInBits();
  unsigned EVTBits = N0->getValueType(0).getScalarSizeInBits();
  SmallVector<SDValue, 8> Elts;
  unsigned NumElts = VT.getVectorNumElements();

  // For zero-extensions, UNDEF elements must still produce zero upper bits,
  // so they are folded to constant zero rather than kept as UNDEF.
  bool IsZext =
      Opcode == ISD::ZERO_EXTEND || Opcode == ISD::ZERO_EXTEND_VECTOR_INREG;

  for (unsigned i = 0; i != NumElts; ++i) {
    SDValue Op = N0.getOperand(i);
    if (Op.isUndef()) {
      Elts.push_back(IsZext ? DAG.getConstant(0, DL, SVT) : DAG.getUNDEF(SVT));
      continue;
    }

    SDLoc DL(Op);
    // Get the constant value and if needed trunc it to the size of the type.
    // Nodes like build_vector might have constants wider than the scalar type.
    APInt C = cast<ConstantSDNode>(Op)->getAPIntValue().zextOrTrunc(EVTBits);
    if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG)
      Elts.push_back(DAG.getConstant(C.sext(VTBits), DL, SVT));
    else
      Elts.push_back(DAG.getConstant(C.zext(VTBits), DL, SVT));
  }

  return DAG.getBuildVector(VT, DL, Elts);
}

// ExtendUsesToFormExtLoad - Try to extend the uses of a load to enable the
// "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
// transformation. Returns true if the extensions are possible and the
// transformation is profitable.
static bool ExtendUsesToFormExtLoad(EVT VT, SDNode *N, SDValue N0,
                                    unsigned ExtOpc,
                                    SmallVectorImpl<SDNode *> &ExtendNodes,
                                    const TargetLowering &TLI) {
  bool HasCopyToRegUses = false;
  bool isTruncFree = TLI.isTruncateFree(VT, N0.getValueType());
  for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
                            UE = N0.getNode()->use_end();
       UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User == N)
      continue;
    if (UI.getUse().getResNo() != N0.getResNo())
      continue;
    // FIXME: Only extend SETCC N, N and SETCC N, c for now.
    if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
      ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
      if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
        // Sign bits will be lost after a zext.
        return false;
      bool Add = false;
      for (unsigned i = 0; i != 2; ++i) {
        SDValue UseOp = User->getOperand(i);
        if (UseOp == N0)
          continue;
        if (!isa<ConstantSDNode>(UseOp))
          return false;
        Add = true;
      }
      if (Add)
        ExtendNodes.push_back(User);
      continue;
    }
    // If truncates aren't free and there are users we can't
    // extend, it isn't worthwhile.
    if (!isTruncFree)
      return false;
    // Remember if this value is live-out.
    if (User->getOpcode() == ISD::CopyToReg)
      HasCopyToRegUses = true;
  }

  if (HasCopyToRegUses) {
    bool BothLiveOut = false;
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
        BothLiveOut = true;
        break;
      }
    }
    if (BothLiveOut)
      // Both unextended and extended values are live out. There had better be
      // a good reason for the transformation.
      return !ExtendNodes.empty();
  }
  return true;
}

void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
                                  SDValue OrigLoad, SDValue ExtLoad,
                                  ISD::NodeType ExtType) {
  // Extend SetCC uses if necessary.
  SDLoc DL(ExtLoad);
  for (SDNode *SetCC : SetCCs) {
    SmallVector<SDValue, 4> Ops;

    for (unsigned j = 0; j != 2; ++j) {
      SDValue SOp = SetCC->getOperand(j);
      if (SOp == OrigLoad)
        Ops.push_back(ExtLoad);
      else
        Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp));
    }

    Ops.push_back(SetCC->getOperand(2));
    CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
  }
}

// FIXME: Bring more similar combines here, common to sext/zext (maybe aext?).
SDValue DAGCombiner::CombineExtLoad(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT DstVT = N->getValueType(0);
  EVT SrcVT = N0.getValueType();

  assert((N->getOpcode() == ISD::SIGN_EXTEND ||
          N->getOpcode() == ISD::ZERO_EXTEND) &&
         "Unexpected node type (not an extend)!");

  // fold (sext (load x)) to multiple smaller sextloads; same for zext.
  // For example, on a target with legal v4i32, but illegal v8i32, turn:
  //   (v8i32 (sext (v8i16 (load x))))
  // into:
  //   (v8i32 (concat_vectors (v4i32 (sextload x)),
  //                          (v4i32 (sextload (x + 16)))))
  // Where uses of the original load, i.e.:
  //   (v8i16 (load x))
  // are replaced with:
  //   (v8i16 (truncate
  //     (v8i32 (concat_vectors (v4i32 (sextload x)),
  //                            (v4i32 (sextload (x + 16)))))))
  //
  // This combine is only applicable to illegal, but splittable, vectors.
  // All legal types, and illegal non-vector types, are handled elsewhere.
  // This combine is controlled by TargetLowering::isVectorLoadExtDesirable.
  //
  if (N0->getOpcode() != ISD::LOAD)
    return SDValue();

  LoadSDNode *LN0 = cast<LoadSDNode>(N0);

  if (!ISD::isNON_EXTLoad(LN0) || !ISD::isUNINDEXEDLoad(LN0) ||
      !N0.hasOneUse() || !LN0->isSimple() ||
      !DstVT.isVector() || !DstVT.isPow2VectorType() ||
      !TLI.isVectorLoadExtDesirable(SDValue(N, 0)))
    return SDValue();

  SmallVector<SDNode *, 4> SetCCs;
  if (!ExtendUsesToFormExtLoad(DstVT, N, N0, N->getOpcode(), SetCCs, TLI))
    return SDValue();

  ISD::LoadExtType ExtType =
      N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;

  // Try to split the vector types to get down to legal types.
  EVT SplitSrcVT = SrcVT;
  EVT SplitDstVT = DstVT;
  while (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT) &&
         SplitSrcVT.getVectorNumElements() > 1) {
    SplitDstVT = DAG.GetSplitDestVTs(SplitDstVT).first;
    SplitSrcVT = DAG.GetSplitDestVTs(SplitSrcVT).first;
  }

  if (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT))
    return SDValue();

  assert(!DstVT.isScalableVector() && "Unexpected scalable vector type");

  SDLoc DL(N);
  const unsigned NumSplits =
      DstVT.getVectorNumElements() / SplitDstVT.getVectorNumElements();
  const unsigned Stride = SplitSrcVT.getStoreSize();
  SmallVector<SDValue, 4> Loads;
  SmallVector<SDValue, 4> Chains;

  SDValue BasePtr = LN0->getBasePtr();
  for (unsigned Idx = 0; Idx < NumSplits; Idx++) {
    const unsigned Offset = Idx * Stride;
    const unsigned Align = MinAlign(LN0->getAlignment(), Offset);

    SDValue SplitLoad = DAG.getExtLoad(
        ExtType, SDLoc(LN0), SplitDstVT, LN0->getChain(), BasePtr,
        LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT, Align,
        LN0->getMemOperand()->getFlags(), LN0->getAAInfo());

    BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
                          DAG.getConstant(Stride, DL, BasePtr.getValueType()));

    Loads.push_back(SplitLoad.getValue(0));
    Chains.push_back(SplitLoad.getValue(1));
  }

  SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  SDValue NewValue = DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Loads);

  // Simplify TF.
  AddToWorklist(NewChain.getNode());

  CombineTo(N, NewValue);

  // Replace uses of the original load (before extension)
  // with a truncate of the concatenated sextloaded vectors.
  SDValue Trunc =
      DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), NewValue);
  ExtendSetCCUses(SetCCs, N0, NewValue, (ISD::NodeType)N->getOpcode());
  CombineTo(N0.getNode(), Trunc, NewChain);
  return SDValue(N, 0); // Return N so it doesn't get rechecked!
}

// fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
//      (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
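// E.g. (illustrative): zext i16 (and (srl (load x), 4), 255) to i32
//   --> and (srl (zextload i16 x to i32), 4), 255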
SDValue DAGCombiner::CombineZExtLogicopShiftLoad(SDNode *N) {
  assert(N->getOpcode() == ISD::ZERO_EXTEND);
  EVT VT = N->getValueType(0);
  EVT OrigVT = N->getOperand(0).getValueType();
  if (TLI.isZExtFree(OrigVT, VT))
    return SDValue();

  // and/or/xor
  SDValue N0 = N->getOperand(0);
  if (!(N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
        N0.getOpcode() == ISD::XOR) ||
      N0.getOperand(1).getOpcode() != ISD::Constant ||
      (LegalOperations && !TLI.isOperationLegal(N0.getOpcode(), VT)))
    return SDValue();

  // shl/shr
  SDValue N1 = N0->getOperand(0);
  if (!(N1.getOpcode() == ISD::SHL || N1.getOpcode() == ISD::SRL) ||
      N1.getOperand(1).getOpcode() != ISD::Constant ||
      (LegalOperations && !TLI.isOperationLegal(N1.getOpcode(), VT)))
    return SDValue();

  // load
  if (!isa<LoadSDNode>(N1.getOperand(0)))
    return SDValue();
  LoadSDNode *Load = cast<LoadSDNode>(N1.getOperand(0));
  EVT MemVT = Load->getMemoryVT();
  if (!TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT) ||
      Load->getExtensionType() == ISD::SEXTLOAD || Load->isIndexed())
    return SDValue();

  // If the shift op is SHL, the logic op must be AND, otherwise the result
  // will be wrong.
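  // (Bits shifted past the top of the narrow type by SHL are discarded before
  // the original zext, but would survive once the shift is done in the wide
  // type; the zero-extended AND mask clears them again, while OR/XOR do not.)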
  if (N1.getOpcode() == ISD::SHL && N0.getOpcode() != ISD::AND)
    return SDValue();

  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  SmallVector<SDNode*, 4> SetCCs;
  if (!ExtendUsesToFormExtLoad(VT, N1.getNode(), N1.getOperand(0),
                               ISD::ZERO_EXTEND, SetCCs, TLI))
    return SDValue();

  // Actually do the transformation.
  SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(Load), VT,
                                   Load->getChain(), Load->getBasePtr(),
                                   Load->getMemoryVT(), Load->getMemOperand());

  SDLoc DL1(N1);
  SDValue Shift = DAG.getNode(N1.getOpcode(), DL1, VT, ExtLoad,
                              N1.getOperand(1));

  APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
  Mask = Mask.zext(VT.getSizeInBits());
  SDLoc DL0(N0);
  SDValue And = DAG.getNode(N0.getOpcode(), DL0, VT, Shift,
                            DAG.getConstant(Mask, DL0, VT));

  ExtendSetCCUses(SetCCs, N1.getOperand(0), ExtLoad, ISD::ZERO_EXTEND);
  CombineTo(N, And);
  if (SDValue(Load, 0).hasOneUse()) {
    DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), ExtLoad.getValue(1));
  } else {
    SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(Load),
                                Load->getValueType(0), ExtLoad);
    CombineTo(Load, Trunc, ExtLoad.getValue(1));
  }

  // N0 is dead at this point.
  recursivelyDeleteUnusedNodes(N0.getNode());

  return SDValue(N,0); // Return N so it doesn't get rechecked!
}

/// If we're narrowing or widening the result of a vector select and the final
/// size is the same size as a setcc (compare) feeding the select, then try to
/// apply the cast operation to the select's operands because matching vector
/// sizes for a select condition and other operands should be more efficient.
SDValue DAGCombiner::matchVSelectOpSizesWithSetCC(SDNode *Cast) {
  unsigned CastOpcode = Cast->getOpcode();
  assert((CastOpcode == ISD::SIGN_EXTEND || CastOpcode == ISD::ZERO_EXTEND ||
          CastOpcode == ISD::TRUNCATE || CastOpcode == ISD::FP_EXTEND ||
          CastOpcode == ISD::FP_ROUND) &&
         "Unexpected opcode for vector select narrowing/widening");

  // We only do this transform before legal ops because the pattern may be
  // obfuscated by target-specific operations after legalization. Do not create
  // an illegal select op, however, because that may be difficult to lower.
  EVT VT = Cast->getValueType(0);
  if (LegalOperations || !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
    return SDValue();

  SDValue VSel = Cast->getOperand(0);
  if (VSel.getOpcode() != ISD::VSELECT || !VSel.hasOneUse() ||
      VSel.getOperand(0).getOpcode() != ISD::SETCC)
    return SDValue();

  // Does the setcc have the same vector size as the casted select?
  SDValue SetCC = VSel.getOperand(0);
  EVT SetCCVT = getSetCCResultType(SetCC.getOperand(0).getValueType());
  if (SetCCVT.getSizeInBits() != VT.getSizeInBits())
    return SDValue();

  // cast (vsel (setcc X), A, B) --> vsel (setcc X), (cast A), (cast B)
  SDValue A = VSel.getOperand(1);
  SDValue B = VSel.getOperand(2);
  SDValue CastA, CastB;
  SDLoc DL(Cast);
  if (CastOpcode == ISD::FP_ROUND) {
    // FP_ROUND (fptrunc) has an extra flag operand to pass along.
    CastA = DAG.getNode(CastOpcode, DL, VT, A, Cast->getOperand(1));
    CastB = DAG.getNode(CastOpcode, DL, VT, B, Cast->getOperand(1));
  } else {
    CastA = DAG.getNode(CastOpcode, DL, VT, A);
    CastB = DAG.getNode(CastOpcode, DL, VT, B);
  }
  return DAG.getNode(ISD::VSELECT, DL, VT, SetCC, CastA, CastB);
}

// fold ([s|z]ext ([s|z]extload x)) -> ([s|z]ext (truncate ([s|z]extload x)))
// fold ([s|z]ext (     extload x)) -> ([s|z]ext (truncate ([s|z]extload x)))
static SDValue tryToFoldExtOfExtload(SelectionDAG &DAG, DAGCombiner &Combiner,
                                     const TargetLowering &TLI, EVT VT,
                                     bool LegalOperations, SDNode *N,
                                     SDValue N0, ISD::LoadExtType ExtLoadType) {
  SDNode *N0Node = N0.getNode();
  bool isAExtLoad = (ExtLoadType == ISD::SEXTLOAD) ? ISD::isSEXTLoad(N0Node)
                                                   : ISD::isZEXTLoad(N0Node);
  if ((!isAExtLoad && !ISD::isEXTLoad(N0Node)) ||
      !ISD::isUNINDEXEDLoad(N0Node) || !N0.hasOneUse())
    return SDValue();

  LoadSDNode *LN0 = cast<LoadSDNode>(N0);
  EVT MemVT = LN0->getMemoryVT();
  if ((LegalOperations || !LN0->isSimple() ||
       VT.isVector()) &&
      !TLI.isLoadExtLegal(ExtLoadType, VT, MemVT))
    return SDValue();

  SDValue ExtLoad =
      DAG.getExtLoad(ExtLoadType, SDLoc(LN0), VT, LN0->getChain(),
                     LN0->getBasePtr(), MemVT, LN0->getMemOperand());
  Combiner.CombineTo(N, ExtLoad);
  DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
  if (LN0->use_empty())
    Combiner.recursivelyDeleteUnusedNodes(LN0);
  return SDValue(N, 0); // Return N so it doesn't get rechecked!
}

// fold ([s|z]ext (load x)) -> ([s|z]ext (truncate ([s|z]extload x)))
// Only generate vector extloads when 1) they're legal, and 2) they are
// deemed desirable by the target.
static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner,
                                  const TargetLowering &TLI, EVT VT,
                                  bool LegalOperations, SDNode *N, SDValue N0,
                                  ISD::LoadExtType ExtLoadType,
                                  ISD::NodeType ExtOpc) {
  if (!ISD::isNON_EXTLoad(N0.getNode()) ||
      !ISD::isUNINDEXEDLoad(N0.getNode()) ||
      ((LegalOperations || VT.isVector() ||
        !cast<LoadSDNode>(N0)->isSimple()) &&
       !TLI.isLoadExtLegal(ExtLoadType, VT, N0.getValueType())))
    return {};

  bool DoXform = true;
  SmallVector<SDNode *, 4> SetCCs;
  if (!N0.hasOneUse())
    DoXform = ExtendUsesToFormExtLoad(VT, N, N0, ExtOpc, SetCCs, TLI);
  if (VT.isVector())
    DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0));
  if (!DoXform)
    return {};

  LoadSDNode *LN0 = cast<LoadSDNode>(N0);
  SDValue ExtLoad = DAG.getExtLoad(ExtLoadType, SDLoc(LN0), VT, LN0->getChain(),
                                   LN0->getBasePtr(), N0.getValueType(),
                                   LN0->getMemOperand());
  Combiner.ExtendSetCCUses(SetCCs, N0, ExtLoad, ExtOpc);
  // If the load value is used only by N, replace it via CombineTo N.
  bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
  Combiner.CombineTo(N, ExtLoad);
  if (NoReplaceTrunc) {
    DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
    Combiner.recursivelyDeleteUnusedNodes(LN0);
  } else {
    SDValue Trunc =
        DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), ExtLoad);
    Combiner.CombineTo(LN0, Trunc, ExtLoad.getValue(1));
  }
  return SDValue(N, 0); // Return N so it doesn't get rechecked!
}

static SDValue tryToFoldExtOfMaskedLoad(SelectionDAG &DAG,
                                        const TargetLowering &TLI, EVT VT,
                                        SDNode *N, SDValue N0,
                                        ISD::LoadExtType ExtLoadType,
                                        ISD::NodeType ExtOpc) {
  if (!N0.hasOneUse())
    return SDValue();

  MaskedLoadSDNode *Ld = dyn_cast<MaskedLoadSDNode>(N0);
  if (!Ld || Ld->getExtensionType() != ISD::NON_EXTLOAD)
    return SDValue();

  if (!TLI.isLoadExtLegal(ExtLoadType, VT, Ld->getValueType(0)))
    return SDValue();

  if (!TLI.isVectorLoadExtDesirable(SDValue(N, 0)))
    return SDValue();

  SDLoc dl(Ld);
  SDValue PassThru = DAG.getNode(ExtOpc, dl, VT, Ld->getPassThru());
  SDValue NewLoad = DAG.getMaskedLoad(VT, dl, Ld->getChain(),
                                      Ld->getBasePtr(), Ld->getMask(),
                                      PassThru, Ld->getMemoryVT(),
                                      Ld->getMemOperand(), ExtLoadType,
                                      Ld->isExpandingLoad());
  DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), SDValue(NewLoad.getNode(), 1));
  return NewLoad;
}

static SDValue foldExtendedSignBitTest(SDNode *N, SelectionDAG &DAG,
                                       bool LegalOperations) {
  assert((N->getOpcode() == ISD::SIGN_EXTEND ||
          N->getOpcode() == ISD::ZERO_EXTEND) && "Expected sext or zext");

  SDValue SetCC = N->getOperand(0);
  if (LegalOperations || SetCC.getOpcode() != ISD::SETCC ||
      !SetCC.hasOneUse() || SetCC.getValueType() != MVT::i1)
    return SDValue();

  SDValue X = SetCC.getOperand(0);
  SDValue Ones = SetCC.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
  EVT VT = N->getValueType(0);
  EVT XVT = X.getValueType();
  // setge X, C is canonicalized to setgt, so we do not need to match that
  // pattern. The setlt sibling is folded in SimplifySelectCC() because it does
  // not require the 'not' op.
  if (CC == ISD::SETGT && isAllOnesConstant(Ones) && VT == XVT) {
    // Invert and smear/shift the sign bit:
    // sext i1 (setgt iN X, -1) --> sra (not X), (N - 1)
    // zext i1 (setgt iN X, -1) --> srl (not X), (N - 1)
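    // E.g. for i32 (illustrative): sext i1 (setgt i32 X, -1) is -1 when X is
    // non-negative and 0 otherwise, which is exactly sra (xor X, -1), 31.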
    SDLoc DL(N);
    SDValue NotX = DAG.getNOT(DL, X, VT);
    SDValue ShiftAmount = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
    auto ShiftOpcode = N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SRA : ISD::SRL;
    return DAG.getNode(ShiftOpcode, DL, VT, NotX, ShiftAmount);
  }
  return SDValue();
}

SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
    return Res;

  // fold (sext (sext x)) -> (sext x)
  // fold (sext (aext x)) -> (sext x)
  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, N0.getOperand(0));

  if (N0.getOpcode() == ISD::TRUNCATE) {
    // fold (sext (truncate (load x))) -> (sext (smaller load x))
    // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
    if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
      SDNode *oye = N0.getOperand(0).getNode();
      if (NarrowLoad.getNode() != N0.getNode()) {
        CombineTo(N0.getNode(), NarrowLoad);
        // CombineTo deleted the truncate, if needed, but not what's under it.
        AddToWorklist(oye);
      }
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }

    // See if the value being truncated is already sign extended.  If so, just
    // eliminate the trunc/sext pair.
    SDValue Op = N0.getOperand(0);
    unsigned OpBits   = Op.getScalarValueSizeInBits();
    unsigned MidBits  = N0.getScalarValueSizeInBits();
    unsigned DestBits = VT.getScalarSizeInBits();
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op);

    if (OpBits == DestBits) {
      // Op is i32, Mid is i8, and Dest is i32.  If Op has more than 24 sign
      // bits, the trunc/sext pair is a no-op and Op can be used directly.
      if (NumSignBits > DestBits-MidBits)
        return Op;
    } else if (OpBits < DestBits) {
      // Op is i32, Mid is i8, and Dest is i64.  If Op has more than 24 sign
      // bits, just sext from i32.
      if (NumSignBits > OpBits-MidBits)
        return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op);
    } else {
      // Op is i64, Mid is i8, and Dest is i32.  If Op has more than 56 sign
      // bits, just truncate to i32.
      if (NumSignBits > OpBits-MidBits)
        return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
    }

    // fold (sext (truncate x)) -> (sextinreg x).
    if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
                                                 N0.getValueType())) {
      if (OpBits < DestBits)
        Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N0), VT, Op);
      else if (OpBits > DestBits)
        Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT, Op);
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Op,
                         DAG.getValueType(N0.getValueType()));
    }
  }

  // Try to simplify (sext (load x)).
  if (SDValue foldedExt =
          tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
                             ISD::SEXTLOAD, ISD::SIGN_EXTEND))
    return foldedExt;

  if (SDValue foldedExt =
      tryToFoldExtOfMaskedLoad(DAG, TLI, VT, N, N0, ISD::SEXTLOAD,
                               ISD::SIGN_EXTEND))
    return foldedExt;

  // fold (sext (load x)) to multiple smaller sextloads.
  // Only on illegal but splittable vectors.
  if (SDValue ExtLoad = CombineExtLoad(N))
    return ExtLoad;

  // Try to simplify (sext (sextload x)).
  if (SDValue foldedExt = tryToFoldExtOfExtload(
          DAG, *this, TLI, VT, LegalOperations, N, N0, ISD::SEXTLOAD))
    return foldedExt;

  // fold (sext (and/or/xor (load x), cst)) ->
  //      (and/or/xor (sextload x), (sext cst))
  if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
       N0.getOpcode() == ISD::XOR) &&
      isa<LoadSDNode>(N0.getOperand(0)) &&
      N0.getOperand(1).getOpcode() == ISD::Constant &&
      (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
    LoadSDNode *LN00 = cast<LoadSDNode>(N0.getOperand(0));
    EVT MemVT = LN00->getMemoryVT();
    if (TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, MemVT) &&
      LN00->getExtensionType() != ISD::ZEXTLOAD && LN00->isUnindexed()) {
      SmallVector<SDNode*, 4> SetCCs;
      bool DoXform = ExtendUsesToFormExtLoad(VT, N0.getNode(), N0.getOperand(0),
                                             ISD::SIGN_EXTEND, SetCCs, TLI);
      if (DoXform) {
        SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(LN00), VT,
                                         LN00->getChain(), LN00->getBasePtr(),
                                         LN00->getMemoryVT(),
                                         LN00->getMemOperand());
        APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
        Mask = Mask.sext(VT.getSizeInBits());
        SDValue And = DAG.getNode(N0.getOpcode(), DL, VT,
                                  ExtLoad, DAG.getConstant(Mask, DL, VT));
        ExtendSetCCUses(SetCCs, N0.getOperand(0), ExtLoad, ISD::SIGN_EXTEND);
        bool NoReplaceTruncAnd = !N0.hasOneUse();
        bool NoReplaceTrunc = SDValue(LN00, 0).hasOneUse();
        CombineTo(N, And);
        // If N0 has multiple uses, change other uses as well.
        if (NoReplaceTruncAnd) {
          SDValue TruncAnd =
              DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), And);
          CombineTo(N0.getNode(), TruncAnd);
        }
        if (NoReplaceTrunc) {
          DAG.ReplaceAllUsesOfValueWith(SDValue(LN00, 1), ExtLoad.getValue(1));
        } else {
          SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(LN00),
                                      LN00->getValueType(0), ExtLoad);
          CombineTo(LN00, Trunc, ExtLoad.getValue(1));
        }
        return SDValue(N,0); // Return N so it doesn't get rechecked!
      }
    }
  }

  if (SDValue V = foldExtendedSignBitTest(N, DAG, LegalOperations))
    return V;

  if (N0.getOpcode() == ISD::SETCC) {
    SDValue N00 = N0.getOperand(0);
    SDValue N01 = N0.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
    EVT N00VT = N0.getOperand(0).getValueType();

    // sext(setcc) -> sext_in_reg(vsetcc) for vectors.
    // Only do this before legalize for now.
    if (VT.isVector() && !LegalOperations &&
        TLI.getBooleanContents(N00VT) ==
            TargetLowering::ZeroOrNegativeOneBooleanContent) {
      // On some architectures (such as SSE/NEON/etc) the SETCC result type is
      // of the same size as the compared operands. Only optimize sext(setcc())
      // if this is the case.
      EVT SVT = getSetCCResultType(N00VT);

      // If we already have the desired type, don't change it.
      if (SVT != N0.getValueType()) {
        // We know that the # elements of the result is the same as the
        // # elements of the compare (and the # elements of the compare result
        // for that matter).  Check to see that they are the same size.  If so,
        // we know that the element size of the sext'd result matches the
        // element size of the compare operands.
        if (VT.getSizeInBits() == SVT.getSizeInBits())
          return DAG.getSetCC(DL, VT, N00, N01, CC);

        // If the desired elements are smaller or larger than the source
        // elements, we can use a matching integer vector type and then
        // truncate/sign extend.
        EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
        if (SVT == MatchingVecType) {
          SDValue VsetCC = DAG.getSetCC(DL, MatchingVecType, N00, N01, CC);
          return DAG.getSExtOrTrunc(VsetCC, DL, VT);
        }
      }
    }

    // sext(setcc x, y, cc) -> (select (setcc x, y, cc), T, 0)
    // Here, T can be 1 or -1, depending on the type of the setcc and
    // getBooleanContents().
    unsigned SetCCWidth = N0.getScalarValueSizeInBits();

    // To determine the "true" side of the select, we need to know the high bit
    // of the value returned by the setcc if it evaluates to true.
    // If the type of the setcc is i1, then the true case of the select is just
    // sext(i1 1), that is, -1.
    // If the type of the setcc is larger (say, i8) then the value of the high
    // bit depends on getBooleanContents(), so ask TLI for a real "true" value
    // of the appropriate width.
    SDValue ExtTrueVal = (SetCCWidth == 1)
                             ? DAG.getAllOnesConstant(DL, VT)
                             : DAG.getBoolConstant(true, DL, VT, N00VT);
    SDValue Zero = DAG.getConstant(0, DL, VT);
    if (SDValue SCC =
            SimplifySelectCC(DL, N00, N01, ExtTrueVal, Zero, CC, true))
      return SCC;

    if (!VT.isVector() && !TLI.convertSelectOfConstantsToMath(VT)) {
      EVT SetCCVT = getSetCCResultType(N00VT);
      // Don't do this transform for i1 because there's a select transform
      // that would reverse it.
      // TODO: We should not do this transform at all without a target hook
      // because a sext is likely cheaper than a select?
      if (SetCCVT.getScalarSizeInBits() != 1 &&
          (!LegalOperations || TLI.isOperationLegal(ISD::SETCC, N00VT))) {
        SDValue SetCC = DAG.getSetCC(DL, SetCCVT, N00, N01, CC);
        return DAG.getSelect(DL, VT, SetCC, ExtTrueVal, Zero);
      }
    }
  }

  // fold (sext x) -> (zext x) if the sign bit is known zero.
  if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
      DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0);

  if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
    return NewVSel;

  // Eliminate this sign extend by doing a negation in the destination type:
  // sext i32 (0 - (zext i8 X to i32)) to i64 --> 0 - (zext i8 X to i64)
  if (N0.getOpcode() == ISD::SUB && N0.hasOneUse() &&
      isNullOrNullSplat(N0.getOperand(0)) &&
      N0.getOperand(1).getOpcode() == ISD::ZERO_EXTEND &&
      TLI.isOperationLegalOrCustom(ISD::SUB, VT)) {
    SDValue Zext = DAG.getZExtOrTrunc(N0.getOperand(1).getOperand(0), DL, VT);
    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Zext);
  }
  // Eliminate this sign extend by doing a decrement in the destination type:
  // sext i32 ((zext i8 X to i32) + (-1)) to i64 --> (zext i8 X to i64) + (-1)
  if (N0.getOpcode() == ISD::ADD && N0.hasOneUse() &&
      isAllOnesOrAllOnesSplat(N0.getOperand(1)) &&
      N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
      TLI.isOperationLegalOrCustom(ISD::ADD, VT)) {
    SDValue Zext = DAG.getZExtOrTrunc(N0.getOperand(0).getOperand(0), DL, VT);
    return DAG.getNode(ISD::ADD, DL, VT, Zext, DAG.getAllOnesConstant(DL, VT));
  }

  return SDValue();
}

// isTruncateOf - If N is a truncate of some other value, return true and
// record the value being truncated in Op and which of Op's bits are zero/one
// in Known. This function computes KnownBits to avoid a duplicated call to
// computeKnownBits in the caller.
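// Note that a (setne X, 0) where every bit of X except possibly bit 0 is
// known zero behaves exactly like (truncate X to i1); that is why the setcc
// pattern below is accepted as well.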
static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
                         KnownBits &Known) {
  if (N->getOpcode() == ISD::TRUNCATE) {
    Op = N->getOperand(0);
    Known = DAG.computeKnownBits(Op);
    return true;
  }

  if (N.getOpcode() != ISD::SETCC ||
      N.getValueType().getScalarType() != MVT::i1 ||
      cast<CondCodeSDNode>(N.getOperand(2))->get() != ISD::SETNE)
    return false;

  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  assert(Op0.getValueType() == Op1.getValueType());

  if (isNullOrNullSplat(Op0))
    Op = Op1;
  else if (isNullOrNullSplat(Op1))
    Op = Op0;
  else
    return false;

  Known = DAG.computeKnownBits(Op);

  return (Known.Zero | 1).isAllOnesValue();
}

/// Given an extending node with a pop-count operand, if the target does not
/// support a pop-count in the narrow source type but does support it in the
/// destination type, widen the pop-count to the destination type.
static SDValue widenCtPop(SDNode *Extend, SelectionDAG &DAG) {
  assert((Extend->getOpcode() == ISD::ZERO_EXTEND ||
          Extend->getOpcode() == ISD::ANY_EXTEND) && "Expected extend op");

  SDValue CtPop = Extend->getOperand(0);
  if (CtPop.getOpcode() != ISD::CTPOP || !CtPop.hasOneUse())
    return SDValue();

  EVT VT = Extend->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isOperationLegalOrCustom(ISD::CTPOP, CtPop.getValueType()) ||
      !TLI.isOperationLegalOrCustom(ISD::CTPOP, VT))
    return SDValue();

  // zext (ctpop X) --> ctpop (zext X)
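  // E.g. (illustrative): zext (i16 ctpop X) to i32 --> ctpop (zext X to i32),
  // valid because zero extension does not add any set bits.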
  SDLoc DL(Extend);
  SDValue NewZext = DAG.getZExtOrTrunc(CtPop.getOperand(0), DL, VT);
  return DAG.getNode(ISD::CTPOP, DL, VT, NewZext);
}

SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
    return Res;

  // fold (zext (zext x)) -> (zext x)
  // fold (zext (aext x)) -> (zext x)
  if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
    return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT,
                       N0.getOperand(0));

  // fold (zext (truncate x)) -> (zext x) or
  //      (zext (truncate x)) -> (truncate x)
  // This is valid when the truncated bits of x are already zero.
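  // E.g. (illustrative): zext (trunc i32 %x to i8) to i16 --> trunc %x to i16
  // when bits 8..15 of %x are known zero.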
  SDValue Op;
  KnownBits Known;
  if (isTruncateOf(DAG, N0, Op, Known)) {
    APInt TruncatedBits =
      (Op.getScalarValueSizeInBits() == N0.getScalarValueSizeInBits()) ?
      APInt(Op.getScalarValueSizeInBits(), 0) :
      APInt::getBitsSet(Op.getScalarValueSizeInBits(),
                        N0.getScalarValueSizeInBits(),
                        std::min(Op.getScalarValueSizeInBits(),
                                 VT.getScalarSizeInBits()));
    if (TruncatedBits.isSubsetOf(Known.Zero))
      return DAG.getZExtOrTrunc(Op, SDLoc(N), VT);
  }

  // fold (zext (truncate x)) -> (and x, mask)
  if (N0.getOpcode() == ISD::TRUNCATE) {
    // fold (zext (truncate (load x))) -> (zext (smaller load x))
    // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
    if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
      SDNode *oye = N0.getOperand(0).getNode();
      if (NarrowLoad.getNode() != N0.getNode()) {
        CombineTo(N0.getNode(), NarrowLoad);
        // CombineTo deleted the truncate, if needed, but not what's under it.
        AddToWorklist(oye);
      }
      return SDValue(N, 0); // Return N so it doesn't get rechecked!
    }

    EVT SrcVT = N0.getOperand(0).getValueType();
    EVT MinVT = N0.getValueType();

    // Try to mask before the extension to avoid having to generate a larger
    // mask, possibly over several sub-vectors.
    if (SrcVT.bitsLT(VT) && VT.isVector()) {
      if (!LegalOperations || (TLI.isOperationLegal(ISD::AND, SrcVT) &&
                               TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) {
        SDValue Op = N0.getOperand(0);
        Op = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType());
        AddToWorklist(Op.getNode());
        SDValue ZExtOrTrunc = DAG.getZExtOrTrunc(Op, SDLoc(N), VT);
        // Transfer the debug info; the new node is equivalent to N0.
        DAG.transferDbgValues(N0, ZExtOrTrunc);
        return ZExtOrTrunc;
      }
    }

    if (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) {
      SDValue Op = DAG.getAnyExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
      AddToWorklist(Op.getNode());
      SDValue And = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType());
      // We may safely transfer the debug info describing the truncate node over
      // to the equivalent and operation.
      DAG.transferDbgValues(N0, And);
      return And;
    }
  }

  // Fold (zext (and (trunc x), cst)) -> (and x, cst),
  // if either of the casts is not free.
  if (N0.getOpcode() == ISD::AND &&
      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
      N0.getOperand(1).getOpcode() == ISD::Constant &&
      (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
                           N0.getValueType()) ||
       !TLI.isZExtFree(N0.getValueType(), VT))) {
    SDValue X = N0.getOperand(0).getOperand(0);
    X = DAG.getAnyExtOrTrunc(X, SDLoc(X), VT);
    APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
    Mask = Mask.zext(VT.getSizeInBits());
    SDLoc DL(N);
    return DAG.getNode(ISD::AND, DL, VT,
                       X, DAG.getConstant(Mask, DL, VT));
  }

  // Try to simplify (zext (load x)).
  if (SDValue foldedExt =
          tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
                             ISD::ZEXTLOAD, ISD::ZERO_EXTEND))
    return foldedExt;

  if (SDValue foldedExt =
      tryToFoldExtOfMaskedLoad(DAG, TLI, VT, N, N0, ISD::ZEXTLOAD,
                               ISD::ZERO_EXTEND))
    return foldedExt;

  // fold (zext (load x)) to multiple smaller zextloads.
  // Only on illegal but splittable vectors.
  if (SDValue ExtLoad = CombineExtLoad(N))
    return ExtLoad;

  // fold (zext (and/or/xor (load x), cst)) ->
  //      (and/or/xor (zextload x), (zext cst))
  // Unless (and (load x) cst) will match as a zextload already and has
  // additional users.
  if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
       N0.getOpcode() == ISD::XOR) &&
      isa<LoadSDNode>(N0.getOperand(0)) &&
      N0.getOperand(1).getOpcode() == ISD::Constant &&
      (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
    LoadSDNode *LN00 = cast<LoadSDNode>(N0.getOperand(0));
    EVT MemVT = LN00->getMemoryVT();
    if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT) &&
        LN00->getExtensionType() != ISD::SEXTLOAD && LN00->isUnindexed()) {
      bool DoXform = true;
      SmallVector<SDNode*, 4> SetCCs;
      if (!N0.hasOneUse()) {
        if (N0.getOpcode() == ISD::AND) {
          auto *AndC = cast<ConstantSDNode>(N0.getOperand(1));
          EVT LoadResultTy = AndC->getValueType(0);
          EVT ExtVT;
          if (isAndLoadExtLoad(AndC, LN00, LoadResultTy, ExtVT))
            DoXform = false;
        }
      }
      if (DoXform)
        DoXform = ExtendUsesToFormExtLoad(VT, N0.getNode(), N0.getOperand(0),
                                          ISD::ZERO_EXTEND, SetCCs, TLI);
      if (DoXform) {
        SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN00), VT,
                                         LN00->getChain(), LN00->getBasePtr(),
                                         LN00->getMemoryVT(),
                                         LN00->getMemOperand());
        APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
        Mask = Mask.zext(VT.getSizeInBits());
        SDLoc DL(N);
        SDValue And = DAG.getNode(N0.getOpcode(), DL, VT,
                                  ExtLoad, DAG.getConstant(Mask, DL, VT));
        ExtendSetCCUses(SetCCs, N0.getOperand(0), ExtLoad, ISD::ZERO_EXTEND);
        bool NoReplaceTruncAnd = !N0.hasOneUse();
        bool NoReplaceTrunc = SDValue(LN00, 0).hasOneUse();
        CombineTo(N, And);
        // If N0 has multiple uses, change other uses as well.
        if (NoReplaceTruncAnd) {
          SDValue TruncAnd =
              DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), And);
          CombineTo(N0.getNode(), TruncAnd);
        }
        if (NoReplaceTrunc) {
          DAG.ReplaceAllUsesOfValueWith(SDValue(LN00, 1), ExtLoad.getValue(1));
        } else {
          SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(LN00),
                                      LN00->getValueType(0), ExtLoad);
          CombineTo(LN00, Trunc, ExtLoad.getValue(1));
        }
        return SDValue(N,0); // Return N so it doesn't get rechecked!
      }
    }
  }

  // fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
  //      (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
  if (SDValue ZExtLoad = CombineZExtLogicopShiftLoad(N))
    return ZExtLoad;

  // Try to simplify (zext (zextload x)).
  if (SDValue foldedExt = tryToFoldExtOfExtload(
          DAG, *this, TLI, VT, LegalOperations, N, N0, ISD::ZEXTLOAD))
    return foldedExt;

  if (SDValue V = foldExtendedSignBitTest(N, DAG, LegalOperations))
    return V;

  if (N0.getOpcode() == ISD::SETCC) {
    // Only do this before legalize for now.
    if (!LegalOperations && VT.isVector() &&
        N0.getValueType().getVectorElementType() == MVT::i1) {
      EVT N00VT = N0.getOperand(0).getValueType();
      if (getSetCCResultType(N00VT) == N0.getValueType())
        return SDValue();

      // We know that the # elements of the result is the same as the #
      // elements of the compare (and the # elements of the compare result for
      // that matter). Check to see that they are the same size. If so, we know
      // that the element size of the zext'd result matches the element size of
      // the compare operands.
      SDLoc DL(N);
      SDValue VecOnes = DAG.getConstant(1, DL, VT);
      if (VT.getSizeInBits() == N00VT.getSizeInBits()) {
        // zext(setcc) -> (and (vsetcc), (1, 1, ...) for vectors.
        SDValue VSetCC = DAG.getNode(ISD::SETCC, DL, VT, N0.getOperand(0),
                                     N0.getOperand(1), N0.getOperand(2));
        return DAG.getNode(ISD::AND, DL, VT, VSetCC, VecOnes);
      }

      // If the desired elements are smaller or larger than the source
      // elements we can use a matching integer vector type and then
      // truncate/sign extend.
      EVT MatchingVectorType = N00VT.changeVectorElementTypeToInteger();
      SDValue VsetCC =
          DAG.getNode(ISD::SETCC, DL, MatchingVectorType, N0.getOperand(0),
                      N0.getOperand(1), N0.getOperand(2));
      return DAG.getNode(ISD::AND, DL, VT, DAG.getSExtOrTrunc(VsetCC, DL, VT),
                         VecOnes);
    }

    // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
    SDLoc DL(N);
    if (SDValue SCC = SimplifySelectCC(
            DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
            DAG.getConstant(0, DL, VT),
            cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
      return SCC;
  }

  // (zext (shl/srl (zext x), cst)) -> (shl/srl (zext x), cst)
  if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) &&
      isa<ConstantSDNode>(N0.getOperand(1)) &&
      N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
      N0.hasOneUse()) {
    SDValue ShAmt = N0.getOperand(1);
    if (N0.getOpcode() == ISD::SHL) {
      SDValue InnerZExt = N0.getOperand(0);
      // If the original shl may be shifting out bits, do not perform this
      // transformation.
      unsigned KnownZeroBits = InnerZExt.getValueSizeInBits() -
        InnerZExt.getOperand(0).getValueSizeInBits();
      if (cast<ConstantSDNode>(ShAmt)->getAPIntValue().ugt(KnownZeroBits))
        return SDValue();
    }

    SDLoc DL(N);

    // Ensure that the shift amount is wide enough for the shifted value.
    if (VT.getSizeInBits() >= 256)
      ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt);

    return DAG.getNode(N0.getOpcode(), DL, VT,
                       DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)),
                       ShAmt);
  }

  if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
    return NewVSel;

  if (SDValue NewCtPop = widenCtPop(N, DAG))
    return NewCtPop;

  return SDValue();
}

SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
    return Res;

  // fold (aext (aext x)) -> (aext x)
  // fold (aext (zext x)) -> (zext x)
  // fold (aext (sext x)) -> (sext x)
  if (N0.getOpcode() == ISD::ANY_EXTEND  ||
      N0.getOpcode() == ISD::ZERO_EXTEND ||
      N0.getOpcode() == ISD::SIGN_EXTEND)
    return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));

  // fold (aext (truncate (load x))) -> (aext (smaller load x))
  // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
  if (N0.getOpcode() == ISD::TRUNCATE) {
    if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
      SDNode *oye = N0.getOperand(0).getNode();
      if (NarrowLoad.getNode() != N0.getNode()) {
        CombineTo(N0.getNode(), NarrowLoad);
        // CombineTo deleted the truncate, if needed, but not what's under it.
        AddToWorklist(oye);
      }
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }
  }

  // fold (aext (truncate x))
  if (N0.getOpcode() == ISD::TRUNCATE)
    return DAG.getAnyExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);

  // Fold (aext (and (trunc x), cst)) -> (and x, cst)
  // if the trunc is not free.
  if (N0.getOpcode() == ISD::AND &&
      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
      N0.getOperand(1).getOpcode() == ISD::Constant &&
      !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
                          N0.getValueType())) {
    SDLoc DL(N);
    SDValue X = N0.getOperand(0).getOperand(0);
    X = DAG.getAnyExtOrTrunc(X, DL, VT);
    APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
    Mask = Mask.zext(VT.getSizeInBits());
    return DAG.getNode(ISD::AND, DL, VT,
                       X, DAG.getConstant(Mask, DL, VT));
  }

  // fold (aext (load x)) -> (aext (truncate (extload x)))
  // None of the supported targets knows how to perform load and any_ext
  // on vectors in one instruction.  We only perform this transformation on
  // scalars.
  if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
      ISD::isUNINDEXEDLoad(N0.getNode()) &&
      TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
    bool DoXform = true;
    SmallVector<SDNode*, 4> SetCCs;
    if (!N0.hasOneUse())
      DoXform = ExtendUsesToFormExtLoad(VT, N, N0, ISD::ANY_EXTEND, SetCCs,
                                        TLI);
    if (DoXform) {
      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
      SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
                                       LN0->getChain(),
                                       LN0->getBasePtr(), N0.getValueType(),
                                       LN0->getMemOperand());
      ExtendSetCCUses(SetCCs, N0, ExtLoad, ISD::ANY_EXTEND);
      // If the load value is used only by N, replace it via CombineTo N.
      bool NoReplaceTrunc = N0.hasOneUse();
      CombineTo(N, ExtLoad);
      if (NoReplaceTrunc) {
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
        recursivelyDeleteUnusedNodes(LN0);
      } else {
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
                                    N0.getValueType(), ExtLoad);
        CombineTo(LN0, Trunc, ExtLoad.getValue(1));
      }
      return SDValue(N, 0); // Return N so it doesn't get rechecked!
    }
  }

  // fold (aext (zextload x)) -> (aext (truncate (zextload x)))
  // fold (aext (sextload x)) -> (aext (truncate (sextload x)))
  // fold (aext ( extload x)) -> (aext (truncate (extload  x)))
  if (N0.getOpcode() == ISD::LOAD && !ISD::isNON_EXTLoad(N0.getNode()) &&
      ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    ISD::LoadExtType ExtType = LN0->getExtensionType();
    EVT MemVT = LN0->getMemoryVT();
    if (!LegalOperations || TLI.isLoadExtLegal(ExtType, VT, MemVT)) {
      SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N),
                                       VT, LN0->getChain(), LN0->getBasePtr(),
                                       MemVT, LN0->getMemOperand());
      CombineTo(N, ExtLoad);
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
      recursivelyDeleteUnusedNodes(LN0);
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }
  }

  if (N0.getOpcode() == ISD::SETCC) {
    // For vectors:
    // aext(setcc) -> vsetcc
    // aext(setcc) -> truncate(vsetcc)
    // aext(setcc) -> aext(vsetcc)
    // Only do this before legalize for now.
    if (VT.isVector() && !LegalOperations) {
      EVT N00VT = N0.getOperand(0).getValueType();
      if (getSetCCResultType(N00VT) == N0.getValueType())
        return SDValue();

      // We know that the # elements of the result is the same as the
      // # elements of the compare (and the # elements of the compare result
      // for that matter). Check to see that they are the same size. If so,
      // we know that the element size of the extended result matches the
      // element size of the compare operands.
      if (VT.getSizeInBits() == N00VT.getSizeInBits())
        return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
                             N0.getOperand(1),
                             cast<CondCodeSDNode>(N0.getOperand(2))->get());

      // If the desired elements are smaller or larger than the source
      // elements, we can use a matching integer vector type and then
      // truncate/any-extend to the desired type.
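      // e.g. (v4i32 aext (v4i1 setcc v4i64:a, v4i64:b)) typically becomes a
      // v4i64 vsetcc whose result is then truncated to v4i32.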
      EVT MatchingVectorType = N00VT.changeVectorElementTypeToInteger();
      SDValue VsetCC =
        DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0),
                      N0.getOperand(1),
                      cast<CondCodeSDNode>(N0.getOperand(2))->get());
      return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT);
    }

    // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
    SDLoc DL(N);
    if (SDValue SCC = SimplifySelectCC(
            DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
            DAG.getConstant(0, DL, VT),
            cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
      return SCC;
  }

  if (SDValue NewCtPop = widenCtPop(N, DAG))
    return NewCtPop;

  return SDValue();
}

SDValue DAGCombiner::visitAssertExt(SDNode *N) {
  unsigned Opcode = N->getOpcode();
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT AssertVT = cast<VTSDNode>(N1)->getVT();

  // fold (assert?ext (assert?ext x, vt), vt) -> (assert?ext x, vt)
  if (N0.getOpcode() == Opcode &&
      AssertVT == cast<VTSDNode>(N0.getOperand(1))->getVT())
    return N0;

  if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
      N0.getOperand(0).getOpcode() == Opcode) {
    // We have an assert, truncate, assert sandwich. Make one stronger assert
    // by asserting the smaller of the two asserted types on the larger source
    // value. This eliminates the later assert:
    // assert (trunc (assert X, i8) to iN), i1 --> trunc (assert X, i1) to iN
    // assert (trunc (assert X, i1) to iN), i8 --> trunc (assert X, i1) to iN
    SDValue BigA = N0.getOperand(0);
    EVT BigA_AssertVT = cast<VTSDNode>(BigA.getOperand(1))->getVT();
    assert(BigA_AssertVT.bitsLE(N0.getValueType()) &&
           "Asserting zero/sign-extended bits to a type larger than the "
           "truncated destination does not provide information");

    SDLoc DL(N);
    EVT MinAssertVT = AssertVT.bitsLT(BigA_AssertVT) ? AssertVT : BigA_AssertVT;
    SDValue MinAssertVTVal = DAG.getValueType(MinAssertVT);
    SDValue NewAssert = DAG.getNode(Opcode, DL, BigA.getValueType(),
                                    BigA.getOperand(0), MinAssertVTVal);
    return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewAssert);
  }

  // If we have (AssertZext (truncate (AssertSext X, iX)), iY) and Y is smaller
  // than X, just move the AssertZext in front of the truncate and drop the
  // AssertSext.
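  // e.g. (AssertZext (trunc (AssertSext X, i32) to i16), i8)
  //        --> (trunc (AssertZext X, i8) to i16)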
  if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
      N0.getOperand(0).getOpcode() == ISD::AssertSext &&
      Opcode == ISD::AssertZext) {
    SDValue BigA = N0.getOperand(0);
    EVT BigA_AssertVT = cast<VTSDNode>(BigA.getOperand(1))->getVT();
    assert(BigA_AssertVT.bitsLE(N0.getValueType()) &&
           "Asserting zero/sign-extended bits to a type larger than the "
           "truncated destination does not provide information");

    if (AssertVT.bitsLT(BigA_AssertVT)) {
      SDLoc DL(N);
      SDValue NewAssert = DAG.getNode(Opcode, DL, BigA.getValueType(),
                                      BigA.getOperand(0), N1);
      return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewAssert);
    }
  }

  return SDValue();
}

/// If the result of a wider load is shifted right by N bits and then
/// truncated to a narrower type, where N is a multiple of the number of bits
/// in the narrower type, then transform it to a narrower load from address +
/// N / (num of bits of the new type). Also narrow the load if the result is
/// masked with an AND to effectively produce a smaller type. If the result is
/// to be extended, also fold the extension to form an extending load.
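/// For example, on a little-endian target:
///   (i8 (trunc (srl (i32 load p), 8))) -> (i8 load p+1)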
SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
  unsigned Opc = N->getOpcode();

  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT ExtVT = VT;

  // This transformation isn't valid for vector loads.
  if (VT.isVector())
    return SDValue();

  unsigned ShAmt = 0;
  bool HasShiftedOffset = false;
  // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
  // sign-extending back to VT.
  if (Opc == ISD::SIGN_EXTEND_INREG) {
    ExtType = ISD::SEXTLOAD;
    ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
  } else if (Opc == ISD::SRL) {
    // Another special-case: SRL is basically zero-extending a narrower value,
    // or it may be shifting a higher subword, half or byte into the lowest
    // bits.
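    // e.g. (i32 srl (i32 load p), 16) only uses the high half of the loaded
    // value, so on a little-endian target it can become a zextload of i16
    // from p+2.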
    ExtType = ISD::ZEXTLOAD;
    N0 = SDValue(N, 0);

    auto *LN0 = dyn_cast<LoadSDNode>(N0.getOperand(0));
    auto *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!N01 || !LN0)
      return SDValue();

    uint64_t ShiftAmt = N01->getZExtValue();
    uint64_t MemoryWidth = LN0->getMemoryVT().getSizeInBits();
    if (LN0->getExtensionType() != ISD::SEXTLOAD && MemoryWidth > ShiftAmt)
      ExtVT = EVT::getIntegerVT(*DAG.getContext(), MemoryWidth - ShiftAmt);
    else
      ExtVT = EVT::getIntegerVT(*DAG.getContext(),
                                VT.getSizeInBits() - ShiftAmt);
  } else if (Opc == ISD::AND) {
    // An AND with a constant mask is the same as a truncate + zero-extend.
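    // e.g. (and (i32 load p), 255) loads the same value as
    // (zext (i8 load p) to i32).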
    auto AndC = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!AndC)
      return SDValue();

    const APInt &Mask = AndC->getAPIntValue();
    unsigned ActiveBits = 0;
    if (Mask.isMask()) {
      ActiveBits = Mask.countTrailingOnes();
    } else if (Mask.isShiftedMask()) {
      ShAmt = Mask.countTrailingZeros();
      APInt ShiftedMask = Mask.lshr(ShAmt);
      ActiveBits = ShiftedMask.countTrailingOnes();
      HasShiftedOffset = true;
    } else
      return SDValue();

    ExtType = ISD::ZEXTLOAD;
    ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
  }

  if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
    SDValue SRL = N0;
    if (auto *ConstShift = dyn_cast<ConstantSDNode>(SRL.getOperand(1))) {
      ShAmt = ConstShift->getZExtValue();
      unsigned EVTBits = ExtVT.getSizeInBits();
      // Is the shift amount a multiple of the size of ExtVT?
      if ((ShAmt & (EVTBits-1)) == 0) {
        N0 = N0.getOperand(0);
        // Is the load width a multiple of the size of ExtVT?
        if ((N0.getValueSizeInBits() & (EVTBits-1)) != 0)
          return SDValue();
      }

      // At this point, we must have a load or else we can't do the transform.
      if (!isa<LoadSDNode>(N0)) return SDValue();

      auto *LN0 = cast<LoadSDNode>(N0);

      // Because an SRL must be assumed to *need* to zero-extend the high bits
      // (as opposed to anyext the high bits), we can't combine the zextload
      // lowering of SRL and an sextload.
      if (LN0->getExtensionType() == ISD::SEXTLOAD)
        return SDValue();

      // If the shift amount is larger than the width of the memory type then
      // we're not accessing any of the loaded bytes.  If the load was a
      // zextload/extload then the result of the shift+trunc is zero/undef
      // (handled elsewhere).
      if (ShAmt >= LN0->getMemoryVT().getSizeInBits())
        return SDValue();

      // If the SRL is only used by a masking AND, we may be able to adjust
      // the ExtVT to make the AND redundant.
      SDNode *Mask = *(SRL->use_begin());
      if (Mask->getOpcode() == ISD::AND &&
          isa<ConstantSDNode>(Mask->getOperand(1))) {
        const APInt &ShiftMask =
          cast<ConstantSDNode>(Mask->getOperand(1))->getAPIntValue();
        if (ShiftMask.isMask()) {
          EVT MaskedVT = EVT::getIntegerVT(*DAG.getContext(),
                                           ShiftMask.countTrailingOnes());
          // If the mask is smaller, recompute the type.
          if ((ExtVT.getSizeInBits() > MaskedVT.getSizeInBits()) &&
              TLI.isLoadExtLegal(ExtType, N0.getValueType(), MaskedVT))
            ExtVT = MaskedVT;
        }
      }
    }
  }

  // If the load is shifted left (and the result isn't shifted back right),
  // we can fold the truncate through the shift.
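  // e.g. on a little-endian target,
  //   (i16 trunc (shl (i32 load p), 4)) -> (shl (i16 load p), 4)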
  unsigned ShLeftAmt = 0;
  if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
      ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) {
    if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
      ShLeftAmt = N01->getZExtValue();
      N0 = N0.getOperand(0);
    }
  }

  // If we haven't found a load, we can't narrow it.
  if (!isa<LoadSDNode>(N0))
    return SDValue();

  LoadSDNode *LN0 = cast<LoadSDNode>(N0);
  // Reducing the width of a volatile load is illegal. For atomics, we may be
  // able to reduce the width provided we never widen again. (see D66309)
  if (!LN0->isSimple() ||
      !isLegalNarrowLdSt(LN0, ExtType, ExtVT, ShAmt))
    return SDValue();

  auto AdjustBigEndianShift = [&](unsigned ShAmt) {
    unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
    unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
    return LVTStoreBits - EVTStoreBits - ShAmt;
  };

  // For big endian targets, we need to adjust the offset to the pointer to
  // load the correct bytes.
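  // e.g. when narrowing an i32 load to its low i8, the byte is at offset 0
  // on a little-endian target but at offset 3 on a big-endian one.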
  if (DAG.getDataLayout().isBigEndian())
    ShAmt = AdjustBigEndianShift(ShAmt);

  EVT PtrType = N0.getOperand(1).getValueType();
  uint64_t PtrOff = ShAmt / 8;
  unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff);
  SDLoc DL(LN0);
  // The original load itself didn't wrap, so an offset within it doesn't.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);
  SDValue NewPtr = DAG.getNode(ISD::ADD, DL,
                               PtrType, LN0->getBasePtr(),
                               DAG.getConstant(PtrOff, DL, PtrType),
                               Flags);
  AddToWorklist(NewPtr.getNode());

  SDValue Load;
  if (ExtType == ISD::NON_EXTLOAD)
    Load = DAG.getLoad(VT, DL, LN0->getChain(), NewPtr,
                       LN0->getPointerInfo().getWithOffset(PtrOff), NewAlign,
                       LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
  else
    Load = DAG.getExtLoad(ExtType, DL, VT, LN0->getChain(), NewPtr,
                          LN0->getPointerInfo().getWithOffset(PtrOff), ExtVT,
                          NewAlign, LN0->getMemOperand()->getFlags(),
                          LN0->getAAInfo());

  // Replace the old load's chain with the new load's chain.
  WorklistRemover DeadNodes(*this);
  DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));

  // Shift the result left, if we've swallowed a left shift.
  SDValue Result = Load;
  if (ShLeftAmt != 0) {
    EVT ShImmTy = getShiftAmountTy(Result.getValueType());
    if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt))
      ShImmTy = VT;
    // If the shift amount is as large as the result size (but, presumably,
    // no larger than the source) then the useful bits of the result are
    // zero; we can't simply return the shortened shift, because the result
    // of that operation is undefined.
    if (ShLeftAmt >= VT.getSizeInBits())
      Result = DAG.getConstant(0, DL, VT);
    else
      Result = DAG.getNode(ISD::SHL, DL, VT,
                          Result, DAG.getConstant(ShLeftAmt, DL, ShImmTy));
  }

  if (HasShiftedOffset) {
    // ShAmt was adjusted above for big-endian targets to compute the pointer
    // offset; applying the same adjustment again recovers the original
    // mask-based shift amount.
    if (DAG.getDataLayout().isBigEndian())
      ShAmt = AdjustBigEndianShift(ShAmt);

    // We're using a shifted mask, so the load now has an offset. This means
    // the data has been loaded into lower bytes than it originally occupied,
    // so we need to shl the loaded data into the correct position in the
    // register.
    SDValue ShiftC = DAG.getConstant(ShAmt, DL, VT);
    Result = DAG.getNode(ISD::SHL, DL, VT, Result, ShiftC);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
  }

  // Return the new loaded value.
  return Result;
}

SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N1)->getVT();
  unsigned VTBits = VT.getScalarSizeInBits();
  unsigned EVTBits = EVT.getScalarSizeInBits();

  if (N0.isUndef())
    return DAG.getUNDEF(VT);

  // fold (sext_in_reg c1) -> c1
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1);

  // If the input is already sign extended, just drop the extension.
  if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1)
    return N0;

  // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
  if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
      EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT()))
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
                       N0.getOperand(0), N1);

  // fold (sext_in_reg (sext x)) -> (sext x)
  // fold (sext_in_reg (aext x)) -> (sext x)
  // if x is small enough or if we know that x has more than 1 sign bit and the
  // sign_extend_inreg is extending from one of them.
  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
    SDValue N00 = N0.getOperand(0);
    unsigned N00Bits = N00.getScalarValueSizeInBits();
    if ((N00Bits <= EVTBits ||
         (N00Bits - DAG.ComputeNumSignBits(N00)) < EVTBits) &&
        (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
      return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00);
  }

  // fold (sext_in_reg (*_extend_vector_inreg x)) -> (sext_vector_inreg x)
  if ((N0.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG ||
       N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ||
       N0.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) &&
      N0.getOperand(0).getScalarValueSizeInBits() == EVTBits) {
    if (!LegalOperations ||
        TLI.isOperationLegal(ISD::SIGN_EXTEND_VECTOR_INREG, VT))
      return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, SDLoc(N), VT,
                         N0.getOperand(0));
  }

  // fold (sext_in_reg (zext x)) -> (sext x)
  // iff we are extending the source sign bit.
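  // e.g. (sext_in_reg (zext i8:x to i32), i8) -> (sext i8:x to i32)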
  if (N0.getOpcode() == ISD::ZERO_EXTEND) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getScalarValueSizeInBits() == EVTBits &&
        (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
      return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00);
  }

  // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
  if (DAG.MaskedValueIsZero(N0, APInt::getOneBitSet(VTBits, EVTBits - 1)))
    return DAG.getZeroExtendInReg(N0, SDLoc(N), EVT.getScalarType());

  // fold operands of sext_in_reg based on knowledge that the top bits are not
  // demanded.
  if (SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // fold (sext_in_reg (load x)) -> (smaller sextload x)
  // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
  if (SDValue NarrowLoad = ReduceLoadWidth(N))
    return NarrowLoad;

  // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
  // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
  // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
  if (N0.getOpcode() == ISD::SRL) {
    if (auto *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
      if (ShAmt->getAPIntValue().ule(VTBits - EVTBits)) {
        // We can turn this into an SRA iff the input to the SRL is already sign
        // extended enough.
        unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
        if (((VTBits - EVTBits) - ShAmt->getZExtValue()) < InSignBits)
          return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0.getOperand(0),
                             N0.getOperand(1));
      }
  }

  // fold (sext_inreg (extload x)) -> (sextload x)
  // If sextload is not supported by the target, we can only do the combine
  // when the load has one use. Doing otherwise can block folding the extload
  // with other extends that the target does support.
  if (ISD::isEXTLoad(N0.getNode()) &&
      ISD::isUNINDEXEDLoad(N0.getNode()) &&
      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
      ((!LegalOperations && cast<LoadSDNode>(N0)->isSimple() &&
        N0.hasOneUse()) ||
       TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
                                     LN0->getChain(),
                                     LN0->getBasePtr(), EVT,
                                     LN0->getMemOperand());
    CombineTo(N, ExtLoad);
    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
    AddToWorklist(ExtLoad.getNode());
    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
  }
  // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
  if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
      N0.hasOneUse() &&
      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
      ((!LegalOperations && cast<LoadSDNode>(N0)->isSimple()) &&
       TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
                                     LN0->getChain(),
                                     LN0->getBasePtr(), EVT,
                                     LN0->getMemOperand());
    CombineTo(N, ExtLoad);
    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
  }

  // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
  if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) {
    if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
                                           N0.getOperand(1), false))
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
                         BSwap, N1);
  }

  return SDValue();
}

SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (N0.isUndef())
    return DAG.getUNDEF(VT);

  if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
    return Res;

  if (SimplifyDemandedVectorElts(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

SDValue DAGCombiner::visitZERO_EXTEND_VECTOR_INREG(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (N0.isUndef())
    return DAG.getUNDEF(VT);

  if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
    return Res;

  if (SimplifyDemandedVectorElts(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SrcVT = N0.getValueType();
  bool isLE = DAG.getDataLayout().isLittleEndian();

  // noop truncate
  if (SrcVT == VT)
    return N0;

  // fold (truncate (truncate x)) -> (truncate x)
  if (N0.getOpcode() == ISD::TRUNCATE)
    return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));

  // fold (truncate c1) -> c1
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
    SDValue C = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0);
    if (C.getNode() != N)
      return C;
  }

  // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
  if (N0.getOpcode() == ISD::ZERO_EXTEND ||
      N0.getOpcode() == ISD::SIGN_EXTEND ||
      N0.getOpcode() == ISD::ANY_EXTEND) {
    // if the source is smaller than the dest, we still need an extend.
    if (N0.getOperand(0).getValueType().bitsLT(VT))
      return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));
    // if the source is larger than the dest, then we just need the truncate.
    if (N0.getOperand(0).getValueType().bitsGT(VT))
      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
    // if the source and dest are the same type, we can drop both the extend
    // and the truncate.
    return N0.getOperand(0);
  }

  // If this is anyext(trunc), don't fold it, allow ourselves to be folded.
  if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ANY_EXTEND))
    return SDValue();

  // Fold extract-and-trunc into a narrow extract. For example:
  //   i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
  //   i32 y = TRUNCATE(i64 x)
  //        -- becomes --
  //   v16i8 b = BITCAST (v2i64 val)
  //   i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
  //
  // Note: We only run this optimization after type legalization (which often
  // creates this pattern) and before operation legalization, after which
  // we need to be more careful about the vector instructions that we generate.
  if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) {
    EVT VecTy = N0.getOperand(0).getValueType();
    EVT ExTy = N0.getValueType();
    EVT TrTy = N->getValueType(0);

    unsigned NumElem = VecTy.getVectorNumElements();
    unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();

    EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem);
    assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");

    SDValue EltNo = N0->getOperand(1);
    if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
      int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
      EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
      int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));

      SDLoc DL(N);
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TrTy,
                         DAG.getBitcast(NVT, N0.getOperand(0)),
                         DAG.getConstant(Index, DL, IndexTy));
    }
  }

  // trunc (select c, a, b) -> select c, (trunc a), (trunc b)
  if (N0.getOpcode() == ISD::SELECT && N0.hasOneUse()) {
    if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) &&
        TLI.isTruncateFree(SrcVT, VT)) {
      SDLoc SL(N0);
      SDValue Cond = N0.getOperand(0);
      SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
      SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2));
      return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1);
    }
  }

  // trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits()
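  // e.g. (i16 trunc (shl i32:x, 3)) -> (shl (i16 trunc x), 3), because the
  // shift amount 3 is known to be less than 16.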
  if (N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
      (!LegalOperations || TLI.isOperationLegal(ISD::SHL, VT)) &&
      TLI.isTypeDesirableForOp(ISD::SHL, VT)) {
    SDValue Amt = N0.getOperand(1);
    KnownBits Known = DAG.computeKnownBits(Amt);
    unsigned Size = VT.getScalarSizeInBits();
    if (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size)) {
      SDLoc SL(N);
      EVT AmtVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());

      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0));
      if (AmtVT != Amt.getValueType()) {
        Amt = DAG.getZExtOrTrunc(Amt, SL, AmtVT);
        AddToWorklist(Amt.getNode());
      }
      return DAG.getNode(ISD::SHL, SL, VT, Trunc, Amt);
    }
  }

  // Attempt to pre-truncate BUILD_VECTOR sources.
  if (N0.getOpcode() == ISD::BUILD_VECTOR && !LegalOperations &&
      TLI.isTruncateFree(SrcVT.getScalarType(), VT.getScalarType())) {
    SDLoc DL(N);
    EVT SVT = VT.getScalarType();
    SmallVector<SDValue, 8> TruncOps;
    for (const SDValue &Op : N0->op_values()) {
      SDValue TruncOp = DAG.getNode(ISD::TRUNCATE, DL, SVT, Op);
      TruncOps.push_back(TruncOp);
    }
    return DAG.getBuildVector(VT, DL, TruncOps);
  }

  // Fold a series of buildvector, bitcast, and truncate if possible.
  // For example fold
  //   (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to
  //   (2xi32 (buildvector x, y)).
  if (Level == AfterLegalizeVectorOps && VT.isVector() &&
      N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
      N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
      N0.getOperand(0).hasOneUse()) {
    SDValue BuildVect = N0.getOperand(0);
    EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType();
    EVT TruncVecEltTy = VT.getVectorElementType();

    // Check that the element types match.
    if (BuildVectEltTy == TruncVecEltTy) {
      // Now we only need to compute the offset of the truncated elements.
      unsigned BuildVecNumElts =  BuildVect.getNumOperands();
      unsigned TruncVecNumElts = VT.getVectorNumElements();
      unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts;

      assert((BuildVecNumElts % TruncVecNumElts) == 0 &&
             "Invalid number of elements");

      SmallVector<SDValue, 8> Opnds;
      for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset)
        Opnds.push_back(BuildVect.getOperand(i));

      return DAG.getBuildVector(VT, SDLoc(N), Opnds);
    }
  }

  // See if we can simplify the input to this truncate through knowledge that
  // only the low bits are being used.
  // For example "trunc (or (shl x, 8), y)" -> trunc y
  // Currently we only perform this optimization on scalars because vectors
  // may have different active low bits.
  if (!VT.isVector()) {
    APInt Mask =
        APInt::getLowBitsSet(N0.getValueSizeInBits(), VT.getSizeInBits());
    if (SDValue Shorter = DAG.GetDemandedBits(N0, Mask))
      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter);
  }

  // fold (truncate (load x)) -> (smaller load x)
  // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
  if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
    if (SDValue Reduced = ReduceLoadWidth(N))
      return Reduced;

    // Handle the case where the load remains an extending load even
    // after truncation.
    if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
      if (LN0->isSimple() &&
          LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) {
        SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
                                         VT, LN0->getChain(), LN0->getBasePtr(),
                                         LN0->getMemoryVT(),
                                         LN0->getMemOperand());
        DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1));
        return NewLoad;
      }
    }
  }

  // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...),
  // where ... are all 'undef'.
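  // e.g. (v4i8 trunc (v4i32 concat_vectors v2i32:x, v2i32:undef))
  //        -> (concat_vectors (v2i8 trunc x), v2i8:undef)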
  if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) {
    SmallVector<EVT, 8> VTs;
    SDValue V;
    unsigned Idx = 0;
    unsigned NumDefs = 0;

    for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
      SDValue X = N0.getOperand(i);
      if (!X.isUndef()) {
        V = X;
        Idx = i;
        NumDefs++;
      }
      // Stop if more than one member is non-undef.
      if (NumDefs > 1)
        break;
      VTs.push_back(EVT::getVectorVT(*DAG.getContext(),
                                     VT.getVectorElementType(),
                                     X.getValueType().getVectorNumElements()));
    }

    if (NumDefs == 0)
      return DAG.getUNDEF(VT);

    if (NumDefs == 1) {
      assert(V.getNode() && "The single defined operand is empty!");
      SmallVector<SDValue, 8> Opnds;
      for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
        if (i != Idx) {
          Opnds.push_back(DAG.getUNDEF(VTs[i]));
          continue;
        }
        SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V);
        AddToWorklist(NV.getNode());
        Opnds.push_back(NV);
      }
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds);
    }
  }

  // Fold truncate of a bitcast of a vector to an extract of the low vector
  // element.
  //
  // e.g. trunc (i64 (bitcast v2i32:x)) -> extract_vector_elt v2i32:x, idx
  if (N0.getOpcode() == ISD::BITCAST && !VT.isVector()) {
    SDValue VecSrc = N0.getOperand(0);
    EVT VecSrcVT = VecSrc.getValueType();
    if (VecSrcVT.isVector() && VecSrcVT.getScalarType() == VT &&
        (!LegalOperations ||
         TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, VecSrcVT))) {
      SDLoc SL(N);

      EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
      unsigned Idx = isLE ? 0 : VecSrcVT.getVectorNumElements() - 1;
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, VT, VecSrc,
                         DAG.getConstant(Idx, SL, IdxVT));
    }
  }

  // Simplify the operands using demanded-bits information.
  if (!VT.isVector() &&
      SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // (trunc adde(X, Y, Carry)) -> (adde trunc(X), trunc(Y), Carry)
  // (trunc addcarry(X, Y, Carry)) -> (addcarry trunc(X), trunc(Y), Carry)
  // When the adde's carry is not used.
  if ((N0.getOpcode() == ISD::ADDE || N0.getOpcode() == ISD::ADDCARRY) &&
      N0.hasOneUse() && !N0.getNode()->hasAnyUseOfValue(1) &&
      // We only do this for ADDCARRY before operation legalization.
      ((!LegalOperations && N0.getOpcode() == ISD::ADDCARRY) ||
       TLI.isOperationLegal(N0.getOpcode(), VT))) {
    SDLoc SL(N);
    auto X = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0));
    auto Y = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
    auto VTs = DAG.getVTList(VT, N0->getValueType(1));
    return DAG.getNode(N0.getOpcode(), SL, VTs, X, Y, N0.getOperand(2));
  }

  // fold (truncate (extract_subvector(ext x))) ->
  //      (extract_subvector x)
  // TODO: This can be generalized to cover cases where the truncate and extract
  // do not fully cancel each other out.
  if (!LegalTypes && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == ISD::SIGN_EXTEND ||
        N00.getOpcode() == ISD::ZERO_EXTEND ||
        N00.getOpcode() == ISD::ANY_EXTEND) {
      if (N00.getOperand(0)->getValueType(0).getVectorElementType() ==
          VT.getVectorElementType())
        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N0->getOperand(0)), VT,
                           N00.getOperand(0), N0.getOperand(1));
    }
  }

  if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
    return NewVSel;

  // Narrow a suitable binary operation with a non-opaque constant operand by
  // moving it ahead of the truncate. This is limited to pre-legalization
  // because targets may prefer a wider type during later combines and invert
  // this transform.
  switch (N0.getOpcode()) {
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (!LegalOperations && N0.hasOneUse() &&
        (isConstantOrConstantVector(N0.getOperand(0), true) ||
         isConstantOrConstantVector(N0.getOperand(1), true))) {
      // TODO: We already restricted this to pre-legalization, but for vectors
      // we are extra cautious to not create an unsupported operation.
      // Target-specific changes are likely needed to avoid regressions here.
      if (VT.isScalarInteger() || TLI.isOperationLegal(N0.getOpcode(), VT)) {
        SDLoc DL(N);
        SDValue NarrowL = DAG.getNode(ISD::TRUNCATE, DL, VT, N0.getOperand(0));
        SDValue NarrowR = DAG.getNode(ISD::TRUNCATE, DL, VT, N0.getOperand(1));
        return DAG.getNode(N0.getOpcode(), DL, VT, NarrowL, NarrowR);
      }
    }
  }

  return SDValue();
}

static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
  SDValue Elt = N->getOperand(i);
  if (Elt.getOpcode() != ISD::MERGE_VALUES)
    return Elt.getNode();
  return Elt.getOperand(Elt.getResNo()).getNode();
}

/// build_pair (load, load) -> load
/// if load locations are consecutive.
SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
  assert(N->getOpcode() == ISD::BUILD_PAIR);

  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
  LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));

  // A BUILD_PAIR always has the least significant part in elt 0 and the most
  // significant part in elt 1, so when combining into one large load we need
  // to consider the endianness.
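  // e.g. on a big-endian target, elt 1 (the most significant part) must come
  // from the lower address, so the two loads are swapped before the
  // consecutive-load check below.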
  if (DAG.getDataLayout().isBigEndian())
    std::swap(LD1, LD2);

  if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
      LD1->getAddressSpace() != LD2->getAddressSpace())
    return SDValue();
  EVT LD1VT = LD1->getValueType(0);
  unsigned LD1Bytes = LD1VT.getStoreSize();
  if (ISD::isNON_EXTLoad(LD2) && LD2->hasOneUse() &&
      DAG.areNonVolatileConsecutiveLoads(LD2, LD1, LD1Bytes, 1)) {
    unsigned Align = LD1->getAlignment();
    unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
        VT.getTypeForEVT(*DAG.getContext()));

    if (NewAlign <= Align &&
        (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
      return DAG.getLoad(VT, SDLoc(N), LD1->getChain(), LD1->getBasePtr(),
                         LD1->getPointerInfo(), Align);
  }

  return SDValue();
}

static unsigned getPPCf128HiElementSelector(const SelectionDAG &DAG) {
  // On little-endian machines, bitcasting from ppcf128 to i128 does swap the Hi
  // and Lo parts; on big-endian machines it doesn't.
  return DAG.getDataLayout().isBigEndian() ? 1 : 0;
}

static SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG,
                                    const TargetLowering &TLI) {
  // If this is not a bitcast to an FP type or if the target doesn't have
  // IEEE754-compliant FP logic, we're done.
  EVT VT = N->getValueType(0);
  if (!VT.isFloatingPoint() || !TLI.hasBitPreservingFPLogic(VT))
    return SDValue();

  // TODO: Handle cases where the integer constant is a different scalar
  // bitwidth to the FP.
  SDValue N0 = N->getOperand(0);
  EVT SourceVT = N0.getValueType();
  if (VT.getScalarSizeInBits() != SourceVT.getScalarSizeInBits())
    return SDValue();

  unsigned FPOpcode;
  APInt SignMask;
  switch (N0.getOpcode()) {
  case ISD::AND:
    FPOpcode = ISD::FABS;
    SignMask = ~APInt::getSignMask(SourceVT.getScalarSizeInBits());
    break;
  case ISD::XOR:
    FPOpcode = ISD::FNEG;
    SignMask = APInt::getSignMask(SourceVT.getScalarSizeInBits());
    break;
  case ISD::OR:
    FPOpcode = ISD::FABS;
    SignMask = APInt::getSignMask(SourceVT.getScalarSizeInBits());
    break;
  default:
    return SDValue();
  }

  // Fold (bitcast int (and (bitcast fp X to int), 0x7fff...) to fp) -> fabs X
  // Fold (bitcast int (xor (bitcast fp X to int), 0x8000...) to fp) -> fneg X
  // Fold (bitcast int (or (bitcast fp X to int), 0x8000...) to fp) ->
  //   fneg (fabs X)
  SDValue LogicOp0 = N0.getOperand(0);
  ConstantSDNode *LogicOp1 = isConstOrConstSplat(N0.getOperand(1), true);
  if (LogicOp1 && LogicOp1->getAPIntValue() == SignMask &&
      LogicOp0.getOpcode() == ISD::BITCAST &&
      LogicOp0.getOperand(0).getValueType() == VT) {
    SDValue FPOp = DAG.getNode(FPOpcode, SDLoc(N), VT, LogicOp0.getOperand(0));
    NumFPLogicOpsConv++;
    if (N0.getOpcode() == ISD::OR)
      return DAG.getNode(ISD::FNEG, SDLoc(N), VT, FPOp);
    return FPOp;
  }

  return SDValue();
}

SDValue DAGCombiner::visitBITCAST(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (N0.isUndef())
    return DAG.getUNDEF(VT);

  // If the input is a BUILD_VECTOR with all constant elements, fold this now.
  // Only do this before legalize types, unless both types are integer and the
  // scalar type is legal. Only do this before legalize ops, since the target
  // may be depending on the bitcast.
  // First check to see if this is all constant.
  // TODO: Support FP bitcasts after legalize types.
  if (VT.isVector() &&
      (!LegalTypes ||
       (!LegalOperations && VT.isInteger() && N0.getValueType().isInteger() &&
        TLI.isTypeLegal(VT.getVectorElementType()))) &&
      N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
      cast<BuildVectorSDNode>(N0)->isConstant())
    return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(),
                                             VT.getVectorElementType());

  // If the input is a constant, let getNode fold it.
  if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
    // If we can't allow illegal operations, we need to check that this is just
    // an fp -> int or int -> fp conversion and that the resulting operation
    // will be legal.
    if (!LegalOperations ||
        (isa<ConstantSDNode>(N0) && VT.isFloatingPoint() && !VT.isVector() &&
         TLI.isOperationLegal(ISD::ConstantFP, VT)) ||
        (isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
         TLI.isOperationLegal(ISD::Constant, VT))) {
      SDValue C = DAG.getBitcast(VT, N0);
      if (C.getNode() != N)
        return C;
    }
  }

  // (conv (conv x, t1), t2) -> (conv x, t2)
  if (N0.getOpcode() == ISD::BITCAST)
    return DAG.getBitcast(VT, N0.getOperand(0));

  // fold (conv (load x)) -> (load (conv*)x)
  // if the resultant load doesn't need a higher alignment than the original.
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
      // Do not remove the cast if the types differ in endian layout.
      TLI.hasBigEndianPartOrdering(N0.getValueType(), DAG.getDataLayout()) ==
          TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) &&
      // If the load is volatile, we only want to change the load type if the
      // resulting load is legal. Otherwise we might increase the number of
      // memory accesses. We don't care if the original type was legal or not
      // as we assume software couldn't rely on the number of accesses of an
      // illegal type.
      ((!LegalOperations && cast<LoadSDNode>(N0)->isSimple()) ||
       TLI.isOperationLegal(ISD::LOAD, VT))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);

    if (TLI.isLoadBitCastBeneficial(N0.getValueType(), VT, DAG,
                                    *LN0->getMemOperand())) {
      SDValue Load =
          DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
                      LN0->getPointerInfo(), LN0->getAlignment(),
                      LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
      DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
      return Load;
    }
  }

  if (SDValue V = foldBitcastedFPLogic(N, DAG, TLI))
    return V;

  // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
  // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
  //
  // For ppc_fp128:
  // fold (bitcast (fneg x)) ->
  //     flipbit = signbit
  //     (xor (bitcast x) (build_pair flipbit, flipbit))
  //
  // fold (bitcast (fabs x)) ->
  //     flipbit = (and (extract_element (bitcast x), 0), signbit)
  //     (xor (bitcast x) (build_pair flipbit, flipbit))
  // This often reduces constant pool loads.
  if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) ||
       (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
      N0.getNode()->hasOneUse() && VT.isInteger() &&
      !VT.isVector() && !N0.getValueType().isVector()) {
    SDValue NewConv = DAG.getBitcast(VT, N0.getOperand(0));
    AddToWorklist(NewConv.getNode());

    SDLoc DL(N);
    if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) {
      assert(VT.getSizeInBits() == 128);
      SDValue SignBit = DAG.getConstant(
          APInt::getSignMask(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64);
      SDValue FlipBit;
      if (N0.getOpcode() == ISD::FNEG) {
        FlipBit = SignBit;
        AddToWorklist(FlipBit.getNode());
      } else {
        assert(N0.getOpcode() == ISD::FABS);
        SDValue Hi =
            DAG.getNode(ISD::EXTRACT_ELEMENT, SDLoc(NewConv), MVT::i64, NewConv,
                        DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG),
                                              SDLoc(NewConv)));
        AddToWorklist(Hi.getNode());
        FlipBit = DAG.getNode(ISD::AND, SDLoc(N0), MVT::i64, Hi, SignBit);
        AddToWorklist(FlipBit.getNode());
      }
      SDValue FlipBits =
          DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit);
      AddToWorklist(FlipBits.getNode());
      return DAG.getNode(ISD::XOR, DL, VT, NewConv, FlipBits);
    }
    APInt SignBit = APInt::getSignMask(VT.getSizeInBits());
    if (N0.getOpcode() == ISD::FNEG)
      return DAG.getNode(ISD::XOR, DL, VT,
                         NewConv, DAG.getConstant(SignBit, DL, VT));
    assert(N0.getOpcode() == ISD::FABS);
    return DAG.getNode(ISD::AND, DL, VT,
                       NewConv, DAG.getConstant(~SignBit, DL, VT));
  }

  // fold (bitconvert (fcopysign cst, x)) ->
  //         (or (and (bitconvert x), sign), (and cst, (not sign)))
  // Note that we don't handle (copysign x, cst) because this can always be
  // folded to an fneg or fabs.
  //
  // For ppc_fp128:
  // fold (bitcast (fcopysign cst, x)) ->
  //     flipbit = (and (extract_element
  //                     (xor (bitcast cst), (bitcast x)), 0),
  //                    signbit)
  //     (xor (bitcast cst) (build_pair flipbit, flipbit))
  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
      isa<ConstantFPSDNode>(N0.getOperand(0)) &&
      VT.isInteger() && !VT.isVector()) {
    unsigned OrigXWidth = N0.getOperand(1).getValueSizeInBits();
    EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
    if (isTypeLegal(IntXVT)) {
      SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1));
      AddToWorklist(X.getNode());

      // If X has a different width than the result/lhs, sext it or truncate it.
      unsigned VTWidth = VT.getSizeInBits();
      if (OrigXWidth < VTWidth) {
        X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X);
        AddToWorklist(X.getNode());
      } else if (OrigXWidth > VTWidth) {
        // To get the sign bit in the right place, we have to shift it right
        // before truncating.
        SDLoc DL(X);
        X = DAG.getNode(ISD::SRL, DL,
                        X.getValueType(), X,
                        DAG.getConstant(OrigXWidth-VTWidth, DL,
                                        X.getValueType()));
        AddToWorklist(X.getNode());
        X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X);
        AddToWorklist(X.getNode());
      }

      if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) {
        APInt SignBit = APInt::getSignMask(VT.getSizeInBits() / 2);
        SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
        AddToWorklist(Cst.getNode());
        SDValue X = DAG.getBitcast(VT, N0.getOperand(1));
        AddToWorklist(X.getNode());
        SDValue XorResult = DAG.getNode(ISD::XOR, SDLoc(N0), VT, Cst, X);
        AddToWorklist(XorResult.getNode());
        SDValue XorResult64 = DAG.getNode(
            ISD::EXTRACT_ELEMENT, SDLoc(XorResult), MVT::i64, XorResult,
            DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG),
                                  SDLoc(XorResult)));
        AddToWorklist(XorResult64.getNode());
        SDValue FlipBit =
            DAG.getNode(ISD::AND, SDLoc(XorResult64), MVT::i64, XorResult64,
                        DAG.getConstant(SignBit, SDLoc(XorResult64), MVT::i64));
        AddToWorklist(FlipBit.getNode());
        SDValue FlipBits =
            DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit);
        AddToWorklist(FlipBits.getNode());
        return DAG.getNode(ISD::XOR, SDLoc(N), VT, Cst, FlipBits);
      }
      APInt SignBit = APInt::getSignMask(VT.getSizeInBits());
      X = DAG.getNode(ISD::AND, SDLoc(X), VT,
                      X, DAG.getConstant(SignBit, SDLoc(X), VT));
      AddToWorklist(X.getNode());

      SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
      Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT,
                        Cst, DAG.getConstant(~SignBit, SDLoc(Cst), VT));
      AddToWorklist(Cst.getNode());

      return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst);
    }
  }

  // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
  if (N0.getOpcode() == ISD::BUILD_PAIR)
    if (SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT))
      return CombineLD;

  // Remove double bitcasts from shuffles - this is often a legacy of
  // XformToShuffleWithZero being used to combine bitmaskings (of
  // float vectors bitcast to integer vectors) into shuffles.
  // bitcast(shuffle(bitcast(s0),bitcast(s1))) -> shuffle(s0,s1)
  if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT) && VT.isVector() &&
      N0->getOpcode() == ISD::VECTOR_SHUFFLE && N0.hasOneUse() &&
      VT.getVectorNumElements() >= N0.getValueType().getVectorNumElements() &&
      !(VT.getVectorNumElements() % N0.getValueType().getVectorNumElements())) {
    ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0);

    // If operands are a bitcast, peek through if it casts the original VT.
    // If operands are a constant, just bitcast back to original VT.
    auto PeekThroughBitcast = [&](SDValue Op) {
      if (Op.getOpcode() == ISD::BITCAST &&
          Op.getOperand(0).getValueType() == VT)
        return SDValue(Op.getOperand(0));
      if (Op.isUndef() || ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
          ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()))
        return DAG.getBitcast(VT, Op);
      return SDValue();
    };

    // FIXME: If either input vector is bitcast, try to convert the shuffle to
    // the result type of this bitcast. This would eliminate at least one
    // bitcast. See the transform in InstCombine.
    SDValue SV0 = PeekThroughBitcast(N0->getOperand(0));
    SDValue SV1 = PeekThroughBitcast(N0->getOperand(1));
    if (!(SV0 && SV1))
      return SDValue();

    int MaskScale =
        VT.getVectorNumElements() / N0.getValueType().getVectorNumElements();
    SmallVector<int, 8> NewMask;
    for (int M : SVN->getMask())
      for (int i = 0; i != MaskScale; ++i)
        NewMask.push_back(M < 0 ? -1 : M * MaskScale + i);

    SDValue LegalShuffle =
        TLI.buildLegalVectorShuffle(VT, SDLoc(N), SV0, SV1, NewMask, DAG);
    if (LegalShuffle)
      return LegalShuffle;
  }

  return SDValue();
}

SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
  EVT VT = N->getValueType(0);
  return CombineConsecutiveLoads(N, VT);
}

/// We know that BV is a build_vector node with Constant, ConstantFP or Undef
/// operands. DstEltVT indicates the destination element value type.
SDValue DAGCombiner::
ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
  EVT SrcEltVT = BV->getValueType(0).getVectorElementType();

  // If this is already the right type, we're done.
  if (SrcEltVT == DstEltVT) return SDValue(BV, 0);

  unsigned SrcBitSize = SrcEltVT.getSizeInBits();
  unsigned DstBitSize = DstEltVT.getSizeInBits();

  // If this is a conversion of N elements of one type to N elements of another
  // type, convert each element.  This handles FP<->INT cases.
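  // e.g. bitcasting a constant v4i32 build_vector to v4f32 just bitcasts each
  // i32 element individually to an f32 constant.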
  if (SrcBitSize == DstBitSize) {
    SmallVector<SDValue, 8> Ops;
    for (SDValue Op : BV->op_values()) {
      // If the vector element type is not legal, the BUILD_VECTOR operands
      // are promoted and implicitly truncated.  Make that explicit here.
      if (Op.getValueType() != SrcEltVT)
        Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
      Ops.push_back(DAG.getBitcast(DstEltVT, Op));
      AddToWorklist(Ops.back().getNode());
    }
    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
                              BV->getValueType(0).getVectorNumElements());
    return DAG.getBuildVector(VT, SDLoc(BV), Ops);
  }

  // Otherwise, we're growing or shrinking the elements.  To avoid having to
  // handle annoying details of growing/shrinking FP values, we convert them to
  // int first.
  if (SrcEltVT.isFloatingPoint()) {
    // Convert the input float vector to an int vector where the elements are
    // the same size.
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
    BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
    SrcEltVT = IntVT;
  }

  // Now we know the input is an integer vector.  If the output is an FP type,
  // convert to integer first, then to FP of the right size.
  if (DstEltVT.isFloatingPoint()) {
    EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
    SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();

    // Next, convert to FP elements of the same size.
    return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
  }

  SDLoc DL(BV);

  // Okay, we know the src/dst types are both integers of differing types.
  // Handle growing first.
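  // e.g. when growing v4i16 into v2i32 on a little-endian target, elements 0
  // and 1 form the first i32, with element 0 supplying its low 16 bits.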
  assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
  if (SrcBitSize < DstBitSize) {
    unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = BV->getNumOperands(); i != e;
         i += NumInputsPerOutput) {
      bool isLE = DAG.getDataLayout().isLittleEndian();
      APInt NewBits = APInt(DstBitSize, 0);
      bool EltIsUndef = true;
      for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
        // Shift the previously computed bits over.
        NewBits <<= SrcBitSize;
        SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
        if (Op.isUndef()) continue;
        EltIsUndef = false;

        NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
                   zextOrTrunc(SrcBitSize).zext(DstBitSize);
      }

      if (EltIsUndef)
        Ops.push_back(DAG.getUNDEF(DstEltVT));
      else
        Ops.push_back(DAG.getConstant(NewBits, DL, DstEltVT));
    }

    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
    return DAG.getBuildVector(VT, DL, Ops);
  }

  // Finally, this must be the case where we are shrinking elements: each input
  // turns into multiple outputs.
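  // e.g. shrinking an i32 element 0xAABBCCDD into two i16s yields
  // (0xCCDD, 0xAABB) on a little-endian target; a big-endian target reverses
  // the pair.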
  unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
  EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
                            NumOutputsPerInput*BV->getNumOperands());
  SmallVector<SDValue, 8> Ops;

  for (const SDValue &Op : BV->op_values()) {
    if (Op.isUndef()) {
      Ops.append(NumOutputsPerInput, DAG.getUNDEF(DstEltVT));
      continue;
    }

    APInt OpVal = cast<ConstantSDNode>(Op)->
                  getAPIntValue().zextOrTrunc(SrcBitSize);

    for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
      APInt ThisVal = OpVal.trunc(DstBitSize);
      Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT));
      OpVal.lshrInPlace(DstBitSize);
    }

    // For big endian targets, swap the order of the pieces of each element.
    if (DAG.getDataLayout().isBigEndian())
      std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
  }

  return DAG.getBuildVector(VT, DL, Ops);
}

static bool isContractable(SDNode *N) {
  SDNodeFlags F = N->getFlags();
  return F.hasAllowContract() || F.hasAllowReassociation();
}

/// Try to perform FMA combining on a given FADD node.
SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc SL(N);

  const TargetOptions &Options = DAG.getTarget().Options;

  // Floating-point multiply-add with intermediate rounding.
  bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));

  // Floating-point multiply-add without intermediate rounding.
  bool HasFMA =
      TLI.isFMAFasterThanFMulAndFAdd(VT) &&
      (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));

  // No valid opcode, do not combine.
  if (!HasFMAD && !HasFMA)
    return SDValue();

  SDNodeFlags Flags = N->getFlags();
  bool CanFuse = Options.UnsafeFPMath || isContractable(N);
  bool AllowFusionGlobally = (Options.AllowFPOpFusion == FPOpFusion::Fast ||
                              CanFuse || HasFMAD);
  // If the addition is not contractable, do not combine.
  if (!AllowFusionGlobally && !isContractable(N))
    return SDValue();

  const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo();
  if (STI && STI->generateFMAsInMachineCombiner(OptLevel))
    return SDValue();

  // Always prefer FMAD to FMA for precision.
  unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
  bool Aggressive = TLI.enableAggressiveFMAFusion(VT);

  // Returns true if the node is an FMUL that is contractable, either due to
  // global flags or its own SDNodeFlags.
  auto isContractableFMUL = [AllowFusionGlobally](SDValue N) {
    if (N.getOpcode() != ISD::FMUL)
      return false;
    return AllowFusionGlobally || isContractable(N.getNode());
  };
  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMUL(N0) && isContractableFMUL(N1)) {
    if (N0.getNode()->use_size() > N1.getNode()->use_size())
      std::swap(N0, N1);
  }

  // fold (fadd (fmul x, y), z) -> (fma x, y, z)
  if (isContractableFMUL(N0) && (Aggressive || N0->hasOneUse())) {
    return DAG.getNode(PreferredFusedOpcode, SL, VT,
                       N0.getOperand(0), N0.getOperand(1), N1, Flags);
  }

  // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
  // Note: Commutes FADD operands.
  if (isContractableFMUL(N1) && (Aggressive || N1->hasOneUse())) {
    return DAG.getNode(PreferredFusedOpcode, SL, VT,
                       N1.getOperand(0), N1.getOperand(1), N0, Flags);
  }

  // Look through FP_EXTEND nodes to do more combining.

  // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
  if (N0.getOpcode() == ISD::FP_EXTEND) {
    SDValue N00 = N0.getOperand(0);
    if (isContractableFMUL(N00) &&
        TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                     N00.getOperand(0)),
                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                     N00.getOperand(1)), N1, Flags);
    }
  }

  // fold (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
  // Note: Commutes FADD operands.
  if (N1.getOpcode() == ISD::FP_EXTEND) {
    SDValue N10 = N1.getOperand(0);
    if (isContractableFMUL(N10) &&
        TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N10.getValueType())) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                     N10.getOperand(0)),
                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                     N10.getOperand(1)), N0, Flags);
    }
  }
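
  // For example (illustrative), assuming the target reports the f32 -> f64
  // extension as foldable via isFPExtFoldable:
  //   (fadd (fpext (fmul f32:a, f32:b)), f64:c)
  //     --> (fma (fpext a), (fpext b), c) : f64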

  // More folding opportunities when target permits.
  if (Aggressive) {
    // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
    if (CanFuse &&
        N0.getOpcode() == PreferredFusedOpcode &&
        N0.getOperand(2).getOpcode() == ISD::FMUL &&
        N0->hasOneUse() && N0.getOperand(2)->hasOneUse()) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         N0.getOperand(0), N0.getOperand(1),
                         DAG.getNode(PreferredFusedOpcode, SL, VT,
                                     N0.getOperand(2).getOperand(0),
                                     N0.getOperand(2).getOperand(1),
                                     N1, Flags), Flags);
    }

    // fold (fadd x, (fma y, z, (fmul u, v))) -> (fma y, z, (fma u, v, x))
    if (CanFuse &&
        N1.getOpcode() == PreferredFusedOpcode &&
        N1.getOperand(2).getOpcode() == ISD::FMUL &&
        N1->hasOneUse() && N1.getOperand(2)->hasOneUse()) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         N1.getOperand(0), N1.getOperand(1),
                         DAG.getNode(PreferredFusedOpcode, SL, VT,
                                     N1.getOperand(2).getOperand(0),
                                     N1.getOperand(2).getOperand(1),
                                     N0, Flags), Flags);
    }


    // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
    //   -> (fma x, y, (fma (fpext u), (fpext v), z))
    auto FoldFAddFMAFPExtFMul = [&] (
      SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z,
      SDNodeFlags Flags) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT, X, Y,
                         DAG.getNode(PreferredFusedOpcode, SL, VT,
                                     DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
                                     DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
                                     Z, Flags), Flags);
    };
    if (N0.getOpcode() == PreferredFusedOpcode) {
      SDValue N02 = N0.getOperand(2);
      if (N02.getOpcode() == ISD::FP_EXTEND) {
        SDValue N020 = N02.getOperand(0);
        if (isContractableFMUL(N020) &&
            TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N020.getValueType())) {
          return FoldFAddFMAFPExtFMul(N0.getOperand(0), N0.getOperand(1),
                                      N020.getOperand(0), N020.getOperand(1),
                                      N1, Flags);
        }
      }
    }

    // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
    //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
    // FIXME: This turns two single-precision and one double-precision
    // operation into two double-precision operations, which might not be
    // interesting for all targets, especially GPUs.
    auto FoldFAddFPExtFMAFMul = [&] (
      SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z,
      SDNodeFlags Flags) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         DAG.getNode(ISD::FP_EXTEND, SL, VT, X),
                         DAG.getNode(ISD::FP_EXTEND, SL, VT, Y),
                         DAG.getNode(PreferredFusedOpcode, SL, VT,
                                     DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
                                     DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
                                     Z, Flags), Flags);
    };
    if (N0.getOpcode() == ISD::FP_EXTEND) {
      SDValue N00 = N0.getOperand(0);
      if (N00.getOpcode() == PreferredFusedOpcode) {
        SDValue N002 = N00.getOperand(2);
        if (isContractableFMUL(N002) &&
            TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
          return FoldFAddFPExtFMAFMul(N00.getOperand(0), N00.getOperand(1),
                                      N002.getOperand(0), N002.getOperand(1),
                                      N1, Flags);
        }
      }
    }

    // fold (fadd x, (fma y, z, (fpext (fmul u, v))))
    //   -> (fma y, z, (fma (fpext u), (fpext v), x))
    if (N1.getOpcode() == PreferredFusedOpcode) {
      SDValue N12 = N1.getOperand(2);
      if (N12.getOpcode() == ISD::FP_EXTEND) {
        SDValue N120 = N12.getOperand(0);
        if (isContractableFMUL(N120) &&
            TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N120.getValueType())) {
          return FoldFAddFMAFPExtFMul(N1.getOperand(0), N1.getOperand(1),
                                      N120.getOperand(0), N120.getOperand(1),
                                      N0, Flags);
        }
      }
    }

    // fold (fadd x, (fpext (fma y, z, (fmul u, v))))
    //   -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
    // FIXME: This turns two single-precision and one double-precision
    // operation into two double-precision operations, which might not be
    // interesting for all targets, especially GPUs.
    if (N1.getOpcode() == ISD::FP_EXTEND) {
      SDValue N10 = N1.getOperand(0);
      if (N10.getOpcode() == PreferredFusedOpcode) {
        SDValue N102 = N10.getOperand(2);
        if (isContractableFMUL(N102) &&
            TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N10.getValueType())) {
          return FoldFAddFPExtFMAFMul(N10.getOperand(0), N10.getOperand(1),
                                      N102.getOperand(0), N102.getOperand(1),
                                      N0, Flags);
        }
      }
    }
  }

  return SDValue();
}

/// Try to perform FMA combining on a given FSUB node.
SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc SL(N);

  const TargetOptions &Options = DAG.getTarget().Options;
  // Floating-point multiply-add with intermediate rounding.
  bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));

  // Floating-point multiply-add without intermediate rounding.
  bool HasFMA =
      TLI.isFMAFasterThanFMulAndFAdd(VT) &&
      (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));

  // No valid opcode, do not combine.
  if (!HasFMAD && !HasFMA)
    return SDValue();

  const SDNodeFlags Flags = N->getFlags();
  bool CanFuse = Options.UnsafeFPMath || isContractable(N);
  bool AllowFusionGlobally = (Options.AllowFPOpFusion == FPOpFusion::Fast ||
                              CanFuse || HasFMAD);

  // If the subtraction is not contractable, do not combine.
  if (!AllowFusionGlobally && !isContractable(N))
    return SDValue();

  const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo();
  if (STI && STI->generateFMAsInMachineCombiner(OptLevel))
    return SDValue();

  // Always prefer FMAD to FMA for precision.
  unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
  bool Aggressive = TLI.enableAggressiveFMAFusion(VT);

  // Is the node an FMUL and contractable either due to global flags or
  // SDNodeFlags.
  auto isContractableFMUL = [AllowFusionGlobally](SDValue N) {
    if (N.getOpcode() != ISD::FMUL)
      return false;
    return AllowFusionGlobally || isContractable(N.getNode());
  };

  // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
  if (isContractableFMUL(N0) && (Aggressive || N0->hasOneUse())) {
    return DAG.getNode(PreferredFusedOpcode, SL, VT,
                       N0.getOperand(0), N0.getOperand(1),
                       DAG.getNode(ISD::FNEG, SL, VT, N1), Flags);
  }

  // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
  // Note: Commutes FSUB operands.
  if (isContractableFMUL(N1) && (Aggressive || N1->hasOneUse())) {
    return DAG.getNode(PreferredFusedOpcode, SL, VT,
                       DAG.getNode(ISD::FNEG, SL, VT,
                                   N1.getOperand(0)),
                       N1.getOperand(1), N0, Flags);
  }

  // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  if (N0.getOpcode() == ISD::FNEG && isContractableFMUL(N0.getOperand(0)) &&
      (Aggressive || (N0->hasOneUse() && N0.getOperand(0).hasOneUse()))) {
    SDValue N00 = N0.getOperand(0).getOperand(0);
    SDValue N01 = N0.getOperand(0).getOperand(1);
    return DAG.getNode(PreferredFusedOpcode, SL, VT,
                       DAG.getNode(ISD::FNEG, SL, VT, N00), N01,
                       DAG.getNode(ISD::FNEG, SL, VT, N1), Flags);
  }
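
  // Algebraically (illustrative): -(x * y) - z = (-x) * y + (-z), which is
  // exactly the fused form produced above.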

  // Look through FP_EXTEND nodes to do more combining.

  // fold (fsub (fpext (fmul x, y)), z)
  //   -> (fma (fpext x), (fpext y), (fneg z))
  if (N0.getOpcode() == ISD::FP_EXTEND) {
    SDValue N00 = N0.getOperand(0);
    if (isContractableFMUL(N00) &&
        TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                     N00.getOperand(0)),
                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                     N00.getOperand(1)),
                         DAG.getNode(ISD::FNEG, SL, VT, N1), Flags);
    }
  }

  // fold (fsub x, (fpext (fmul y, z)))
  //   -> (fma (fneg (fpext y)), (fpext z), x)
  // Note: Commutes FSUB operands.
  if (N1.getOpcode() == ISD::FP_EXTEND) {
    SDValue N10 = N1.getOperand(0);
    if (isContractableFMUL(N10) &&
        TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N10.getValueType())) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         DAG.getNode(ISD::FNEG, SL, VT,
                                     DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                 N10.getOperand(0))),
                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                     N10.getOperand(1)),
                         N0, Flags);
    }
  }

  // fold (fsub (fpext (fneg (fmul x, y))), z)
  //   -> (fneg (fma (fpext x), (fpext y), z))
  // Note: This could be removed with appropriate canonicalization of the
  // input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
  // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
  // us from implementing the canonicalization in visitFSUB.
  if (N0.getOpcode() == ISD::FP_EXTEND) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == ISD::FNEG) {
      SDValue N000 = N00.getOperand(0);
      if (isContractableFMUL(N000) &&
          TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
        return DAG.getNode(ISD::FNEG, SL, VT,
                           DAG.getNode(PreferredFusedOpcode, SL, VT,
                                       DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                   N000.getOperand(0)),
                                       DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                   N000.getOperand(1)),
                                       N1, Flags));
      }
    }
  }

  // fold (fsub (fneg (fpext (fmul x, y))), z)
  //   -> (fneg (fma (fpext x), (fpext y), z))
  // Note: This could be removed with appropriate canonicalization of the
  // input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
  // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
  // us from implementing the canonicalization in visitFSUB.
  if (N0.getOpcode() == ISD::FNEG) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == ISD::FP_EXTEND) {
      SDValue N000 = N00.getOperand(0);
      if (isContractableFMUL(N000) &&
          TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N000.getValueType())) {
        return DAG.getNode(ISD::FNEG, SL, VT,
                           DAG.getNode(PreferredFusedOpcode, SL, VT,
                                       DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                   N000.getOperand(0)),
                                       DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                   N000.getOperand(1)),
                                       N1, Flags));
      }
    }
  }

  // More folding opportunities when target permits.
  if (Aggressive) {
    // fold (fsub (fma x, y, (fmul u, v)), z)
    //   -> (fma x, y, (fma u, v, (fneg z)))
    if (CanFuse && N0.getOpcode() == PreferredFusedOpcode &&
        isContractableFMUL(N0.getOperand(2)) && N0->hasOneUse() &&
        N0.getOperand(2)->hasOneUse()) {
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         N0.getOperand(0), N0.getOperand(1),
                         DAG.getNode(PreferredFusedOpcode, SL, VT,
                                     N0.getOperand(2).getOperand(0),
                                     N0.getOperand(2).getOperand(1),
                                     DAG.getNode(ISD::FNEG, SL, VT,
                                                 N1), Flags), Flags);
    }

    // fold (fsub x, (fma y, z, (fmul u, v)))
    //   -> (fma (fneg y), z, (fma (fneg u), v, x))
    if (CanFuse && N1.getOpcode() == PreferredFusedOpcode &&
        isContractableFMUL(N1.getOperand(2))) {
      SDValue N20 = N1.getOperand(2).getOperand(0);
      SDValue N21 = N1.getOperand(2).getOperand(1);
      return DAG.getNode(PreferredFusedOpcode, SL, VT,
                         DAG.getNode(ISD::FNEG, SL, VT,
                                     N1.getOperand(0)),
                         N1.getOperand(1),
                         DAG.getNode(PreferredFusedOpcode, SL, VT,
                                     DAG.getNode(ISD::FNEG, SL, VT, N20),
                                     N21, N0, Flags), Flags);
    }


    // fold (fsub (fma x, y, (fpext (fmul u, v))), z)
    //   -> (fma x, y, (fma (fpext u), (fpext v), (fneg z)))
    if (N0.getOpcode() == PreferredFusedOpcode) {
      SDValue N02 = N0.getOperand(2);
      if (N02.getOpcode() == ISD::FP_EXTEND) {
        SDValue N020 = N02.getOperand(0);
        if (isContractableFMUL(N020) &&
            TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N020.getValueType())) {
          return DAG.getNode(PreferredFusedOpcode, SL, VT,
                             N0.getOperand(0), N0.getOperand(1),
                             DAG.getNode(PreferredFusedOpcode, SL, VT,
                                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                     N020.getOperand(0)),
                                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                     N020.getOperand(1)),
                                         DAG.getNode(ISD::FNEG, SL, VT,
                                                     N1), Flags), Flags);
        }
      }
    }

    // fold (fsub (fpext (fma x, y, (fmul u, v))), z)
    //   -> (fma (fpext x), (fpext y),
    //           (fma (fpext u), (fpext v), (fneg z)))
    // FIXME: This turns two single-precision and one double-precision
    // operation into two double-precision operations, which might not be
    // interesting for all targets, especially GPUs.
    if (N0.getOpcode() == ISD::FP_EXTEND) {
      SDValue N00 = N0.getOperand(0);
      if (N00.getOpcode() == PreferredFusedOpcode) {
        SDValue N002 = N00.getOperand(2);
        if (isContractableFMUL(N002) &&
            TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
          return DAG.getNode(PreferredFusedOpcode, SL, VT,
                             DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                         N00.getOperand(0)),
                             DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                         N00.getOperand(1)),
                             DAG.getNode(PreferredFusedOpcode, SL, VT,
                                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                     N002.getOperand(0)),
                                         DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                     N002.getOperand(1)),
                                         DAG.getNode(ISD::FNEG, SL, VT,
                                                     N1), Flags), Flags);
        }
      }
    }

    // fold (fsub x, (fma y, z, (fpext (fmul u, v))))
    //   -> (fma (fneg y), z, (fma (fneg (fpext u)), (fpext v), x))
    if (N1.getOpcode() == PreferredFusedOpcode &&
        N1.getOperand(2).getOpcode() == ISD::FP_EXTEND) {
      SDValue N120 = N1.getOperand(2).getOperand(0);
      if (isContractableFMUL(N120) &&
          TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N120.getValueType())) {
        SDValue N1200 = N120.getOperand(0);
        SDValue N1201 = N120.getOperand(1);
        return DAG.getNode(PreferredFusedOpcode, SL, VT,
                           DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)),
                           N1.getOperand(1),
                           DAG.getNode(PreferredFusedOpcode, SL, VT,
                                       DAG.getNode(ISD::FNEG, SL, VT,
                                                   DAG.getNode(ISD::FP_EXTEND, SL,
                                                               VT, N1200)),
                                       DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                   N1201),
                                       N0, Flags), Flags);
      }
    }

    // fold (fsub x, (fpext (fma y, z, (fmul u, v))))
    //   -> (fma (fneg (fpext y)), (fpext z),
    //           (fma (fneg (fpext u)), (fpext v), x))
    // FIXME: This turns two single-precision and one double-precision
    // operation into two double-precision operations, which might not be
    // interesting for all targets, especially GPUs.
    if (N1.getOpcode() == ISD::FP_EXTEND &&
        N1.getOperand(0).getOpcode() == PreferredFusedOpcode) {
      SDValue CvtSrc = N1.getOperand(0);
      SDValue N100 = CvtSrc.getOperand(0);
      SDValue N101 = CvtSrc.getOperand(1);
      SDValue N102 = CvtSrc.getOperand(2);
      if (isContractableFMUL(N102) &&
          TLI.isFPExtFoldable(PreferredFusedOpcode, VT, CvtSrc.getValueType())) {
        SDValue N1020 = N102.getOperand(0);
        SDValue N1021 = N102.getOperand(1);
        return DAG.getNode(PreferredFusedOpcode, SL, VT,
                           DAG.getNode(ISD::FNEG, SL, VT,
                                       DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                   N100)),
                           DAG.getNode(ISD::FP_EXTEND, SL, VT, N101),
                           DAG.getNode(PreferredFusedOpcode, SL, VT,
                                       DAG.getNode(ISD::FNEG, SL, VT,
                                                   DAG.getNode(ISD::FP_EXTEND, SL,
                                                               VT, N1020)),
                                       DAG.getNode(ISD::FP_EXTEND, SL, VT,
                                                   N1021),
                                       N0, Flags), Flags);
      }
    }
  }

  return SDValue();
}

/// Try to perform FMA combining on a given FMUL node based on the distributive
/// law x * (y + 1) = x * y + x and variants thereof (commuted versions,
/// subtraction instead of addition).
SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc SL(N);
  const SDNodeFlags Flags = N->getFlags();

  assert(N->getOpcode() == ISD::FMUL && "Expected FMUL Operation");

  const TargetOptions &Options = DAG.getTarget().Options;

  // The transforms below are incorrect when x == 0 and y == inf, because the
  // intermediate multiplication produces a nan.
  if (!Options.NoInfsFPMath)
    return SDValue();

  // Floating-point multiply-add without intermediate rounding.
  bool HasFMA =
      (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath) &&
      TLI.isFMAFasterThanFMulAndFAdd(VT) &&
      (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));

  // Floating-point multiply-add with intermediate rounding. This can result
  // in a less precise result due to the changed rounding order.
  bool HasFMAD = Options.UnsafeFPMath &&
                 (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));

  // No valid opcode, do not combine.
  if (!HasFMAD && !HasFMA)
    return SDValue();

  // Always prefer FMAD to FMA for precision.
  unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
  bool Aggressive = TLI.enableAggressiveFMAFusion(VT);

  // fold (fmul (fadd x0, +1.0), y) -> (fma x0, y, y)
  // fold (fmul (fadd x0, -1.0), y) -> (fma x0, y, (fneg y))
  auto FuseFADD = [&](SDValue X, SDValue Y, const SDNodeFlags Flags) {
    if (X.getOpcode() == ISD::FADD && (Aggressive || X->hasOneUse())) {
      if (auto *C = isConstOrConstSplatFP(X.getOperand(1), true)) {
        if (C->isExactlyValue(+1.0))
          return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
                             Y, Flags);
        if (C->isExactlyValue(-1.0))
          return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
                             DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
      }
    }
    return SDValue();
  };
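
  // The algebra behind FuseFADD (illustrative):
  //   (x0 + 1.0) * y = x0*y + y = fma(x0, y, y)
  //   (x0 - 1.0) * y = x0*y - y = fma(x0, y, -y)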

  if (SDValue FMA = FuseFADD(N0, N1, Flags))
    return FMA;
  if (SDValue FMA = FuseFADD(N1, N0, Flags))
    return FMA;

  // fold (fmul (fsub +1.0, x1), y) -> (fma (fneg x1), y, y)
  // fold (fmul (fsub -1.0, x1), y) -> (fma (fneg x1), y, (fneg y))
  // fold (fmul (fsub x0, +1.0), y) -> (fma x0, y, (fneg y))
  // fold (fmul (fsub x0, -1.0), y) -> (fma x0, y, y)
  auto FuseFSUB = [&](SDValue X, SDValue Y, const SDNodeFlags Flags) {
    if (X.getOpcode() == ISD::FSUB && (Aggressive || X->hasOneUse())) {
      if (auto *C0 = isConstOrConstSplatFP(X.getOperand(0), true)) {
        if (C0->isExactlyValue(+1.0))
          return DAG.getNode(PreferredFusedOpcode, SL, VT,
                             DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
                             Y, Flags);
        if (C0->isExactlyValue(-1.0))
          return DAG.getNode(PreferredFusedOpcode, SL, VT,
                             DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
                             DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
      }
      if (auto *C1 = isConstOrConstSplatFP(X.getOperand(1), true)) {
        if (C1->isExactlyValue(+1.0))
          return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
                             DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
        if (C1->isExactlyValue(-1.0))
          return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
                             Y, Flags);
      }
    }
    return SDValue();
  };
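
  // The algebra behind FuseFSUB (illustrative):
  //   (+1.0 - x1) * y = y - x1*y    = fma(-x1, y, y)
  //   (-1.0 - x1) * y = -(x1*y) - y = fma(-x1, y, -y)
  //   (x0 - 1.0) * y  = x0*y - y    = fma(x0, y, -y)
  //   (x0 + 1.0) * y  = x0*y + y    = fma(x0, y, y)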

  if (SDValue FMA = FuseFSUB(N0, N1, Flags))
    return FMA;
  if (SDValue FMA = FuseFSUB(N1, N0, Flags))
    return FMA;

  return SDValue();
}

SDValue DAGCombiner::visitFADD(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  bool N0CFP = isConstantFPBuildVectorOrConstantFP(N0);
  bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  const TargetOptions &Options = DAG.getTarget().Options;
  const SDNodeFlags Flags = N->getFlags();

  // fold vector ops
  if (VT.isVector())
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

  // fold (fadd c1, c2) -> c1 + c2
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FADD, DL, VT, N0, N1, Flags);

  // canonicalize constant to RHS
  if (N0CFP && !N1CFP)
    return DAG.getNode(ISD::FADD, DL, VT, N1, N0, Flags);

  // N0 + -0.0 --> N0 (also allowed with +0.0 and fast-math)
  ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, true);
  if (N1C && N1C->isZero())
    if (N1C->isNegative() || Options.NoSignedZerosFPMath ||
        Flags.hasNoSignedZeros())
      return N0;

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // fold (fadd A, (fneg B)) -> (fsub A, B)
  if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
      TLI.isNegatibleForFree(N1, DAG, LegalOperations, ForCodeSize) == 2)
    return DAG.getNode(
        ISD::FSUB, DL, VT, N0,
        TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize), Flags);

  // fold (fadd (fneg A), B) -> (fsub B, A)
  if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
      TLI.isNegatibleForFree(N0, DAG, LegalOperations, ForCodeSize) == 2)
    return DAG.getNode(
        ISD::FSUB, DL, VT, N1,
        TLI.getNegatedExpression(N0, DAG, LegalOperations, ForCodeSize), Flags);

  auto isFMulNegTwo = [](SDValue FMul) {
    if (!FMul.hasOneUse() || FMul.getOpcode() != ISD::FMUL)
      return false;
    auto *C = isConstOrConstSplatFP(FMul.getOperand(1), true);
    return C && C->isExactlyValue(-2.0);
  };

  // fadd (fmul B, -2.0), A --> fsub A, (fadd B, B)
  if (isFMulNegTwo(N0)) {
    SDValue B = N0.getOperand(0);
    SDValue Add = DAG.getNode(ISD::FADD, DL, VT, B, B, Flags);
    return DAG.getNode(ISD::FSUB, DL, VT, N1, Add, Flags);
  }
  // fadd A, (fmul B, -2.0) --> fsub A, (fadd B, B)
  if (isFMulNegTwo(N1)) {
    SDValue B = N1.getOperand(0);
    SDValue Add = DAG.getNode(ISD::FADD, DL, VT, B, B, Flags);
    return DAG.getNode(ISD::FSUB, DL, VT, N0, Add, Flags);
  }
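
  // Algebraically (illustrative): A + B * -2.0 = A - 2*B = A - (B + B); using
  // (fadd B, B) avoids materializing the -2.0 constant.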

  // No FP constant should be created after legalization as the Instruction
  // Selection pass has a hard time dealing with FP constants.
  bool AllowNewConst = (Level < AfterLegalizeDAG);

  // If nnan is enabled, fold lots of things.
  if ((Options.NoNaNsFPMath || Flags.hasNoNaNs()) && AllowNewConst) {
    // If allowed, fold (fadd (fneg x), x) -> 0.0
    if (N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1)
      return DAG.getConstantFP(0.0, DL, VT);

    // If allowed, fold (fadd x, (fneg x)) -> 0.0
    if (N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0)
      return DAG.getConstantFP(0.0, DL, VT);
  }

  // If 'unsafe math' or reassoc and nsz, fold lots of things.
  // TODO: break out portions of the transformations below for which Unsafe is
  //       considered and which do not require both nsz and reassoc
  if (((Options.UnsafeFPMath && Options.NoSignedZerosFPMath) ||
       (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros())) &&
      AllowNewConst) {
    // fadd (fadd x, c1), c2 -> fadd x, c1 + c2
    if (N1CFP && N0.getOpcode() == ISD::FADD &&
        isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
      SDValue NewC = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), N1, Flags);
      return DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(0), NewC, Flags);
    }

    // We can fold chains of FADD's of the same value into multiplications.
    // This transform is not safe in general because we are reducing the number
    // of rounding steps.
    if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) {
      if (N0.getOpcode() == ISD::FMUL) {
        bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
        bool CFP01 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(1));

        // (fadd (fmul x, c), x) -> (fmul x, c+1)
        if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
          SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1),
                                       DAG.getConstantFP(1.0, DL, VT), Flags);
          return DAG.getNode(ISD::FMUL, DL, VT, N1, NewCFP, Flags);
        }

        // (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2)
        if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD &&
            N1.getOperand(0) == N1.getOperand(1) &&
            N0.getOperand(0) == N1.getOperand(0)) {
          SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1),
                                       DAG.getConstantFP(2.0, DL, VT), Flags);
          return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), NewCFP, Flags);
        }
      }

      if (N1.getOpcode() == ISD::FMUL) {
        bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
        bool CFP11 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(1));

        // (fadd x, (fmul x, c)) -> (fmul x, c+1)
        if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
          SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1),
                                       DAG.getConstantFP(1.0, DL, VT), Flags);
          return DAG.getNode(ISD::FMUL, DL, VT, N0, NewCFP, Flags);
        }

        // (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2)
        if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD &&
            N0.getOperand(0) == N0.getOperand(1) &&
            N1.getOperand(0) == N0.getOperand(0)) {
          SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1),
                                       DAG.getConstantFP(2.0, DL, VT), Flags);
          return DAG.getNode(ISD::FMUL, DL, VT, N1.getOperand(0), NewCFP, Flags);
        }
      }

      if (N0.getOpcode() == ISD::FADD) {
        bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
        // (fadd (fadd x, x), x) -> (fmul x, 3.0)
        if (!CFP00 && N0.getOperand(0) == N0.getOperand(1) &&
            (N0.getOperand(0) == N1)) {
          return DAG.getNode(ISD::FMUL, DL, VT,
                             N1, DAG.getConstantFP(3.0, DL, VT), Flags);
        }
      }

      if (N1.getOpcode() == ISD::FADD) {
        bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
        // (fadd x, (fadd x, x)) -> (fmul x, 3.0)
        if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
            N1.getOperand(0) == N0) {
          return DAG.getNode(ISD::FMUL, DL, VT,
                             N0, DAG.getConstantFP(3.0, DL, VT), Flags);
        }
      }

      // (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0)
      if (N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD &&
          N0.getOperand(0) == N0.getOperand(1) &&
          N1.getOperand(0) == N1.getOperand(1) &&
          N0.getOperand(0) == N1.getOperand(0)) {
        return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0),
                           DAG.getConstantFP(4.0, DL, VT), Flags);
      }
    }
  } // enable-unsafe-fp-math
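
  // Note (illustrative): the reassociation folds above change the rounding
  // sequence. E.g. folding (fadd (fmul x, c), x) into (fmul x, c+1) replaces
  // round(round(x*c) + x) with round(x * (c+1)), which may differ in the last
  // bit; hence the unsafe/reassoc guards.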

  // FADD -> FMA combines:
  if (SDValue Fused = visitFADDForFMACombine(N)) {
    AddToWorklist(Fused.getNode());
    return Fused;
  }
  return SDValue();
}

SDValue DAGCombiner::visitFSUB(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0, true);
  ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, true);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  const TargetOptions &Options = DAG.getTarget().Options;
  const SDNodeFlags Flags = N->getFlags();

  // fold vector ops
  if (VT.isVector())
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

  // fold (fsub c1, c2) -> c1-c2
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FSUB, DL, VT, N0, N1, Flags);

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  // (fsub A, 0) -> A
  if (N1CFP && N1CFP->isZero()) {
    if (!N1CFP->isNegative() || Options.NoSignedZerosFPMath ||
        Flags.hasNoSignedZeros()) {
      return N0;
    }
  }

  if (N0 == N1) {
    // (fsub x, x) -> 0.0
    if (Options.NoNaNsFPMath || Flags.hasNoNaNs())
      return DAG.getConstantFP(0.0f, DL, VT);
  }

  // (fsub -0.0, N1) -> -N1
  // NOTE: It is safe to transform an FSUB(-0.0,X) into an FNEG(X), since the
  //       FSUB does not specify the sign bit of a NaN. Also note that for
  //       the same reason, the inverse transform is not safe, unless fast math
  //       flags are in play.
  if (N0CFP && N0CFP->isZero()) {
    if (N0CFP->isNegative() ||
        (Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros())) {
      if (TLI.isNegatibleForFree(N1, DAG, LegalOperations, ForCodeSize))
        return TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize);
      if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
        return DAG.getNode(ISD::FNEG, DL, VT, N1, Flags);
    }
  }

  if (((Options.UnsafeFPMath && Options.NoSignedZerosFPMath) ||
       (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros())) &&
      N1.getOpcode() == ISD::FADD) {
    // X - (X + Y) -> -Y
    if (N0 == N1->getOperand(0))
      return DAG.getNode(ISD::FNEG, DL, VT, N1->getOperand(1), Flags);
    // X - (Y + X) -> -Y
    if (N0 == N1->getOperand(1))
      return DAG.getNode(ISD::FNEG, DL, VT, N1->getOperand(0), Flags);
  }
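
  // Example of why nsz is required above (illustrative): with X == +0.0 and
  // Y == +0.0, X - (X + Y) evaluates to +0.0, but the folded result -Y is
  // -0.0.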

  // fold (fsub A, (fneg B)) -> (fadd A, B)
  if (TLI.isNegatibleForFree(N1, DAG, LegalOperations, ForCodeSize))
    return DAG.getNode(
        ISD::FADD, DL, VT, N0,
        TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize), Flags);

  // FSUB -> FMA combines:
  if (SDValue Fused = visitFSUBForFMACombine(N)) {
    AddToWorklist(Fused.getNode());
    return Fused;
  }

  return SDValue();
}

/// Return true if both inputs are at least as cheap in negated form and at
/// least one input is strictly cheaper in negated form.
bool DAGCombiner::isCheaperToUseNegatedFPOps(SDValue X, SDValue Y) {
  if (char LHSNeg =
          TLI.isNegatibleForFree(X, DAG, LegalOperations, ForCodeSize))
    if (char RHSNeg =
            TLI.isNegatibleForFree(Y, DAG, LegalOperations, ForCodeSize))
      // Both negated operands are at least as cheap as their counterparts.
      // Check to see if at least one is cheaper negated.
      if (LHSNeg == 2 || RHSNeg == 2)
        return true;

  return false;
}

SDValue DAGCombiner::visitFMUL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0, true);
  ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, true);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  const TargetOptions &Options = DAG.getTarget().Options;
  const SDNodeFlags Flags = N->getFlags();

  // fold vector ops
  if (VT.isVector()) {
    // This just handles C1 * C2 for vectors. Other vector folds are below.
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;
  }

  // fold (fmul c1, c2) -> c1*c2
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FMUL, DL, VT, N0, N1, Flags);

  // canonicalize constant to RHS
  if (isConstantFPBuildVectorOrConstantFP(N0) &&
     !isConstantFPBuildVectorOrConstantFP(N1))
    return DAG.getNode(ISD::FMUL, DL, VT, N1, N0, Flags);

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  if ((Options.NoNaNsFPMath && Options.NoSignedZerosFPMath) ||
      (Flags.hasNoNaNs() && Flags.hasNoSignedZeros())) {
    // fold (fmul A, 0) -> 0
    if (N1CFP && N1CFP->isZero())
      return N1;
  }

  if (Options.UnsafeFPMath || Flags.hasAllowReassociation()) {
    // fmul (fmul X, C1), C2 -> fmul X, C1 * C2
    if (isConstantFPBuildVectorOrConstantFP(N1) &&
        N0.getOpcode() == ISD::FMUL) {
      SDValue N00 = N0.getOperand(0);
      SDValue N01 = N0.getOperand(1);
      // Avoid an infinite loop by making sure that N00 is not a constant
      // (the inner multiply has not been constant folded yet).
      if (isConstantFPBuildVectorOrConstantFP(N01) &&
          !isConstantFPBuildVectorOrConstantFP(N00)) {
        SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, N01, N1, Flags);
        return DAG.getNode(ISD::FMUL, DL, VT, N00, MulConsts, Flags);
      }
    }

    // Match a special-case: we convert X * 2.0 into fadd.
    // fmul (fadd X, X), C -> fmul X, 2.0 * C
    if (N0.getOpcode() == ISD::FADD && N0.hasOneUse() &&
        N0.getOperand(0) == N0.getOperand(1)) {
      const SDValue Two = DAG.getConstantFP(2.0, DL, VT);
      SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1, Flags);
      return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts, Flags);
    }
  }

  // fold (fmul X, 2.0) -> (fadd X, X)
  if (N1CFP && N1CFP->isExactlyValue(+2.0))
    return DAG.getNode(ISD::FADD, DL, VT, N0, N0, Flags);

  // fold (fmul X, -1.0) -> (fneg X)
  if (N1CFP && N1CFP->isExactlyValue(-1.0))
    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
      return DAG.getNode(ISD::FNEG, DL, VT, N0);

  // -N0 * -N1 --> N0 * N1
  if (isCheaperToUseNegatedFPOps(N0, N1)) {
    SDValue NegN0 =
        TLI.getNegatedExpression(N0, DAG, LegalOperations, ForCodeSize);
    SDValue NegN1 =
        TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize);
    return DAG.getNode(ISD::FMUL, DL, VT, NegN0, NegN1, Flags);
  }

  // fold (fmul X, (select (fcmp X > 0.0), -1.0, 1.0)) -> (fneg (fabs X))
  // fold (fmul X, (select (fcmp X > 0.0), 1.0, -1.0)) -> (fabs X)
  if (Flags.hasNoNaNs() && Flags.hasNoSignedZeros() &&
      (N0.getOpcode() == ISD::SELECT || N1.getOpcode() == ISD::SELECT) &&
      TLI.isOperationLegal(ISD::FABS, VT)) {
    SDValue Select = N0, X = N1;
    if (Select.getOpcode() != ISD::SELECT)
      std::swap(Select, X);

    SDValue Cond = Select.getOperand(0);
    auto TrueOpnd  = dyn_cast<ConstantFPSDNode>(Select.getOperand(1));
    auto FalseOpnd = dyn_cast<ConstantFPSDNode>(Select.getOperand(2));

    if (TrueOpnd && FalseOpnd &&
        Cond.getOpcode() == ISD::SETCC && Cond.getOperand(0) == X &&
        isa<ConstantFPSDNode>(Cond.getOperand(1)) &&
        cast<ConstantFPSDNode>(Cond.getOperand(1))->isExactlyValue(0.0)) {
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
      switch (CC) {
      default: break;
      case ISD::SETOLT:
      case ISD::SETULT:
      case ISD::SETOLE:
      case ISD::SETULE:
      case ISD::SETLT:
      case ISD::SETLE:
        std::swap(TrueOpnd, FalseOpnd);
        LLVM_FALLTHROUGH;
      case ISD::SETOGT:
      case ISD::SETUGT:
      case ISD::SETOGE:
      case ISD::SETUGE:
      case ISD::SETGT:
      case ISD::SETGE:
        if (TrueOpnd->isExactlyValue(-1.0) && FalseOpnd->isExactlyValue(1.0) &&
            TLI.isOperationLegal(ISD::FNEG, VT))
          return DAG.getNode(ISD::FNEG, DL, VT,
                   DAG.getNode(ISD::FABS, DL, VT, X));
        if (TrueOpnd->isExactlyValue(1.0) && FalseOpnd->isExactlyValue(-1.0))
          return DAG.getNode(ISD::FABS, DL, VT, X);

        break;
      }
    }
  }

  // FMUL -> FMA combines:
  if (SDValue Fused = visitFMULForFMADistributiveCombine(N)) {
    AddToWorklist(Fused.getNode());
    return Fused;
  }

  return SDValue();
}

SDValue DAGCombiner::visitFMA(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  const TargetOptions &Options = DAG.getTarget().Options;

  // FMA nodes have flags that propagate to the created nodes.
  const SDNodeFlags Flags = N->getFlags();
  bool UnsafeFPMath = Options.UnsafeFPMath || isContractable(N);

  // Constant fold FMA.
  if (isa<ConstantFPSDNode>(N0) &&
      isa<ConstantFPSDNode>(N1) &&
      isa<ConstantFPSDNode>(N2)) {
    return DAG.getNode(ISD::FMA, DL, VT, N0, N1, N2);
  }

  // (-N0 * -N1) + N2 --> (N0 * N1) + N2
  if (isCheaperToUseNegatedFPOps(N0, N1)) {
    SDValue NegN0 =
        TLI.getNegatedExpression(N0, DAG, LegalOperations, ForCodeSize);
    SDValue NegN1 =
        TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize);
    return DAG.getNode(ISD::FMA, DL, VT, NegN0, NegN1, N2, Flags);
  }

  if (UnsafeFPMath) {
    if (N0CFP && N0CFP->isZero())
      return N2;
    if (N1CFP && N1CFP->isZero())
      return N2;
  }
  // TODO: The FMA node should have flags that propagate to these nodes.
  if (N0CFP && N0CFP->isExactlyValue(1.0))
    return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2);
  if (N1CFP && N1CFP->isExactlyValue(1.0))
    return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2);

  // Canonicalize (fma c, x, y) -> (fma x, c, y)
  if (isConstantFPBuildVectorOrConstantFP(N0) &&
     !isConstantFPBuildVectorOrConstantFP(N1))
    return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2);

  if (UnsafeFPMath) {
    // (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
    if (N2.getOpcode() == ISD::FMUL && N0 == N2.getOperand(0) &&
        isConstantFPBuildVectorOrConstantFP(N1) &&
        isConstantFPBuildVectorOrConstantFP(N2.getOperand(1))) {
      return DAG.getNode(ISD::FMUL, DL, VT, N0,
                         DAG.getNode(ISD::FADD, DL, VT, N1, N2.getOperand(1),
                                     Flags), Flags);
    }

    // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
    if (N0.getOpcode() == ISD::FMUL &&
        isConstantFPBuildVectorOrConstantFP(N1) &&
        isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
      return DAG.getNode(ISD::FMA, DL, VT,
                         N0.getOperand(0),
                         DAG.getNode(ISD::FMUL, DL, VT, N1, N0.getOperand(1),
                                     Flags),
                         N2);
    }
  }

  // (fma x, 1, y) -> (fadd x, y)
  // (fma x, -1, y) -> (fadd (fneg x), y)
  if (N1CFP) {
    if (N1CFP->isExactlyValue(1.0))
      // TODO: The FMA node should have flags that propagate to this node.
      return DAG.getNode(ISD::FADD, DL, VT, N0, N2);

    if (N1CFP->isExactlyValue(-1.0) &&
        (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) {
      SDValue RHSNeg = DAG.getNode(ISD::FNEG, DL, VT, N0);
      AddToWorklist(RHSNeg.getNode());
      // TODO: The FMA node should have flags that propagate to this node.
      return DAG.getNode(ISD::FADD, DL, VT, N2, RHSNeg);
    }

    // fma (fneg x), K, y -> fma x, -K, y
    if (N0.getOpcode() == ISD::FNEG &&
        (TLI.isOperationLegal(ISD::ConstantFP, VT) ||
         (N1.hasOneUse() && !TLI.isFPImmLegal(N1CFP->getValueAPF(), VT,
                                              ForCodeSize)))) {
      return DAG.getNode(ISD::FMA, DL, VT, N0.getOperand(0),
                         DAG.getNode(ISD::FNEG, DL, VT, N1, Flags), N2);
    }
  }

  if (UnsafeFPMath) {
    // (fma x, c, x) -> (fmul x, (c+1))
    if (N1CFP && N0 == N2) {
      return DAG.getNode(ISD::FMUL, DL, VT, N0,
                         DAG.getNode(ISD::FADD, DL, VT, N1,
                                     DAG.getConstantFP(1.0, DL, VT), Flags),
                         Flags);
    }

    // (fma x, c, (fneg x)) -> (fmul x, (c-1))
    if (N1CFP && N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) {
      return DAG.getNode(ISD::FMUL, DL, VT, N0,
                         DAG.getNode(ISD::FADD, DL, VT, N1,
                                     DAG.getConstantFP(-1.0, DL, VT), Flags),
                         Flags);
    }
  }

  return SDValue();
}

// Combine multiple FDIVs with the same divisor into multiple FMULs by the
// reciprocal.
// E.g., (a / D; b / D;) -> (recip = 1.0 / D; a * recip; b * recip)
// Notice that this is not always beneficial. One reason is different targets
// may have different costs for FDIV and FMUL, so sometimes the cost of two
// FDIVs may be lower than the cost of one FDIV and two FMULs. Another reason
// is the critical path is increased from "one FDIV" to "one FDIV + one FMUL".
SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) {
  // TODO: Limit this transform based on optsize/minsize - it always creates at
  //       least 1 extra instruction. But the perf win may be substantial enough
  //       that only minsize should restrict this.
  bool UnsafeMath = DAG.getTarget().Options.UnsafeFPMath;
  const SDNodeFlags Flags = N->getFlags();
  if (!UnsafeMath && !Flags.hasAllowReciprocal())
    return SDValue();

  // Skip if current node is a reciprocal/fneg-reciprocal.
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0, /* AllowUndefs */ true);
  if (N0CFP && (N0CFP->isExactlyValue(1.0) || N0CFP->isExactlyValue(-1.0)))
    return SDValue();

  // Exit early if the target does not want this transform or if there can't
  // possibly be enough uses of the divisor to make the transform worthwhile.
  SDValue N1 = N->getOperand(1);
  unsigned MinUses = TLI.combineRepeatedFPDivisors();

  // For splat vectors, scale the number of uses by the splat factor. If we can
  // convert the division into a scalar op, that will likely be much faster.
  unsigned NumElts = 1;
  EVT VT = N->getValueType(0);
  if (VT.isVector() && DAG.isSplatValue(N1))
    NumElts = VT.getVectorNumElements();

  if (!MinUses || (N1->use_size() * NumElts) < MinUses)
    return SDValue();

  // Find all FDIV users of the same divisor.
  // Use a set because duplicates may be present in the user list.
  SetVector<SDNode *> Users;
  for (auto *U : N1->uses()) {
    if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1) {
      // This division is eligible for optimization only if global unsafe math
      // is enabled or if this division allows reciprocal formation.
      if (UnsafeMath || U->getFlags().hasAllowReciprocal())
        Users.insert(U);
    }
  }

  // Now that we have the actual number of divisor uses, make sure it meets
  // the minimum threshold specified by the target.
  if ((Users.size() * NumElts) < MinUses)
    return SDValue();

  SDLoc DL(N);
  SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
  SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1, Flags);

  // Dividend / Divisor -> Dividend * Reciprocal
  for (auto *U : Users) {
    SDValue Dividend = U->getOperand(0);
    if (Dividend != FPOne) {
      SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(U), VT, Dividend,
                                    Reciprocal, Flags);
      CombineTo(U, NewNode);
    } else if (U != Reciprocal.getNode()) {
      // In the absence of fast-math-flags, this user node is always the
      // same node as Reciprocal, but with FMF they may be different nodes.
      CombineTo(U, Reciprocal);
    }
  }
  return SDValue(N, 0);  // N was replaced.
}

SDValue DAGCombiner::visitFDIV(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  const TargetOptions &Options = DAG.getTarget().Options;
  SDNodeFlags Flags = N->getFlags();

  // fold vector ops
  if (VT.isVector())
    if (SDValue FoldedVOp = SimplifyVBinOp(N))
      return FoldedVOp;

  // fold (fdiv c1, c2) -> c1/c2
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1, Flags);

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  if (SDValue V = combineRepeatedFPDivisors(N))
    return V;

  if (Options.UnsafeFPMath || Flags.hasAllowReciprocal()) {
    // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
    if (N1CFP) {
      // Compute the reciprocal 1.0 / c2.
      const APFloat &N1APF = N1CFP->getValueAPF();
      APFloat Recip(N1APF.getSemantics(), 1); // 1.0
      APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven);
      // Only do the transform if the reciprocal is a legal fp immediate that
      // isn't too nasty (e.g. NaN, denormal, ...).
      if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty
          (!LegalOperations ||
           // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM
           // backend)... we should handle this gracefully after Legalize.
           // TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT) ||
           TLI.isOperationLegal(ISD::ConstantFP, VT) ||
           TLI.isFPImmLegal(Recip, VT, ForCodeSize)))
        return DAG.getNode(ISD::FMUL, DL, VT, N0,
                           DAG.getConstantFP(Recip, DL, VT), Flags);
    }
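
    // For example (illustrative): (fdiv X, 4.0) becomes (fmul X, 0.25) with an
    // exact reciprocal (opOK), while (fdiv X, 3.0) yields an inexact
    // reciprocal (opInexact), accepted here because the caller already opted
    // into reciprocal/unsafe math.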

    // If this FDIV is part of a reciprocal square root, it may be folded
    // into a target-specific square root estimate instruction.
    if (N1.getOpcode() == ISD::FSQRT) {
      if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0), Flags))
        return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
    } else if (N1.getOpcode() == ISD::FP_EXTEND &&
               N1.getOperand(0).getOpcode() == ISD::FSQRT) {
      if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0),
                                          Flags)) {
        RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV);
        AddToWorklist(RV.getNode());
        return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
      }
    } else if (N1.getOpcode() == ISD::FP_ROUND &&
               N1.getOperand(0).getOpcode() == ISD::FSQRT) {
      if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0),
                                          Flags)) {
        RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1));
        AddToWorklist(RV.getNode());
        return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
      }
    } else if (N1.getOpcode() == ISD::FMUL) {
      // Look through an FMUL. Even though this won't remove the FDIV directly,
      // it's still worthwhile to get rid of the FSQRT if possible.
      SDValue SqrtOp;
      SDValue OtherOp;
      if (N1.getOperand(0).getOpcode() == ISD::FSQRT) {
        SqrtOp = N1.getOperand(0);
        OtherOp = N1.getOperand(1);
      } else if (N1.getOperand(1).getOpcode() == ISD::FSQRT) {
        SqrtOp = N1.getOperand(1);
        OtherOp = N1.getOperand(0);
      }
      if (SqrtOp.getNode()) {
        // We found a FSQRT, so try to make this fold:
        // x / (y * sqrt(z)) -> x * (rsqrt(z) / y)
        if (SDValue RV = buildRsqrtEstimate(SqrtOp.getOperand(0), Flags)) {
          RV = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, RV, OtherOp, Flags);
          AddToWorklist(RV.getNode());
          return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
        }
      }
    }

    // Fold into a reciprocal estimate and multiply instead of a real divide.
    if (SDValue RV = BuildDivEstimate(N0, N1, Flags))
      return RV;
  }

  // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
  if (isCheaperToUseNegatedFPOps(N0, N1))
    return DAG.getNode(
        ISD::FDIV, SDLoc(N), VT,
        TLI.getNegatedExpression(N0, DAG, LegalOperations, ForCodeSize),
        TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize), Flags);

  return SDValue();
}

SDValue DAGCombiner::visitFREM(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);

  // fold (frem c1, c2) -> fmod(c1,c2)
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1, N->getFlags());

  if (SDValue NewSel = foldBinOpIntoSelect(N))
    return NewSel;

  return SDValue();
}

SDValue DAGCombiner::visitFSQRT(SDNode *N) {
  SDNodeFlags Flags = N->getFlags();
  if (!DAG.getTarget().Options.UnsafeFPMath &&
      !Flags.hasApproximateFuncs())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  if (TLI.isFsqrtCheap(N0, DAG))
    return SDValue();

  // FSQRT nodes have flags that propagate to the created nodes.
  return buildSqrtEstimate(N0, Flags);
}

/// copysign(x, fp_extend(y)) -> copysign(x, y)
/// copysign(x, fp_round(y)) -> copysign(x, y)
static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N) {
  SDValue N1 = N->getOperand(1);
  if ((N1.getOpcode() == ISD::FP_EXTEND ||
       N1.getOpcode() == ISD::FP_ROUND)) {
    // Do not optimize out type conversion of f128 type yet.
    // For some targets like x86_64, configuration is changed to keep one f128
    // value in one SSE register, but instruction selection cannot handle
    // FCOPYSIGN on SSE registers yet.
    EVT N1VT = N1->getValueType(0);
    EVT N1Op0VT = N1->getOperand(0).getValueType();
    return (N1VT == N1Op0VT || N1Op0VT != MVT::f128);
  }
  return false;
}

SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  bool N0CFP = isConstantFPBuildVectorOrConstantFP(N0);
  bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1);
  EVT VT = N->getValueType(0);

  if (N0CFP && N1CFP) // Constant fold
    return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1);

  if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N->getOperand(1))) {
    const APFloat &V = N1C->getValueAPF();
    // copysign(x, c1) -> fabs(x)       iff ispos(c1)
    // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
    if (!V.isNegative()) {
      if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
        return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
    } else {
      if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
        return DAG.getNode(ISD::FNEG, SDLoc(N), VT,
                           DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0));
    }
  }

  // copysign(fabs(x), y) -> copysign(x, y)
  // copysign(fneg(x), y) -> copysign(x, y)
  // copysign(copysign(x,z), y) -> copysign(x, y)
  if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
      N0.getOpcode() == ISD::FCOPYSIGN)
    return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0.getOperand(0), N1);

  // copysign(x, abs(y)) -> abs(x)
  if (N1.getOpcode() == ISD::FABS)
    return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);

  // copysign(x, copysign(y,z)) -> copysign(x, z)
  if (N1.getOpcode() == ISD::FCOPYSIGN)
    return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(1));

  // copysign(x, fp_extend(y)) -> copysign(x, y)
  // copysign(x, fp_round(y)) -> copysign(x, y)
  if (CanCombineFCOPYSIGN_EXTEND_ROUND(N))
    return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(0));

  return SDValue();
}

SDValue DAGCombiner::visitFPOW(SDNode *N) {
  ConstantFPSDNode *ExponentC = isConstOrConstSplatFP(N->getOperand(1));
  if (!ExponentC)
    return SDValue();

  // Try to convert x ** (1/3) into cube root.
  // TODO: Handle the various flavors of long double.
  // TODO: Since we're approximating, we don't need an exact 1/3 exponent.
  //       Some range near 1/3 should be fine.
  EVT VT = N->getValueType(0);
  if ((VT == MVT::f32 && ExponentC->getValueAPF().isExactlyValue(1.0f/3.0f)) ||
      (VT == MVT::f64 && ExponentC->getValueAPF().isExactlyValue(1.0/3.0))) {
    // pow(-0.0, 1/3) = +0.0; cbrt(-0.0) = -0.0.
    // pow(-inf, 1/3) = +inf; cbrt(-inf) = -inf.
    // pow(-val, 1/3) =  nan; cbrt(-val) = -cbrt(val).
    // For regular numbers, rounding may cause the results to differ.
    // Therefore, we require { nsz ninf nnan afn } for this transform.
    // TODO: We could select out the special cases if we don't have nsz/ninf.
    SDNodeFlags Flags = N->getFlags();
    if (!Flags.hasNoSignedZeros() || !Flags.hasNoInfs() || !Flags.hasNoNaNs() ||
        !Flags.hasApproximateFuncs())
      return SDValue();

    // Do not create a cbrt() libcall if the target does not have it, and do not
    // turn a pow that has lowering support into a cbrt() libcall.
    if (!DAG.getLibInfo().has(LibFunc_cbrt) ||
        (!DAG.getTargetLoweringInfo().isOperationExpand(ISD::FPOW, VT) &&
         DAG.getTargetLoweringInfo().isOperationExpand(ISD::FCBRT, VT)))
      return SDValue();

    return DAG.getNode(ISD::FCBRT, SDLoc(N), VT, N->getOperand(0), Flags);
  }

  // Try to convert x ** (1/4) and x ** (3/4) into square roots.
  // x ** (1/2) is canonicalized to sqrt, so we do not bother with that case.
  // TODO: This could be extended (using a target hook) to handle smaller
  // power-of-2 fractional exponents.
  bool ExponentIs025 = ExponentC->getValueAPF().isExactlyValue(0.25);
  bool ExponentIs075 = ExponentC->getValueAPF().isExactlyValue(0.75);
  if (ExponentIs025 || ExponentIs075) {
    // pow(-0.0, 0.25) = +0.0; sqrt(sqrt(-0.0)) = -0.0.
    // pow(-inf, 0.25) = +inf; sqrt(sqrt(-inf)) =  NaN.
    // pow(-0.0, 0.75) = +0.0; sqrt(-0.0) * sqrt(sqrt(-0.0)) = +0.0.
    // pow(-inf, 0.75) = +inf; sqrt(-inf) * sqrt(sqrt(-inf)) =  NaN.
    // For regular numbers, rounding may cause the results to differ.
    // Therefore, we require { nsz ninf afn } for this transform.
    // TODO: We could select out the special cases if we don't have nsz/ninf.
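    // Note that for X >= 0, X**0.75 == X**0.5 * X**0.25
    //                               == sqrt(X) * sqrt(sqrt(X)),
    // which is exactly the expansion built below.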
    SDNodeFlags Flags = N->getFlags();

    // We only need no signed zeros for the 0.25 case.
    if ((!Flags.hasNoSignedZeros() && ExponentIs025) || !Flags.hasNoInfs() ||
        !Flags.hasApproximateFuncs())
      return SDValue();

    // Don't double the number of libcalls. We are trying to inline fast code.
    if (!DAG.getTargetLoweringInfo().isOperationLegalOrCustom(ISD::FSQRT, VT))
      return SDValue();

    // Assume that libcalls are the smallest code.
    // TODO: This restriction should probably be lifted for vectors.
    if (DAG.getMachineFunction().getFunction().hasOptSize())
      return SDValue();

    // pow(X, 0.25) --> sqrt(sqrt(X))
    SDLoc DL(N);
    SDValue Sqrt = DAG.getNode(ISD::FSQRT, DL, VT, N->getOperand(0), Flags);
    SDValue SqrtSqrt = DAG.getNode(ISD::FSQRT, DL, VT, Sqrt, Flags);
    if (ExponentIs025)
      return SqrtSqrt;
    // pow(X, 0.75) --> sqrt(X) * sqrt(sqrt(X))
    return DAG.getNode(ISD::FMUL, DL, VT, Sqrt, SqrtSqrt, Flags);
  }

  return SDValue();
}

static SDValue foldFPToIntToFP(SDNode *N, SelectionDAG &DAG,
                               const TargetLowering &TLI) {
  // This optimization is guarded by a function attribute because it may produce
  // unexpected results. I.e., programs may be relying on the platform-specific
  // undefined behavior when the float-to-int conversion overflows.
  const Function &F = DAG.getMachineFunction().getFunction();
  Attribute StrictOverflow = F.getFnAttribute("strict-float-cast-overflow");
  if (StrictOverflow.getValueAsString().equals("false"))
    return SDValue();

  // We only do this if the target has legal ftrunc. Otherwise, we'd likely be
  // replacing casts with a libcall. We also must be allowed to ignore -0.0
  // because FTRUNC will return -0.0 for inputs in the range (-1.0, -0.0), but
  // the integer conversions would return +0.0.
  // FIXME: We should be able to use node-level FMF here.
  // TODO: If strict math, should we use FABS (+ range check for signed cast)?
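  // For example, fptosi(-0.5) == 0, so sitofp(fptosi(-0.5)) == +0.0, while
  // ftrunc(-0.5) == -0.0; for all other in-range values the integer round-trip
  // truncates toward zero exactly like FTRUNC.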
  EVT VT = N->getValueType(0);
  if (!TLI.isOperationLegal(ISD::FTRUNC, VT) ||
      !DAG.getTarget().Options.NoSignedZerosFPMath)
    return SDValue();

  // fptosi/fptoui round towards zero, so converting from FP to integer and
  // back is the same as an 'ftrunc': [us]itofp (fpto[us]i X) --> ftrunc X
  SDValue N0 = N->getOperand(0);
  if (N->getOpcode() == ISD::SINT_TO_FP && N0.getOpcode() == ISD::FP_TO_SINT &&
      N0.getOperand(0).getValueType() == VT)
    return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0.getOperand(0));

  if (N->getOpcode() == ISD::UINT_TO_FP && N0.getOpcode() == ISD::FP_TO_UINT &&
      N0.getOperand(0).getValueType() == VT)
    return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0.getOperand(0));

  return SDValue();
}

SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT OpVT = N0.getValueType();

  // [us]itofp(undef) = 0, because the result value is bounded.
  if (N0.isUndef())
    return DAG.getConstantFP(0.0, SDLoc(N), VT);

  // fold (sint_to_fp c1) -> c1fp
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
      // ...but only if the target supports immediate floating-point values
      (!LegalOperations ||
       TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT)))
    return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);

  // If the input is a legal type, and SINT_TO_FP is not legal on this target,
  // but UINT_TO_FP is legal on this target, try to convert.
  if (!hasOperation(ISD::SINT_TO_FP, OpVT) &&
      hasOperation(ISD::UINT_TO_FP, OpVT)) {
    // If the sign bit is known to be zero, we can change this to UINT_TO_FP.
    if (DAG.SignBitIsZero(N0))
      return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
  }

  // The next optimizations are desirable only if SELECT_CC can be lowered.
  if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
    // fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0, cc)
    if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
        !VT.isVector() &&
        (!LegalOperations ||
         TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
      SDLoc DL(N);
      SDValue Ops[] =
        { N0.getOperand(0), N0.getOperand(1),
          DAG.getConstantFP(-1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
          N0.getOperand(2) };
      return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
    }

    // fold (sint_to_fp (zext (setcc x, y, cc))) ->
    //      (select_cc x, y, 1.0, 0.0, cc)
    if (N0.getOpcode() == ISD::ZERO_EXTEND &&
        N0.getOperand(0).getOpcode() == ISD::SETCC && !VT.isVector() &&
        (!LegalOperations ||
         TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
      SDLoc DL(N);
      SDValue Ops[] =
        { N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
          DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
          N0.getOperand(0).getOperand(2) };
      return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
    }
  }

  if (SDValue FTrunc = foldFPToIntToFP(N, DAG, TLI))
    return FTrunc;

  return SDValue();
}

SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT OpVT = N0.getValueType();

  // [us]itofp(undef) = 0, because the result value is bounded.
  if (N0.isUndef())
    return DAG.getConstantFP(0.0, SDLoc(N), VT);

  // fold (uint_to_fp c1) -> c1fp
  if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
      // ...but only if the target supports immediate floating-point values
      (!LegalOperations ||
       TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT)))
    return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);

  // If the input is a legal type, and UINT_TO_FP is not legal on this target,
  // but SINT_TO_FP is legal on this target, try to convert.
  if (!hasOperation(ISD::UINT_TO_FP, OpVT) &&
      hasOperation(ISD::SINT_TO_FP, OpVT)) {
    // If the sign bit is known to be zero, we can change this to SINT_TO_FP.
    if (DAG.SignBitIsZero(N0))
      return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
  }

  // The next optimizations are desirable only if SELECT_CC can be lowered.
  if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
    // fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, 1.0, 0.0, cc)
    if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
        (!LegalOperations ||
         TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
      SDLoc DL(N);
      SDValue Ops[] =
        { N0.getOperand(0), N0.getOperand(1),
          DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
          N0.getOperand(2) };
      return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
    }
  }

  if (SDValue FTrunc = foldFPToIntToFP(N, DAG, TLI))
    return FTrunc;

  return SDValue();
}

// Fold (fp_to_{s/u}int ({s/u}int_to_fp x)) -> zext x, sext x, trunc x, or x
static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (N0.getOpcode() != ISD::UINT_TO_FP && N0.getOpcode() != ISD::SINT_TO_FP)
    return SDValue();

  SDValue Src = N0.getOperand(0);
  EVT SrcVT = Src.getValueType();
  bool IsInputSigned = N0.getOpcode() == ISD::SINT_TO_FP;
  bool IsOutputSigned = N->getOpcode() == ISD::FP_TO_SINT;

  // We can safely assume the conversion won't overflow the output range,
  // because (for example) (uint8_t)18293.f is undefined behavior.

  // Since we can assume the conversion won't overflow, our decision as to
  // whether the input will fit in the float should depend on the minimum
  // of the input range and output range.

  // This means this is also safe for a signed input and unsigned output, since
  // a negative input would lead to undefined behavior.
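  // Worked example: for i16 -> f32 -> i32 with signed input and output,
  // InputSize = 15 and OutputSize = 31, so ActualSize = 15. f32 carries 24
  // bits of precision and 24 >= 15, so the round-trip below folds to a plain
  // sign_extend.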
  unsigned InputSize = (int)SrcVT.getScalarSizeInBits() - IsInputSigned;
  unsigned OutputSize = (int)VT.getScalarSizeInBits() - IsOutputSigned;
  unsigned ActualSize = std::min(InputSize, OutputSize);
  const fltSemantics &sem = DAG.EVTToAPFloatSemantics(N0.getValueType());

  // We can only fold away the float conversion if the input range can be
  // represented exactly in the float range.
  if (APFloat::semanticsPrecision(sem) >= ActualSize) {
    if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits()) {
      unsigned ExtOp = IsInputSigned && IsOutputSigned ? ISD::SIGN_EXTEND
                                                       : ISD::ZERO_EXTEND;
      return DAG.getNode(ExtOp, SDLoc(N), VT, Src);
    }
    if (VT.getScalarSizeInBits() < SrcVT.getScalarSizeInBits())
      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Src);
    return DAG.getBitcast(VT, Src);
  }
  return SDValue();
}

SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (fp_to_sint undef) -> undef
  if (N0.isUndef())
    return DAG.getUNDEF(VT);

  // fold (fp_to_sint c1fp) -> c1
  if (isConstantFPBuildVectorOrConstantFP(N0))
    return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0);

  return FoldIntToFPToInt(N, DAG);
}

SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (fp_to_uint undef) -> undef
  if (N0.isUndef())
    return DAG.getUNDEF(VT);

  // fold (fp_to_uint c1fp) -> c1
  if (isConstantFPBuildVectorOrConstantFP(N0))
    return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0);

  return FoldIntToFPToInt(N, DAG);
}

SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (fp_round c1fp) -> c1fp
  if (N0CFP)
    return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0, N1);

  // fold (fp_round (fp_extend x)) -> x
  if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
    return N0.getOperand(0);

  // fold (fp_round (fp_round x)) -> (fp_round x)
  if (N0.getOpcode() == ISD::FP_ROUND) {
    const bool NIsTrunc = N->getConstantOperandVal(1) == 1;
    const bool N0IsTrunc = N0.getConstantOperandVal(1) == 1;

    // Skip this folding if it results in an fp_round from f80 to f16.
    //
    // f80 to f16 always generates an expensive (and as yet, unimplemented)
    // libcall to __truncxfhf2 instead of selecting native f16 conversion
    // instructions from f32 or f64.  Moreover, the first (value-preserving)
    // fp_round from f80 to either f32 or f64 may become a NOP in platforms like
    // x86.
    if (N0.getOperand(0).getValueType() == MVT::f80 && VT == MVT::f16)
      return SDValue();

    // If the first fp_round isn't a value-preserving truncation, it might
    // introduce a tie in the second fp_round that wouldn't occur in the
    // single-step fp_round we want to fold to.
    // In other words, double rounding isn't the same as rounding.
    // Also, this is a value-preserving truncation iff both fp_round's are.
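    // Illustrative hazard of double rounding: the first rounding can land
    // exactly halfway between two values of the narrower type, and the second
    // rounding then resolves a tie that a single-step rounding would never
    // have seen.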
    if (DAG.getTarget().Options.UnsafeFPMath || N0IsTrunc) {
      SDLoc DL(N);
      return DAG.getNode(ISD::FP_ROUND, DL, VT, N0.getOperand(0),
                         DAG.getIntPtrConstant(NIsTrunc && N0IsTrunc, DL));
    }
  }

  // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
    SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT,
                              N0.getOperand(0), N1);
    AddToWorklist(Tmp.getNode());
    return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
                       Tmp, N0.getOperand(1));
  }

  if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
    return NewVSel;

  return SDValue();
}

SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // If this is fp_round(fpextend), don't fold it; allow ourselves to be folded.
  if (N->hasOneUse() &&
      N->use_begin()->getOpcode() == ISD::FP_ROUND)
    return SDValue();

  // fold (fp_extend c1fp) -> c1fp
  if (isConstantFPBuildVectorOrConstantFP(N0))
    return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0);

  // fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op)
  if (N0.getOpcode() == ISD::FP16_TO_FP &&
      TLI.getOperationAction(ISD::FP16_TO_FP, VT) == TargetLowering::Legal)
    return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), VT, N0.getOperand(0));

  // Turn fp_extend(fp_round(X, 1)) -> X since the fp_round doesn't affect the
  // value of X.
  if (N0.getOpcode() == ISD::FP_ROUND
      && N0.getConstantOperandVal(1) == 1) {
    SDValue In = N0.getOperand(0);
    if (In.getValueType() == VT) return In;
    if (VT.bitsLT(In.getValueType()))
      return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT,
                         In, N0.getOperand(1));
    return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In);
  }

  // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
       TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
                                     LN0->getChain(),
                                     LN0->getBasePtr(), N0.getValueType(),
                                     LN0->getMemOperand());
    CombineTo(N, ExtLoad);
    CombineTo(N0.getNode(),
              DAG.getNode(ISD::FP_ROUND, SDLoc(N0),
                          N0.getValueType(), ExtLoad,
                          DAG.getIntPtrConstant(1, SDLoc(N0))),
              ExtLoad.getValue(1));
    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
  }

  if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
    return NewVSel;

  return SDValue();
}

SDValue DAGCombiner::visitFCEIL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (fceil c1) -> fceil(c1)
  if (isConstantFPBuildVectorOrConstantFP(N0))
    return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0);

  return SDValue();
}

SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ftrunc c1) -> ftrunc(c1)
  if (isConstantFPBuildVectorOrConstantFP(N0))
    return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0);

  // fold ftrunc (known rounded int x) -> x
  // ftrunc is a part of fptosi/fptoui expansion on some targets, so this is
  // likely to be generated when extracting an integer from a rounded
  // floating-point value.
  switch (N0.getOpcode()) {
  default: break;
  case ISD::FRINT:
  case ISD::FTRUNC:
  case ISD::FNEARBYINT:
  case ISD::FFLOOR:
  case ISD::FCEIL:
    return N0;
  }

  return SDValue();
}

SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ffloor c1) -> ffloor(c1)
  if (isConstantFPBuildVectorOrConstantFP(N0))
    return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0);

  return SDValue();
}

// FIXME: FNEG and FABS have a lot in common; refactor.
SDValue DAGCombiner::visitFNEG(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // Constant fold FNEG.
  if (isConstantFPBuildVectorOrConstantFP(N0))
    return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0);

  if (TLI.isNegatibleForFree(N0, DAG, LegalOperations, ForCodeSize))
    return TLI.getNegatedExpression(N0, DAG, LegalOperations, ForCodeSize);

  // Transform fneg(bitconvert(x)) -> bitconvert(x ^ sign) to avoid loading
  // constant pool values.
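  // E.g. for f32, fneg(bitcast f32 (i32 X)) becomes
  // bitcast f32 (xor X, 0x80000000).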
  if (!TLI.isFNegFree(VT) &&
      N0.getOpcode() == ISD::BITCAST &&
      N0.getNode()->hasOneUse()) {
    SDValue Int = N0.getOperand(0);
    EVT IntVT = Int.getValueType();
    if (IntVT.isInteger() && !IntVT.isVector()) {
      APInt SignMask;
      if (N0.getValueType().isVector()) {
        // For a vector, get a mask such as 0x80... per scalar element
        // and splat it.
        SignMask = APInt::getSignMask(N0.getScalarValueSizeInBits());
        SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
      } else {
        // For a scalar, just generate 0x80...
        SignMask = APInt::getSignMask(IntVT.getSizeInBits());
      }
      SDLoc DL0(N0);
      Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int,
                        DAG.getConstant(SignMask, DL0, IntVT));
      AddToWorklist(Int.getNode());
      return DAG.getBitcast(VT, Int);
    }
  }

  // (fneg (fmul c, x)) -> (fmul -c, x)
  if (N0.getOpcode() == ISD::FMUL &&
      (N0.getNode()->hasOneUse() || !TLI.isFNegFree(VT))) {
    ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
    if (CFP1) {
      APFloat CVal = CFP1->getValueAPF();
      CVal.changeSign();
      if (Level >= AfterLegalizeDAG &&
          (TLI.isFPImmLegal(CVal, VT, ForCodeSize) ||
           TLI.isOperationLegal(ISD::ConstantFP, VT)))
        return DAG.getNode(
            ISD::FMUL, SDLoc(N), VT, N0.getOperand(0),
            DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0.getOperand(1)),
            N0->getFlags());
    }
  }

  return SDValue();
}

static SDValue visitFMinMax(SelectionDAG &DAG, SDNode *N,
                            APFloat (*Op)(const APFloat &, const APFloat &)) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
  const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);

  if (N0CFP && N1CFP) {
    const APFloat &C0 = N0CFP->getValueAPF();
    const APFloat &C1 = N1CFP->getValueAPF();
    return DAG.getConstantFP(Op(C0, C1), SDLoc(N), VT);
  }

  // Canonicalize to constant on RHS.
  if (isConstantFPBuildVectorOrConstantFP(N0) &&
      !isConstantFPBuildVectorOrConstantFP(N1))
    return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);

  return SDValue();
}

SDValue DAGCombiner::visitFMINNUM(SDNode *N) {
  return visitFMinMax(DAG, N, minnum);
}

SDValue DAGCombiner::visitFMAXNUM(SDNode *N) {
  return visitFMinMax(DAG, N, maxnum);
}

SDValue DAGCombiner::visitFMINIMUM(SDNode *N) {
  return visitFMinMax(DAG, N, minimum);
}

SDValue DAGCombiner::visitFMAXIMUM(SDNode *N) {
  return visitFMinMax(DAG, N, maximum);
}

SDValue DAGCombiner::visitFABS(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (fabs c1) -> fabs(c1)
  if (isConstantFPBuildVectorOrConstantFP(N0))
    return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);

  // fold (fabs (fabs x)) -> (fabs x)
  if (N0.getOpcode() == ISD::FABS)
    return N->getOperand(0);

  // fold (fabs (fneg x)) -> (fabs x)
  // fold (fabs (fcopysign x, y)) -> (fabs x)
  if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
    return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0));

  // fabs(bitcast(x)) -> bitcast(x & ~sign) to avoid constant pool loads.
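  // E.g. for f32, fabs(bitcast f32 (i32 X)) becomes
  // bitcast f32 (and X, 0x7fffffff).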
  if (!TLI.isFAbsFree(VT) && N0.getOpcode() == ISD::BITCAST && N0.hasOneUse()) {
    SDValue Int = N0.getOperand(0);
    EVT IntVT = Int.getValueType();
    if (IntVT.isInteger() && !IntVT.isVector()) {
      APInt SignMask;
      if (N0.getValueType().isVector()) {
        // For a vector, get a mask such as 0x7f... per scalar element
        // and splat it.
        SignMask = ~APInt::getSignMask(N0.getScalarValueSizeInBits());
        SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
      } else {
        // For a scalar, just generate 0x7f...
        SignMask = ~APInt::getSignMask(IntVT.getSizeInBits());
      }
      SDLoc DL(N0);
      Int = DAG.getNode(ISD::AND, DL, IntVT, Int,
                        DAG.getConstant(SignMask, DL, IntVT));
      AddToWorklist(Int.getNode());
      return DAG.getBitcast(N->getValueType(0), Int);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitBRCOND(SDNode *N) {
  SDValue Chain = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);

  // If N is a constant we could fold this into a fallthrough or unconditional
  // branch. However, that doesn't happen very often in normal code, because
  // Instcombine/SimplifyCFG should have handled the available opportunities.
  // If we did this folding here, it would be necessary to update the
  // MachineBasicBlock CFG, which is awkward.

  // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
  // on the target.
  if (N1.getOpcode() == ISD::SETCC &&
      TLI.isOperationLegalOrCustom(ISD::BR_CC,
                                   N1.getOperand(0).getValueType())) {
    return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
                       Chain, N1.getOperand(2),
                       N1.getOperand(0), N1.getOperand(1), N2);
  }

  if (N1.hasOneUse()) {
    if (SDValue NewN1 = rebuildSetCC(N1))
      return DAG.getNode(ISD::BRCOND, SDLoc(N), MVT::Other, Chain, NewN1, N2);
  }

  return SDValue();
}

SDValue DAGCombiner::rebuildSetCC(SDValue N) {
  if (N.getOpcode() == ISD::SRL ||
      (N.getOpcode() == ISD::TRUNCATE &&
       (N.getOperand(0).hasOneUse() &&
        N.getOperand(0).getOpcode() == ISD::SRL))) {
    // Look past the truncate.
    if (N.getOpcode() == ISD::TRUNCATE)
      N = N.getOperand(0);

    // Match this pattern so that we can generate simpler code:
    //
    //   %a = ...
    //   %b = and i32 %a, 2
    //   %c = srl i32 %b, 1
    //   brcond i32 %c ...
    //
    // into
    //
    //   %a = ...
    //   %b = and i32 %a, 2
    //   %c = setcc eq %b, 0
    //   brcond %c ...
    //
    // This applies only when the AND constant value has one bit set and the
    // SRL constant is equal to the log2 of the AND constant. The back-end is
    // smart enough to convert the result into a TEST/JMP sequence.
    SDValue Op0 = N.getOperand(0);
    SDValue Op1 = N.getOperand(1);

    if (Op0.getOpcode() == ISD::AND && Op1.getOpcode() == ISD::Constant) {
      SDValue AndOp1 = Op0.getOperand(1);

      if (AndOp1.getOpcode() == ISD::Constant) {
        const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();

        if (AndConst.isPowerOf2() &&
            cast<ConstantSDNode>(Op1)->getAPIntValue() == AndConst.logBase2()) {
          SDLoc DL(N);
          return DAG.getSetCC(DL, getSetCCResultType(Op0.getValueType()),
                              Op0, DAG.getConstant(0, DL, Op0.getValueType()),
                              ISD::SETNE);
        }
      }
    }
  }

  // Transform br(xor(x, y)) -> br(x != y)
  // Transform br(xor(xor(x,y), 1)) -> br (x == y)
  if (N.getOpcode() == ISD::XOR) {
    // Because we may call this on a speculatively constructed
    // SimplifiedSetCC Node, we need to simplify this node first.
    // Ideally this should be folded into SimplifySetCC and not
    // here. For now, grab a handle to N so we don't lose it from
    // replacements internal to the visit.
    HandleSDNode XORHandle(N);
    while (N.getOpcode() == ISD::XOR) {
      SDValue Tmp = visitXOR(N.getNode());
      // No simplification done.
      if (!Tmp.getNode())
        break;
      // Returning N is a form of in-visit replacement that may have
      // invalidated N. Grab the value from the handle.
      if (Tmp.getNode() == N.getNode())
        N = XORHandle.getValue();
      else // Node simplified. Try simplifying again.
        N = Tmp;
    }

    if (N.getOpcode() != ISD::XOR)
      return N;

    SDNode *TheXor = N.getNode();

    SDValue Op0 = TheXor->getOperand(0);
    SDValue Op1 = TheXor->getOperand(1);

    if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
      bool Equal = false;
      // Match xor(xor(x,y), 1): the inner xor is the LHS operand and the
      // constant 1 is the RHS; strip the outer xor and compare the inner
      // operands for equality.
      if (isOneConstant(Op1) && Op0.hasOneUse() &&
          Op0.getOpcode() == ISD::XOR) {
        TheXor = Op0.getNode();
        Op0 = TheXor->getOperand(0);
        Op1 = TheXor->getOperand(1);
        Equal = true;
      }

      EVT SetCCVT = N.getValueType();
      if (LegalTypes)
        SetCCVT = getSetCCResultType(SetCCVT);
      // Replace the uses of XOR with SETCC
      return DAG.getSetCC(SDLoc(TheXor), SetCCVT, Op0, Op1,
                          Equal ? ISD::SETEQ : ISD::SETNE);
    }
  }

  return SDValue();
}

// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
//
SDValue DAGCombiner::visitBR_CC(SDNode *N) {
  CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
  SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);

  // If N is a constant we could fold this into a fallthrough or unconditional
  // branch. However, that doesn't happen very often in normal code, because
  // Instcombine/SimplifyCFG should have handled the available opportunities.
  // If we did this folding here, it would be necessary to update the
  // MachineBasicBlock CFG, which is awkward.

  // Use SimplifySetCC to simplify SETCC's.
  SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()),
                               CondLHS, CondRHS, CC->get(), SDLoc(N),
                               false);
  if (Simp.getNode()) AddToWorklist(Simp.getNode());

  // fold to a simpler setcc
  if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
    return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
                       N->getOperand(0), Simp.getOperand(2),
                       Simp.getOperand(0), Simp.getOperand(1),
                       N->getOperand(4));

  return SDValue();
}

/// Return true if 'Use' is a load or a store that uses N as its base pointer
/// and that N may be folded in the load / store addressing mode.
static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
                                    SelectionDAG &DAG,
                                    const TargetLowering &TLI) {
  EVT VT;
  unsigned AS;

  if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(Use)) {
    if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
      return false;
    VT = LD->getMemoryVT();
    AS = LD->getAddressSpace();
  } else if (StoreSDNode *ST  = dyn_cast<StoreSDNode>(Use)) {
    if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
      return false;
    VT = ST->getMemoryVT();
    AS = ST->getAddressSpace();
  } else
    return false;

  TargetLowering::AddrMode AM;
  if (N->getOpcode() == ISD::ADD) {
    AM.HasBaseReg = true;
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (Offset)
      // [reg +/- imm]
      AM.BaseOffs = Offset->getSExtValue();
    else
      // [reg +/- reg]
      AM.Scale = 1;
  } else if (N->getOpcode() == ISD::SUB) {
    AM.HasBaseReg = true;
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (Offset)
      // [reg +/- imm]
      AM.BaseOffs = -Offset->getSExtValue();
    else
      // [reg +/- reg]
      AM.Scale = 1;
  } else
    return false;

  return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM,
                                   VT.getTypeForEVT(*DAG.getContext()), AS);
}

/// Try turning a load/store into a pre-indexed load/store when the base
/// pointer is an add or subtract and it has other uses besides the load/store.
/// After the transformation, the new indexed load/store has effectively folded
/// the add/subtract in and all of its other uses are redirected to the
/// new load/store.
bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
  if (Level < AfterLegalizeDAG)
    return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(N)) {
    if (LD->isIndexed())
      return false;
    VT = LD->getMemoryVT();
    if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
        !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
      return false;
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST  = dyn_cast<StoreSDNode>(N)) {
    if (ST->isIndexed())
      return false;
    VT = ST->getMemoryVT();
    if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
        !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
      return false;
    Ptr = ST->getBasePtr();
    isLoad = false;
  } else {
    return false;
  }

  // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
  // out.  There is no reason to make this a preinc/predec.
  if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
      Ptr.getNode()->hasOneUse())
    return false;

  // Ask the target to do addressing mode selection.
  SDValue BasePtr;
  SDValue Offset;
  ISD::MemIndexedMode AM = ISD::UNINDEXED;
  if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
    return false;

  // Backends without true r+i pre-indexed forms may need to pass a
  // constant base with a variable offset so that constant coercion
  // will work with the patterns in canonical form.
  bool Swapped = false;
  if (isa<ConstantSDNode>(BasePtr)) {
    std::swap(BasePtr, Offset);
    Swapped = true;
  }

  // Don't create an indexed load / store with zero offset.
  if (isNullConstant(Offset))
    return false;

  // Try turning it into a pre-indexed load / store except when:
  // 1) The new base ptr is a frame index.
  // 2) If N is a store and the new base ptr is either the same as or is a
  //    predecessor of the value being stored.
  // 3) Another use of the old base ptr is a predecessor of N. If Ptr is
  //    folded, that would create a cycle.
  // 4) All uses are load / store ops that use it as old base ptr.

  // Check #1.  Preinc'ing a frame index would require copying the stack pointer
  // (plus the implicit offset) to a register to preinc anyway.
  if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
    return false;

  // Check #2.
  if (!isLoad) {
    SDValue Val = cast<StoreSDNode>(N)->getValue();

    // Would require a copy.
    if (Val == BasePtr)
      return false;

    // Would create a cycle.
    if (Val == Ptr || Ptr->isPredecessorOf(Val.getNode()))
      return false;
  }

  // Caches for hasPredecessorHelper.
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(N);

  // If the offset is a constant, there may be other adds of constants that
  // can be folded with this one. We should do this to avoid having to keep
  // a copy of the original base pointer.
  SmallVector<SDNode *, 16> OtherUses;
  if (isa<ConstantSDNode>(Offset))
    for (SDNode::use_iterator UI = BasePtr.getNode()->use_begin(),
                              UE = BasePtr.getNode()->use_end();
         UI != UE; ++UI) {
      SDUse &Use = UI.getUse();
      // Skip the use that is Ptr and uses of other results from BasePtr's
      // node (important for nodes that return multiple results).
      if (Use.getUser() == Ptr.getNode() || Use != BasePtr)
        continue;

      if (SDNode::hasPredecessorHelper(Use.getUser(), Visited, Worklist))
        continue;

      if (Use.getUser()->getOpcode() != ISD::ADD &&
          Use.getUser()->getOpcode() != ISD::SUB) {
        OtherUses.clear();
        break;
      }

      SDValue Op1 = Use.getUser()->getOperand((UI.getOperandNo() + 1) & 1);
      if (!isa<ConstantSDNode>(Op1)) {
        OtherUses.clear();
        break;
      }

      // FIXME: In some cases, we can be smarter about this.
      if (Op1.getValueType() != Offset.getValueType()) {
        OtherUses.clear();
        break;
      }

      OtherUses.push_back(Use.getUser());
    }

  if (Swapped)
    std::swap(BasePtr, Offset);

  // Now check for #3 and #4.
  bool RealUse = false;

  for (SDNode *Use : Ptr.getNode()->uses()) {
    if (Use == N)
      continue;
    if (SDNode::hasPredecessorHelper(Use, Visited, Worklist))
      return false;

    // If Ptr can be folded into the addressing mode of every other use,
    // this transformation is not profitable; require at least one use
    // where it cannot be folded.
    if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
      RealUse = true;
  }

  if (!RealUse)
    return false;

  SDValue Result;
  if (isLoad)
    Result = DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
                                BasePtr, Offset, AM);
  else
    Result = DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
                                 BasePtr, Offset, AM);
  ++PreIndexedNodes;
  ++NodesCombined;
  LLVM_DEBUG(dbgs() << "\nReplacing.4 "; N->dump(&DAG); dbgs() << "\nWith: ";
             Result.getNode()->dump(&DAG); dbgs() << '\n');
  WorklistRemover DeadNodes(*this);
  if (isLoad) {
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
  } else {
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
  }

  // Finally, since the node is now dead, remove it from the graph.
  deleteAndRecombine(N);

  if (Swapped)
    std::swap(BasePtr, Offset);

  // Replace other uses of BasePtr that can be updated to use Ptr
  for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) {
    unsigned OffsetIdx = 1;
    if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode())
      OffsetIdx = 0;
    assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() ==
           BasePtr.getNode() && "Expected BasePtr operand");

    // We need to replace ptr0 in the following expression:
    //   x0 * offset0 + y0 * ptr0 = t0
    // knowing that
    //   x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
    //
    // where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
    // indexed load/store and the expression that needs to be re-written.
    //
    // Therefore, we have:
    //   t0 = (x0 * offset0 - x1 * y0 * y1 * offset1) + (y0 * y1) * t1
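    //
    // Worked example: for a PRE_INC result t1 = ptr0 + offset1 (x1 = y1 = 1)
    // and another use t0 = ptr0 + offset0 (x0 = y0 = 1), this reduces to
    // t0 = (offset0 - offset1) + t1, which is the ADD built below.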

    ConstantSDNode *CN =
      cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
    int X0, X1, Y0, Y1;
    const APInt &Offset0 = CN->getAPIntValue();
    APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();

    X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
    Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
    X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
    Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;

    unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;

    APInt CNV = Offset0;
    if (X0 < 0) CNV = -CNV;
    if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1;
    else CNV = CNV - Offset1;

    SDLoc DL(OtherUses[i]);

    // We can now generate the new expression.
    SDValue NewOp1 = DAG.getConstant(CNV, DL, CN->getValueType(0));
    SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0);

    SDValue NewUse = DAG.getNode(Opcode,
                                 DL,
                                 OtherUses[i]->getValueType(0), NewOp1, NewOp2);
    DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
    deleteAndRecombine(OtherUses[i]);
  }

  // Replace the uses of Ptr with uses of the updated base value.
  DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
  deleteAndRecombine(Ptr.getNode());
  AddToWorklist(Result.getNode());

  return true;
}

/// Try to combine a load/store with an add/sub of the base pointer node into a
/// post-indexed load/store. The transformation effectively folds the
/// add/subtract into the new indexed load/store, and all of its uses are
/// redirected to the new load/store.
bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
  if (Level < AfterLegalizeDAG)
    return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(N)) {
    if (LD->isIndexed())
      return false;
    VT = LD->getMemoryVT();
    if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
        !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
      return false;
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST  = dyn_cast<StoreSDNode>(N)) {
    if (ST->isIndexed())
      return false;
    VT = ST->getMemoryVT();
    if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
        !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
      return false;
    Ptr = ST->getBasePtr();
    isLoad = false;
  } else {
    return false;
  }

  if (Ptr.getNode()->hasOneUse())
    return false;

  for (SDNode *Op : Ptr.getNode()->uses()) {
    if (Op == N ||
        (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
      continue;

    SDValue BasePtr;
    SDValue Offset;
    ISD::MemIndexedMode AM = ISD::UNINDEXED;
    if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
      // Don't create an indexed load / store with zero offset.
      if (isNullConstant(Offset))
        continue;

      // Try turning it into a post-indexed load / store except when
      // 1) All uses are load / store ops that use it as base ptr (and
      //    it may be folded into the addressing mode).
      // 2) Op must be independent of N, i.e. Op is neither a predecessor
      //    nor a successor of N. Otherwise, if Op is folded that would
      //    create a cycle.

      if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
        continue;

      // Check for #1.
      bool TryNext = false;
      for (SDNode *Use : BasePtr.getNode()->uses()) {
        if (Use == Ptr.getNode())
          continue;

        // If all the uses are load / store addresses, then don't do the
        // transformation.
        if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
          bool RealUse = false;
          for (SDNode *UseUse : Use->uses()) {
            if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
              RealUse = true;
          }

          if (!RealUse) {
            TryNext = true;
            break;
          }
        }
      }

      if (TryNext)
        continue;

      // Check for #2.
      SmallPtrSet<const SDNode *, 32> Visited;
      SmallVector<const SDNode *, 8> Worklist;
      // Ptr is predecessor to both N and Op.
      Visited.insert(Ptr.getNode());
      Worklist.push_back(N);
      Worklist.push_back(Op);
      if (!SDNode::hasPredecessorHelper(N, Visited, Worklist) &&
          !SDNode::hasPredecessorHelper(Op, Visited, Worklist)) {
        SDValue Result = isLoad
          ? DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
                               BasePtr, Offset, AM)
          : DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
                                BasePtr, Offset, AM);
        ++PostIndexedNodes;
        ++NodesCombined;
        LLVM_DEBUG(dbgs() << "\nReplacing.5 "; N->dump(&DAG);
                   dbgs() << "\nWith: "; Result.getNode()->dump(&DAG);
                   dbgs() << '\n');
        WorklistRemover DeadNodes(*this);
        if (isLoad) {
          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
        } else {
          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
        }

        // Finally, since the node is now dead, remove it from the graph.
        deleteAndRecombine(N);

        // Replace the uses of Use with uses of the updated base value.
        DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
                                      Result.getValue(isLoad ? 1 : 0));
        deleteAndRecombine(Op);
        return true;
      }
    }
  }

  return false;
}

/// Return the base-pointer arithmetic from an indexed \p LD.
SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) {
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  assert(AM != ISD::UNINDEXED);
  SDValue BP = LD->getOperand(1);
  SDValue Inc = LD->getOperand(2);

  // Some backends use TargetConstants for load offsets, but don't expect
  // TargetConstants in general ADD nodes. We can convert these constants into
  // regular Constants (if the constant is not opaque).
  assert((Inc.getOpcode() != ISD::TargetConstant ||
          !cast<ConstantSDNode>(Inc)->isOpaque()) &&
         "Cannot split out indexing using opaque target constants");
  if (Inc.getOpcode() == ISD::TargetConstant) {
    ConstantSDNode *ConstInc = cast<ConstantSDNode>(Inc);
    Inc = DAG.getConstant(*ConstInc->getConstantIntValue(), SDLoc(Inc),
                          ConstInc->getValueType(0));
  }

  unsigned Opc =
      (AM == ISD::PRE_INC || AM == ISD::POST_INC ? ISD::ADD : ISD::SUB);
  return DAG.getNode(Opc, SDLoc(LD), BP.getSimpleValueType(), BP, Inc);
}

static inline int numVectorEltsOrZero(EVT T) {
  return T.isVector() ? T.getVectorNumElements() : 0;
}

bool DAGCombiner::getTruncatedStoreValue(StoreSDNode *ST, SDValue &Val) {
  Val = ST->getValue();
  EVT STType = Val.getValueType();
  EVT STMemType = ST->getMemoryVT();
  if (STType == STMemType)
    return true;
  if (isTypeLegal(STMemType))
    return false; // fail.
  if (STType.isFloatingPoint() && STMemType.isFloatingPoint() &&
      TLI.isOperationLegal(ISD::FTRUNC, STMemType)) {
    Val = DAG.getNode(ISD::FTRUNC, SDLoc(ST), STMemType, Val);
    return true;
  }
  if (numVectorEltsOrZero(STType) == numVectorEltsOrZero(STMemType) &&
      STType.isInteger() && STMemType.isInteger()) {
    Val = DAG.getNode(ISD::TRUNCATE, SDLoc(ST), STMemType, Val);
    return true;
  }
  if (STType.getSizeInBits() == STMemType.getSizeInBits()) {
    Val = DAG.getBitcast(STMemType, Val);
    return true;
  }
  return false; // fail.
}

bool DAGCombiner::extendLoadedValueToExtension(LoadSDNode *LD, SDValue &Val) {
  EVT LDMemType = LD->getMemoryVT();
  EVT LDType = LD->getValueType(0);
  assert(Val.getValueType() == LDMemType &&
         "Attempting to extend value of non-matching type");
  if (LDType == LDMemType)
    return true;
  if (LDMemType.isInteger() && LDType.isInteger()) {
    switch (LD->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      Val = DAG.getBitcast(LDType, Val);
      return true;
    case ISD::EXTLOAD:
      Val = DAG.getNode(ISD::ANY_EXTEND, SDLoc(LD), LDType, Val);
      return true;
    case ISD::SEXTLOAD:
      Val = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(LD), LDType, Val);
      return true;
    case ISD::ZEXTLOAD:
      Val = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(LD), LDType, Val);
      return true;
    }
  }
  return false;
}

SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
  if (OptLevel == CodeGenOpt::None || !LD->isSimple())
    return SDValue();
  SDValue Chain = LD->getOperand(0);
  StoreSDNode *ST = dyn_cast<StoreSDNode>(Chain.getNode());
  // TODO: Relax this restriction for unordered atomics (see D66309)
  if (!ST || !ST->isSimple())
    return SDValue();

  EVT LDType = LD->getValueType(0);
  EVT LDMemType = LD->getMemoryVT();
  EVT STMemType = ST->getMemoryVT();
  EVT STType = ST->getValue().getValueType();

  BaseIndexOffset BasePtrLD = BaseIndexOffset::match(LD, DAG);
  BaseIndexOffset BasePtrST = BaseIndexOffset::match(ST, DAG);
  int64_t Offset;
  if (!BasePtrST.equalBaseIndex(BasePtrLD, DAG, Offset))
    return SDValue();

  // Normalize for endianness. After this, Offset=0 will denote that the least
  // significant bit in the loaded value maps to the least significant bit in
  // the stored value. With Offset=n (for n > 0) the loaded value starts at the
  // n:th least significant byte of the stored value.
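  // Example: an 8-bit load forwarded from a 32-bit store, reading the byte at
  // address offset 1. On a little-endian target Offset stays 1 (bits [8,16) of
  // the stored value); on a big-endian target it becomes (32 - 8) / 8 - 1 == 2.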
  if (DAG.getDataLayout().isBigEndian())
    Offset = (STMemType.getStoreSizeInBits() -
              LDMemType.getStoreSizeInBits()) / 8 - Offset;

  // Check that the stored value covers all bits that are loaded.
  bool STCoversLD =
      (Offset >= 0) &&
      (Offset * 8 + LDMemType.getSizeInBits() <= STMemType.getSizeInBits());

  auto ReplaceLd = [&](LoadSDNode *LD, SDValue Val, SDValue Chain) -> SDValue {
    if (LD->isIndexed()) {
      bool IsSub = (LD->getAddressingMode() == ISD::PRE_DEC ||
                    LD->getAddressingMode() == ISD::POST_DEC);
      unsigned Opc = IsSub ? ISD::SUB : ISD::ADD;
      SDValue Idx = DAG.getNode(Opc, SDLoc(LD), LD->getOperand(1).getValueType(),
                             LD->getOperand(1), LD->getOperand(2));
      SDValue Ops[] = {Val, Idx, Chain};
      return CombineTo(LD, Ops, 3);
    }
    return CombineTo(LD, Val, Chain);
  };

  if (!STCoversLD)
    return SDValue();

  // Memory as copy space (potentially masked).
  if (Offset == 0 && LDType == STType && STMemType == LDMemType) {
    // Simple case: Direct non-truncating forwarding
    if (LDType.getSizeInBits() == LDMemType.getSizeInBits())
      return ReplaceLd(LD, ST->getValue(), Chain);
    // Can we model the truncate and extension with an and mask?
    if (STType.isInteger() && LDMemType.isInteger() && !STType.isVector() &&
        !LDMemType.isVector() && LD->getExtensionType() != ISD::SEXTLOAD) {
      // Mask to size of LDMemType
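      // E.g. an i32 value stored as i8 and reloaded by an i8 zext-load of i32
      // forwards as (stored value & 0xFF).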
      auto Mask =
          DAG.getConstant(APInt::getLowBitsSet(STType.getSizeInBits(),
                                               STMemType.getSizeInBits()),
                          SDLoc(ST), STType);
      auto Val = DAG.getNode(ISD::AND, SDLoc(LD), LDType, ST->getValue(), Mask);
      return ReplaceLd(LD, Val, Chain);
    }
  }

  // TODO: Deal with nonzero offset.
  if (LD->getBasePtr().isUndef() || Offset != 0)
    return SDValue();
  // Model the necessary truncations / extensions.
  SDValue Val;
  // Truncate the value to the stored memory size.
  do {
    if (!getTruncatedStoreValue(ST, Val))
      continue;
    if (!isTypeLegal(LDMemType))
      continue;
    if (STMemType != LDMemType) {
      // TODO: Support vectors? This requires extract_subvector/bitcast.
      if (!STMemType.isVector() && !LDMemType.isVector() &&
          STMemType.isInteger() && LDMemType.isInteger())
        Val = DAG.getNode(ISD::TRUNCATE, SDLoc(LD), LDMemType, Val);
      else
        continue;
    }
    if (!extendLoadedValueToExtension(LD, Val))
      continue;
    return ReplaceLd(LD, Val, Chain);
  } while (false);

  // On failure, clean up dead nodes we may have created.
  if (Val->use_empty())
    deleteAndRecombine(Val.getNode());
  return SDValue();
}

SDValue DAGCombiner::visitLOAD(SDNode *N) {
  LoadSDNode *LD  = cast<LoadSDNode>(N);
  SDValue Chain = LD->getChain();
  SDValue Ptr   = LD->getBasePtr();

  // If load is not volatile and there are no uses of the loaded value (and
  // the updated indexed value in case of indexed loads), change uses of the
  // chain value into uses of the chain input (i.e. delete the dead load).
  // TODO: Allow this for unordered atomics (see D66309)
  if (LD->isSimple()) {
    if (N->getValueType(1) == MVT::Other) {
      // Unindexed loads.
      if (!N->hasAnyUseOfValue(0)) {
        // It's not safe to use the two value CombineTo variant here. e.g.
        // v1, chain2 = load chain1, loc
        // v2, chain3 = load chain2, loc
        // v3         = add v2, c
        // Now we replace use of chain2 with chain1.  This makes the second load
        // isomorphic to the one we are deleting, and thus makes this load live.
        LLVM_DEBUG(dbgs() << "\nReplacing.6 "; N->dump(&DAG);
                   dbgs() << "\nWith chain: "; Chain.getNode()->dump(&DAG);
                   dbgs() << "\n");
        WorklistRemover DeadNodes(*this);
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
        AddUsersToWorklist(Chain.getNode());
        if (N->use_empty())
          deleteAndRecombine(N);

        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
      }
    } else {
      // Indexed loads.
      assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");

      // If this load has an opaque TargetConstant offset, then we cannot split
      // the indexing into an add/sub directly (that TargetConstant may not be
      // valid for a different type of node, and we cannot convert an opaque
      // target constant into a regular constant).
      bool HasOTCInc = LD->getOperand(2).getOpcode() == ISD::TargetConstant &&
                       cast<ConstantSDNode>(LD->getOperand(2))->isOpaque();

      if (!N->hasAnyUseOfValue(0) &&
          ((MaySplitLoadIndex && !HasOTCInc) || !N->hasAnyUseOfValue(1))) {
        SDValue Undef = DAG.getUNDEF(N->getValueType(0));
        SDValue Index;
        if (N->hasAnyUseOfValue(1) && MaySplitLoadIndex && !HasOTCInc) {
          Index = SplitIndexingFromLoad(LD);
          // Try to fold the base pointer arithmetic into subsequent loads and
          // stores.
          AddUsersToWorklist(N);
        } else
          Index = DAG.getUNDEF(N->getValueType(1));
        LLVM_DEBUG(dbgs() << "\nReplacing.7 "; N->dump(&DAG);
                   dbgs() << "\nWith: "; Undef.getNode()->dump(&DAG);
                   dbgs() << " and 2 other values\n");
        WorklistRemover DeadNodes(*this);
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Index);
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
        deleteAndRecombine(N);
        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
      }
    }
  }

  // If this load is directly stored, replace the load value with the stored
  // value.
  if (auto V = ForwardStoreValueToDirectLoad(LD))
    return V;

  // Try to infer better alignment information than the load already has.
  if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
    if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
      if (Align > LD->getAlignment() && LD->getSrcValueOffset() % Align == 0) {
        SDValue NewLoad = DAG.getExtLoad(
            LD->getExtensionType(), SDLoc(N), LD->getValueType(0), Chain, Ptr,
            LD->getPointerInfo(), LD->getMemoryVT(), Align,
            LD->getMemOperand()->getFlags(), LD->getAAInfo());
        // NewLoad will always be N as we are only refining the alignment
        assert(NewLoad.getNode() == N);
        (void)NewLoad;
      }
    }
  }

  if (LD->isUnindexed()) {
    // Walk up chain skipping non-aliasing memory nodes.
    SDValue BetterChain = FindBetterChain(LD, Chain);

    // If there is a better chain.
    if (Chain != BetterChain) {
      SDValue ReplLoad;

      // Replace the chain to avoid the dependency.
      if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
        ReplLoad = DAG.getLoad(N->getValueType(0), SDLoc(LD),
                               BetterChain, Ptr, LD->getMemOperand());
      } else {
        ReplLoad = DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD),
                                  LD->getValueType(0),
                                  BetterChain, Ptr, LD->getMemoryVT(),
                                  LD->getMemOperand());
      }

      // Create token factor to keep old chain connected.
      SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
                                  MVT::Other, Chain, ReplLoad.getValue(1));

      // Replace uses with load result and token factor
      return CombineTo(N, ReplLoad.getValue(0), Token);
    }
  }

  // Try transforming N to an indexed load.
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDValue(N, 0);

  // Try to slice up N to more direct loads if the slices are mapped to
  // different register banks or pairing can take place.
  if (SliceUpLoad(N))
    return SDValue(N, 0);

  return SDValue();
}

namespace {

/// Helper structure used to slice a load into smaller loads.
/// Basically a slice is obtained from the following sequence:
/// Origin = load Ty1, Base
/// Shift = srl Ty1 Origin, CstTy Amount
/// Inst = trunc Shift to Ty2
///
/// Then, it will be rewritten into:
/// Slice = load SliceTy, Base + SliceOffset
/// [Inst = zext Slice to Ty2], only if SliceTy <> Ty2
///
/// SliceTy is deduced from the number of bits that are actually used to
/// build Inst.
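///
/// For example (little endian): Origin = load i32 %p, Shift = 16 and
/// Inst = trunc to i16 rewrites to Slice = load i16, %p + 2, with no zext
/// needed since SliceTy == Ty2.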
struct LoadedSlice {
  /// Helper structure used to compute the cost of a slice.
  struct Cost {
    /// Are we optimizing for code size.
    bool ForCodeSize = false;

    /// Various costs.
    unsigned Loads = 0;
    unsigned Truncates = 0;
    unsigned CrossRegisterBanksCopies = 0;
    unsigned ZExts = 0;
    unsigned Shift = 0;

    explicit Cost(bool ForCodeSize) : ForCodeSize(ForCodeSize) {}

    /// Get the cost of one isolated slice.
    Cost(const LoadedSlice &LS, bool ForCodeSize)
        : ForCodeSize(ForCodeSize), Loads(1) {
      EVT TruncType = LS.Inst->getValueType(0);
      EVT LoadedType = LS.getLoadedType();
      if (TruncType != LoadedType &&
          !LS.DAG->getTargetLoweringInfo().isZExtFree(LoadedType, TruncType))
        ZExts = 1;
    }

    /// Account for slicing gain in the current cost.
    /// Slicing provides a few gains, like removing a shift or a
    /// truncate. This method adds the gain from this slice to the
    /// cost of the original load.
    void addSliceGain(const LoadedSlice &LS) {
      // Each slice saves a truncate.
      const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo();
      if (!TLI.isTruncateFree(LS.Inst->getOperand(0).getValueType(),
                              LS.Inst->getValueType(0)))
        ++Truncates;
      // If there is a shift amount, this slice gets rid of it.
      if (LS.Shift)
        ++Shift;
      // If this slice can merge a cross register bank copy, account for it.
      if (LS.canMergeExpensiveCrossRegisterBankCopy())
        ++CrossRegisterBanksCopies;
    }

    Cost &operator+=(const Cost &RHS) {
      Loads += RHS.Loads;
      Truncates += RHS.Truncates;
      CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies;
      ZExts += RHS.ZExts;
      Shift += RHS.Shift;
      return *this;
    }

    bool operator==(const Cost &RHS) const {
      return Loads == RHS.Loads && Truncates == RHS.Truncates &&
             CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies &&
             ZExts == RHS.ZExts && Shift == RHS.Shift;
    }

    bool operator!=(const Cost &RHS) const { return !(*this == RHS); }

    bool operator<(const Cost &RHS) const {
      // Assume cross register banks copies are as expensive as loads.
      // FIXME: Do we want some more target hooks?
      unsigned ExpensiveOpsLHS = Loads + CrossRegisterBanksCopies;
      unsigned ExpensiveOpsRHS = RHS.Loads + RHS.CrossRegisterBanksCopies;
      // Unless we are optimizing for code size, consider the
      // expensive operation first.
      if (!ForCodeSize && ExpensiveOpsLHS != ExpensiveOpsRHS)
        return ExpensiveOpsLHS < ExpensiveOpsRHS;
      return (Truncates + ZExts + Shift + ExpensiveOpsLHS) <
             (RHS.Truncates + RHS.ZExts + RHS.Shift + ExpensiveOpsRHS);
    }

    bool operator>(const Cost &RHS) const { return RHS < *this; }

    bool operator<=(const Cost &RHS) const { return !(RHS < *this); }

    bool operator>=(const Cost &RHS) const { return !(*this < RHS); }
  };

  // The last instruction that represents the slice. This should be a
  // truncate instruction.
  SDNode *Inst;

  // The original load instruction.
  LoadSDNode *Origin;

  // The right shift amount in bits from the original load.
  unsigned Shift;

  // The DAG from which Origin came.
  // This is used to get some contextual information about legal types, etc.
  SelectionDAG *DAG;

  LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr,
              unsigned Shift = 0, SelectionDAG *DAG = nullptr)
      : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}

  /// Get the bits used in a chunk of bits \p BitWidth large.
  /// \return Result is \p BitWidth bits wide, with used bits set to 1 and
  ///         unused bits set to 0.
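  /// For example, for Inst = trunc (srl i32 %x, 8) to i8, the returned mask
  /// is 0x0000FF00.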
  APInt getUsedBits() const {
    // Reproduce the trunc(lshr) sequence:
    // - Start from the truncated value.
    // - Zero extend to the desired bit width.
    // - Shift left.
    assert(Origin && "No original load to compare against.");
    unsigned BitWidth = Origin->getValueSizeInBits(0);
    assert(Inst && "This slice is not bound to an instruction");
    assert(Inst->getValueSizeInBits(0) <= BitWidth &&
           "Extracted slice is bigger than the whole type!");
    APInt UsedBits(Inst->getValueSizeInBits(0), 0);
    UsedBits.setAllBits();
    UsedBits = UsedBits.zext(BitWidth);
    UsedBits <<= Shift;
    return UsedBits;
  }

  /// Get the size of the slice to be loaded in bytes.
  unsigned getLoadedSize() const {
    unsigned SliceSize = getUsedBits().countPopulation();
    assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte.");
    return SliceSize / 8;
  }

  /// Get the type that will be loaded for this slice.
  /// Note: This may not be the final type for the slice.
  EVT getLoadedType() const {
    assert(DAG && "Missing context");
    LLVMContext &Ctxt = *DAG->getContext();
    return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8);
  }

  /// Get the alignment of the load used for this slice.
  unsigned getAlignment() const {
    unsigned Alignment = Origin->getAlignment();
    uint64_t Offset = getOffsetFromBase();
    if (Offset != 0)
      Alignment = MinAlign(Alignment, Alignment + Offset);
    return Alignment;
  }

  /// Check if this slice can be rewritten with legal operations.
  bool isLegal() const {
    // An invalid slice is not legal.
    if (!Origin || !Inst || !DAG)
      return false;

    // Offsets are for indexed loads only; we do not handle that.
    if (!Origin->getOffset().isUndef())
      return false;

    const TargetLowering &TLI = DAG->getTargetLoweringInfo();

    // Check that the type is legal.
    EVT SliceType = getLoadedType();
    if (!TLI.isTypeLegal(SliceType))
      return false;

    // Check that the load is legal for this type.
    if (!TLI.isOperationLegal(ISD::LOAD, SliceType))
      return false;

    // Check that the offset can be computed.
    // 1. Check its type.
    EVT PtrType = Origin->getBasePtr().getValueType();
    if (PtrType == MVT::Untyped || PtrType.isExtended())
      return false;

    // 2. Check that it fits in the immediate.
    if (!TLI.isLegalAddImmediate(getOffsetFromBase()))
      return false;

    // 3. Check that the computation is legal.
    if (!TLI.isOperationLegal(ISD::ADD, PtrType))
      return false;

    // Check that the zext is legal if it needs one.
    EVT TruncateType = Inst->getValueType(0);
    if (TruncateType != SliceType &&
        !TLI.isOperationLegal(ISD::ZERO_EXTEND, TruncateType))
      return false;

    return true;
  }

  /// Get the offset in bytes of this slice in the original chunk of
  /// bits.
  /// \pre DAG != nullptr.
  uint64_t getOffsetFromBase() const {
    assert(DAG && "Missing context.");
    bool IsBigEndian = DAG->getDataLayout().isBigEndian();
    assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported.");
    uint64_t Offset = Shift / 8;
    unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8;
    assert(!(Origin->getValueSizeInBits(0) & 0x7) &&
           "The size of the original loaded type is not a multiple of a"
           " byte.");
    // If Offset is bigger than TySizeInBytes, it means we are loading all
    // zeros. This should have been optimized out earlier in the process.
    assert(TySizeInBytes > Offset &&
           "Invalid shift amount for given loaded size");
    if (IsBigEndian)
      Offset = TySizeInBytes - Offset - getLoadedSize();
    return Offset;
  }
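
  // Worked example (illustrative): slicing an i32 load with Shift == 16
  // into a 1-byte slice gives Offset == 2 on little-endian targets and
  // 4 - 2 - 1 == 1 on big-endian targets, where the same bits live at a
  // different byte address.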

  /// Generate the sequence of instructions to load the slice
  /// represented by this object and redirect the uses of this slice to
  /// this new sequence of instructions.
  /// \pre this->Inst && this->Origin are valid Instructions and this
  /// object passed the legal check: LoadedSlice::isLegal returned true.
  /// \return The last instruction of the sequence used to load the slice.
  SDValue loadSlice() const {
    assert(Inst && Origin && "Unable to replace a non-existing slice.");
    const SDValue &OldBaseAddr = Origin->getBasePtr();
    SDValue BaseAddr = OldBaseAddr;
    // Get the offset in that chunk of bytes w.r.t. the endianness.
    int64_t Offset = static_cast<int64_t>(getOffsetFromBase());
    assert(Offset >= 0 && "Offset too big to fit in int64_t!");
    if (Offset) {
      // BaseAddr = BaseAddr + Offset.
      EVT ArithType = BaseAddr.getValueType();
      SDLoc DL(Origin);
      BaseAddr = DAG->getNode(ISD::ADD, DL, ArithType, BaseAddr,
                              DAG->getConstant(Offset, DL, ArithType));
    }

    // Create the type of the loaded slice according to its size.
    EVT SliceType = getLoadedType();

    // Create the load for the slice.
    SDValue LastInst =
        DAG->getLoad(SliceType, SDLoc(Origin), Origin->getChain(), BaseAddr,
                     Origin->getPointerInfo().getWithOffset(Offset),
                     getAlignment(), Origin->getMemOperand()->getFlags());
    // If the final type is not the same as the loaded type, this means that
    // we have to pad with zero. Create a zero extend for that.
    EVT FinalType = Inst->getValueType(0);
    if (SliceType != FinalType)
      LastInst =
          DAG->getNode(ISD::ZERO_EXTEND, SDLoc(LastInst), FinalType, LastInst);
    return LastInst;
  }

  /// Check if this slice can be merged with an expensive cross register
  /// bank copy. E.g.,
  /// i = load i32
  /// f = bitcast i32 i to float
  bool canMergeExpensiveCrossRegisterBankCopy() const {
    if (!Inst || !Inst->hasOneUse())
      return false;
    SDNode *Use = *Inst->use_begin();
    if (Use->getOpcode() != ISD::BITCAST)
      return false;
    assert(DAG && "Missing context");
    const TargetLowering &TLI = DAG->getTargetLoweringInfo();
    EVT ResVT = Use->getValueType(0);
    const TargetRegisterClass *ResRC =
        TLI.getRegClassFor(ResVT.getSimpleVT(), Use->isDivergent());
    const TargetRegisterClass *ArgRC =
        TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT(),
                           Use->getOperand(0)->isDivergent());
    if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT))
      return false;

    // At this point, we know that we perform a cross-register-bank copy.
    // Check if it is expensive.
    const TargetRegisterInfo *TRI = DAG->getSubtarget().getRegisterInfo();
    // Assume bitcasts are cheap, unless the two register classes do not
    // explicitly share a common subclass.
    if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC))
      return false;

    // Check if it will be merged with the load.
    // 1. Check the alignment constraint.
    unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment(
        ResVT.getTypeForEVT(*DAG->getContext()));

    if (RequiredAlignment > getAlignment())
      return false;

    // 2. Check that the load is a legal operation for that type.
    if (!TLI.isOperationLegal(ISD::LOAD, ResVT))
      return false;

    // 3. Check that we do not have a zext in the way.
    if (Inst->getValueType(0) != getLoadedType())
      return false;

    return true;
  }
};

} // end anonymous namespace

/// Check that all bits set in \p UsedBits form a dense region, i.e.,
/// \p UsedBits looks like 0..0 1..1 0..0.
static bool areUsedBitsDense(const APInt &UsedBits) {
  // If all the bits are one, this is dense!
  if (UsedBits.isAllOnesValue())
    return true;

  // Get rid of the unused bits on the right.
  APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros());
  // Get rid of the unused bits on the left.
  if (NarrowedUsedBits.countLeadingZeros())
    NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits());
  // Check that the chunk of bits is completely used.
  return NarrowedUsedBits.isAllOnesValue();
}
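
// For illustration (hypothetical masks): 0x00FF00 is dense (a single
// contiguous run of ones), whereas 0x00F0F0 is not, because the run of
// used bits is interrupted by unused bits.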

/// Check whether or not \p First and \p Second are next to each other
/// in memory. This means that there is no hole between the bits loaded
/// by \p First and the bits loaded by \p Second.
static bool areSlicesNextToEachOther(const LoadedSlice &First,
                                     const LoadedSlice &Second) {
  assert(First.Origin == Second.Origin && First.Origin &&
         "Unable to match different memory origins.");
  APInt UsedBits = First.getUsedBits();
  assert((UsedBits & Second.getUsedBits()) == 0 &&
         "Slices are not supposed to overlap.");
  UsedBits |= Second.getUsedBits();
  return areUsedBitsDense(UsedBits);
}

/// Adjust the \p GlobalLSCost according to the target
/// pairing capabilities and the layout of the slices.
/// \pre \p GlobalLSCost should account for at least as many loads as
/// there are slices in \p LoadedSlices.
static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
                                 LoadedSlice::Cost &GlobalLSCost) {
  unsigned NumberOfSlices = LoadedSlices.size();
  // If there are fewer than 2 elements, no pairing is possible.
  if (NumberOfSlices < 2)
    return;

  // Sort the slices so that elements that are likely to be next to each
  // other in memory are next to each other in the list.
  llvm::sort(LoadedSlices, [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
    assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
    return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
  });
  const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
  // First (resp. Second) is the first (resp. second) potential candidate
  // to be placed in a paired load.
  const LoadedSlice *First = nullptr;
  const LoadedSlice *Second = nullptr;
  for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
                // Set the beginning of the pair.
                                                           First = Second) {
    Second = &LoadedSlices[CurrSlice];

    // If First is NULL, it means we start a new pair.
    // Get to the next slice.
    if (!First)
      continue;

    EVT LoadedType = First->getLoadedType();

    // If the types of the slices are different, we cannot pair them.
    if (LoadedType != Second->getLoadedType())
      continue;

    // Check if the target supplies paired loads for this type.
    unsigned RequiredAlignment = 0;
    if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
      // Move to the next pair; this type is hopeless.
      Second = nullptr;
      continue;
    }
    // Check if we meet the alignment requirement.
    if (RequiredAlignment > First->getAlignment())
      continue;

    // Check that both loads are next to each other in memory.
    if (!areSlicesNextToEachOther(*First, *Second))
      continue;

    assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
    --GlobalLSCost.Loads;
    // Move to the next pair.
    Second = nullptr;
  }
}

/// Check the profitability of all involved LoadedSlices.
/// Currently, slicing is considered profitable if there are exactly two
/// involved slices (1) which are (2) next to each other in memory, and
/// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3).
///
/// Note: The order of the elements in \p LoadedSlices may be modified, but not
/// the elements themselves.
///
/// FIXME: When the cost model will be mature enough, we can relax
/// constraints (1) and (2).
static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices,
                                const APInt &UsedBits, bool ForCodeSize) {
  unsigned NumberOfSlices = LoadedSlices.size();
  if (StressLoadSlicing)
    return NumberOfSlices > 1;

  // Check (1).
  if (NumberOfSlices != 2)
    return false;

  // Check (2).
  if (!areUsedBitsDense(UsedBits))
    return false;

  // Check (3).
  LoadedSlice::Cost OrigCost(ForCodeSize), GlobalSlicingCost(ForCodeSize);
  // The original code has one big load.
  OrigCost.Loads = 1;
  for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice) {
    const LoadedSlice &LS = LoadedSlices[CurrSlice];
    // Accumulate the cost of all the slices.
    LoadedSlice::Cost SliceCost(LS, ForCodeSize);
    GlobalSlicingCost += SliceCost;

    // Account the gain obtained with the current slice as a cost of the
    // original configuration.
    OrigCost.addSliceGain(LS);
  }

  // If the target supports paired load, adjust the cost accordingly.
  adjustCostForPairing(LoadedSlices, GlobalSlicingCost);
  return OrigCost > GlobalSlicingCost;
}
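
// Worked example (illustrative, assuming a target with paired loads): for an
// i32 load split into two adjacent i16 slices, the original configuration
// costs one load plus the truncate and truncate+shift extractions, while the
// sliced configuration costs two loads that pairing then accounts as one, so
// slicing is deemed profitable.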

/// If the given load, \p N, is used only by trunc or trunc(lshr)
/// operations, split it into the various pieces being extracted.
///
/// This sort of thing is introduced by SROA.
/// This slicing takes care not to insert overlapping loads.
/// \pre \p N is a simple load (i.e., not an atomic or volatile load).
bool DAGCombiner::SliceUpLoad(SDNode *N) {
  if (Level < AfterLegalizeDAG)
    return false;

  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (!LD->isSimple() || !ISD::isNormalLoad(LD) ||
      !LD->getValueType(0).isInteger())
    return false;

  // Keep track of already used bits to detect overlapping values.
  // In that case, we will just abort the transformation.
  APInt UsedBits(LD->getValueSizeInBits(0), 0);

  SmallVector<LoadedSlice, 4> LoadedSlices;

  // Check if this load is used as several smaller chunks of bits.
  // Basically, look for uses in trunc or trunc(lshr) and record a new chain
  // of computation for each trunc.
  for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end();
       UI != UIEnd; ++UI) {
    // Skip the uses of the chain.
    if (UI.getUse().getResNo() != 0)
      continue;

    SDNode *User = *UI;
    unsigned Shift = 0;

    // Check if this is a trunc(lshr).
    if (User->getOpcode() == ISD::SRL && User->hasOneUse() &&
        isa<ConstantSDNode>(User->getOperand(1))) {
      Shift = User->getConstantOperandVal(1);
      User = *User->use_begin();
    }

    // At this point, User is a truncate iff we encountered a trunc or a
    // trunc(lshr) pattern.
    if (User->getOpcode() != ISD::TRUNCATE)
      return false;

    // The width of the type must be a power of 2 that is at least 8 bits.
    // Otherwise the load cannot be represented in LLVM IR.
    // Moreover, if the shift amount is not a multiple of 8 bits, the slice
    // would not start on a byte boundary. We do not support that.
    unsigned Width = User->getValueSizeInBits(0);
    if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
      return false;

    // Build the slice for this chain of computations.
    LoadedSlice LS(User, LD, Shift, &DAG);
    APInt CurrentUsedBits = LS.getUsedBits();

    // Check if this slice overlaps with another.
    if ((CurrentUsedBits & UsedBits) != 0)
      return false;
    // Update the bits used globally.
    UsedBits |= CurrentUsedBits;

    // Check if the new slice would be legal.
    if (!LS.isLegal())
      return false;

    // Record the slice.
    LoadedSlices.push_back(LS);
  }

  // Abort slicing if it does not seem to be profitable.
  if (!isSlicingProfitable(LoadedSlices, UsedBits, ForCodeSize))
    return false;

  ++SlicedLoads;

  // Rewrite each chain to use an independent load.
  // By construction, each chain can be represented by a unique load.

  // Prepare the argument for the new token factor for all the slices.
  SmallVector<SDValue, 8> ArgChains;
  for (SmallVectorImpl<LoadedSlice>::const_iterator
           LSIt = LoadedSlices.begin(),
           LSItEnd = LoadedSlices.end();
       LSIt != LSItEnd; ++LSIt) {
    SDValue SliceInst = LSIt->loadSlice();
    CombineTo(LSIt->Inst, SliceInst, true);
    if (SliceInst.getOpcode() != ISD::LOAD)
      SliceInst = SliceInst.getOperand(0);
    assert(SliceInst->getOpcode() == ISD::LOAD &&
           "It takes more than a zext to get to the loaded slice!!");
    ArgChains.push_back(SliceInst.getValue(1));
  }

  SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
                              ArgChains);
  DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
  AddToWorklist(Chain.getNode());
  return true;
}
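
// For illustration (hypothetical IR, little-endian layout):
//   l = load i32, %p
//   a = trunc l to i8
//   b = trunc (srl l, 16) to i8
// is rewritten into two independent loads:
//   a = load i8, %p
//   b = load i8, %p + 2
// with a TokenFactor of the two load chains replacing the original chain.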

/// Check to see if V is (and (load ptr), imm), i.e., a load that has
/// specific bytes cleared out. If so, return the number of bytes being
/// masked out and the shift amount.
static std::pair<unsigned, unsigned>
CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
  std::pair<unsigned, unsigned> Result(0, 0);

  // Check for the structure we're looking for.
  if (V->getOpcode() != ISD::AND ||
      !isa<ConstantSDNode>(V->getOperand(1)) ||
      !ISD::isNormalLoad(V->getOperand(0).getNode()))
    return Result;

  // Check the chain and pointer.
  LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
  if (LD->getBasePtr() != Ptr) return Result;  // Not from same pointer.

  // This only handles simple types.
  if (V.getValueType() != MVT::i16 &&
      V.getValueType() != MVT::i32 &&
      V.getValueType() != MVT::i64)
    return Result;

  // Check the constant mask.  Invert it so that the bits being masked out are
  // 0 and the bits being kept are 1.  Use getSExtValue so that leading bits
  // follow the sign bit for uniformity.
  uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
  unsigned NotMaskLZ = countLeadingZeros(NotMask);
  if (NotMaskLZ & 7) return Result;  // Must be multiple of a byte.
  unsigned NotMaskTZ = countTrailingZeros(NotMask);
  if (NotMaskTZ & 7) return Result;  // Must be multiple of a byte.
  if (NotMaskLZ == 64) return Result;  // All zero mask.

  // See if we have a continuous run of bits.  If so, we have 0*1+0*
  if (countTrailingOnes(NotMask >> NotMaskTZ) + NotMaskTZ + NotMaskLZ != 64)
    return Result;

  // Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
  if (V.getValueType() != MVT::i64 && NotMaskLZ)
    NotMaskLZ -= 64-V.getValueSizeInBits();

  unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
  switch (MaskedBytes) {
  case 1:
  case 2:
  case 4: break;
  default: return Result; // All one mask, or 5-byte mask.
  }

  // Verify that the masked region starts at a byte offset that is a multiple
  // of the mask width, so the narrow access is aligned to its own width.
  if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;

  // For narrowing to be valid, the load must be the memory operation
  // immediately preceding the store.
  if (LD == Chain.getNode())
    ; // ok.
  else if (Chain->getOpcode() == ISD::TokenFactor &&
           SDValue(LD, 1).hasOneUse()) {
    // LD has only one chain use, so there are no indirect dependencies.
    if (!LD->isOperandOf(Chain.getNode()))
      return Result;
  } else
    return Result; // Fail.

  Result.first = MaskedBytes;
  Result.second = NotMaskTZ/8;
  return Result;
}
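
// Worked example (illustrative): for V == (and (load i32 %p), 0xFFFF00FF),
// the inverted sign-extended mask is NotMask == 0x000000000000FF00, so
// NotMaskTZ == 8 and NotMaskLZ == 48, adjusted down to 16 for i32. That is
// one masked byte at byte offset 1, so the function returns {1, 1}.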

/// Check to see if IVal is something that provides a value as specified by
/// MaskInfo. If so, replace the specified store with a narrower store of
/// truncated IVal.
static SDValue
ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
                                SDValue IVal, StoreSDNode *St,
                                DAGCombiner *DC) {
  unsigned NumBytes = MaskInfo.first;
  unsigned ByteShift = MaskInfo.second;
  SelectionDAG &DAG = DC->getDAG();

  // Check to see if IVal is all zeros in the part being masked in by the 'or'
  // that uses this.  If not, this is not a replacement.
  APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
                                  ByteShift*8, (ByteShift+NumBytes)*8);
  if (!DAG.MaskedValueIsZero(IVal, Mask)) return SDValue();

  // Check that it is legal on the target to do this.  It is legal if the new
  // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
  // legalization (and the target doesn't explicitly think this is a bad idea).
  MVT VT = MVT::getIntegerVT(NumBytes * 8);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!DC->isTypeLegal(VT))
    return SDValue();
  if (St->getMemOperand() &&
      !TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                              *St->getMemOperand()))
    return SDValue();

  // Okay, we can do this!  Replace the 'St' store with a store of IVal that is
  // shifted by ByteShift and truncated down to NumBytes.
  if (ByteShift) {
    SDLoc DL(IVal);
    IVal = DAG.getNode(ISD::SRL, DL, IVal.getValueType(), IVal,
                       DAG.getConstant(ByteShift*8, DL,
                                    DC->getShiftAmountTy(IVal.getValueType())));
  }

  // Figure out the offset for the store and the alignment of the access.
  unsigned StOffset;
  unsigned NewAlign = St->getAlignment();

  if (DAG.getDataLayout().isLittleEndian())
    StOffset = ByteShift;
  else
    StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;

  SDValue Ptr = St->getBasePtr();
  if (StOffset) {
    SDLoc DL(IVal);
    Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(),
                      Ptr, DAG.getConstant(StOffset, DL, Ptr.getValueType()));
    NewAlign = MinAlign(NewAlign, StOffset);
  }

  // Truncate down to the new size.
  IVal = DAG.getNode(ISD::TRUNCATE, SDLoc(IVal), VT, IVal);

  ++OpsNarrowed;
  return DAG
      .getStore(St->getChain(), SDLoc(St), IVal, Ptr,
                St->getPointerInfo().getWithOffset(StOffset), NewAlign);
}
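
// Continuing the example above (illustrative): with MaskInfo == {1, 1} on a
// little-endian target, IVal is shifted right by 8, truncated to i8, and
// stored at %p + 1, replacing the original wide store of the masked-in value.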

/// Look for sequence of load / op / store where op is one of 'or', 'xor', and
/// 'and' of immediates. If 'op' is only touching some of the loaded bits, try
/// narrowing the load and store if it would end up being a win for performance
/// or code size.
SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
  StoreSDNode *ST  = cast<StoreSDNode>(N);
  if (!ST->isSimple())
    return SDValue();

  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  SDValue Ptr   = ST->getBasePtr();
  EVT VT = Value.getValueType();

  if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
    return SDValue();

  unsigned Opc = Value.getOpcode();

  // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
  // is a byte mask indicating a consecutive number of bytes, check to see if
  // Y is known to provide just those bytes. If so, we try to replace the
  // load / modify / store sequence with a single (narrower) store, which
  // makes the load dead.
  if (Opc == ISD::OR) {
    std::pair<unsigned, unsigned> MaskedLoad;
    MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
    if (MaskedLoad.first)
      if (SDValue NewST = ShrinkLoadReplaceStoreWithStore(
              MaskedLoad, Value.getOperand(1), ST, this))
        return NewST;

    // Or is commutative, so try swapping X and Y.
    MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
    if (MaskedLoad.first)
      if (SDValue NewST = ShrinkLoadReplaceStoreWithStore(
              MaskedLoad, Value.getOperand(0), ST, this))
        return NewST;
  }

  if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
      Value.getOperand(1).getOpcode() != ISD::Constant)
    return SDValue();

  SDValue N0 = Value.getOperand(0);
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
      Chain == SDValue(N0.getNode(), 1)) {
    LoadSDNode *LD = cast<LoadSDNode>(N0);
    if (LD->getBasePtr() != Ptr ||
        LD->getPointerInfo().getAddrSpace() !=
        ST->getPointerInfo().getAddrSpace())
      return SDValue();

    // Find the type to which we can narrow the load / op / store.
    SDValue N1 = Value.getOperand(1);
    unsigned BitWidth = N1.getValueSizeInBits();
    APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
    if (Opc == ISD::AND)
      Imm ^= APInt::getAllOnesValue(BitWidth);
    if (Imm == 0 || Imm.isAllOnesValue())
      return SDValue();
    unsigned ShAmt = Imm.countTrailingZeros();
    unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
    unsigned NewBW = NextPowerOf2(MSB - ShAmt);
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
    // The narrowing should be profitable, the load/store operation should be
    // legal (or custom) and the store size should be equal to the NewVT width.
    while (NewBW < BitWidth &&
           (NewVT.getStoreSizeInBits() != NewBW ||
            !TLI.isOperationLegalOrCustom(Opc, NewVT) ||
            !TLI.isNarrowingProfitable(VT, NewVT))) {
      NewBW = NextPowerOf2(NewBW);
      NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
    }
    if (NewBW >= BitWidth)
      return SDValue();

    // If the lowest changed bit does not start on a NewBW-bit boundary,
    // start at the previous boundary.
    if (ShAmt % NewBW)
      ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
    APInt Mask = APInt::getBitsSet(BitWidth, ShAmt,
                                   std::min(BitWidth, ShAmt + NewBW));
    if ((Imm & Mask) == Imm) {
      APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
      if (Opc == ISD::AND)
        NewImm ^= APInt::getAllOnesValue(NewBW);
      uint64_t PtrOff = ShAmt / 8;
      // For big endian targets, we need to adjust the offset to the pointer to
      // load the correct bytes.
      if (DAG.getDataLayout().isBigEndian())
        PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;

      unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
      Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
      if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy))
        return SDValue();

      SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD),
                                   Ptr.getValueType(), Ptr,
                                   DAG.getConstant(PtrOff, SDLoc(LD),
                                                   Ptr.getValueType()));
      SDValue NewLD =
          DAG.getLoad(NewVT, SDLoc(N0), LD->getChain(), NewPtr,
                      LD->getPointerInfo().getWithOffset(PtrOff), NewAlign,
                      LD->getMemOperand()->getFlags(), LD->getAAInfo());
      SDValue NewVal = DAG.getNode(Opc, SDLoc(Value), NewVT, NewLD,
                                   DAG.getConstant(NewImm, SDLoc(Value),
                                                   NewVT));
      SDValue NewST =
          DAG.getStore(Chain, SDLoc(N), NewVal, NewPtr,
                       ST->getPointerInfo().getWithOffset(PtrOff), NewAlign);

      AddToWorklist(NewPtr.getNode());
      AddToWorklist(NewLD.getNode());
      AddToWorklist(NewVal.getNode());
      WorklistRemover DeadNodes(*this);
      DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
      ++OpsNarrowed;
      return NewST;
    }
  }

  return SDValue();
}
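
// Worked example (illustrative):
//   store (or (load i32 %p), 0x00FF0000), %p
// has Imm == 0x00FF0000, so ShAmt == 16, MSB == 23, and NewBW == 8. On a
// little-endian target PtrOff == 2, so the sequence becomes:
//   x = load i8, %p + 2
//   store (or x, 0xFF), %p + 2
// subject to the legality, profitability, and alignment checks above.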

/// For a given floating point load / store pair, if the load value isn't used
/// by any other operations, then consider transforming the pair to integer
/// load / store operations if the target deems the transformation profitable.
SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
  StoreSDNode *ST  = cast<StoreSDNode>(N);
  SDValue Value = ST->getValue();
  if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) &&
      Value.hasOneUse()) {
    LoadSDNode *LD = cast<LoadSDNode>(Value);
    EVT VT = LD->getMemoryVT();
    if (!VT.isFloatingPoint() ||
        VT != ST->getMemoryVT() ||
        LD->isNonTemporal() ||
        ST->isNonTemporal() ||
        LD->getPointerInfo().getAddrSpace() != 0 ||
        ST->getPointerInfo().getAddrSpace() != 0)
      return SDValue();

    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (!TLI.isOperationLegal(ISD::LOAD, IntVT) ||
        !TLI.isOperationLegal(ISD::STORE, IntVT) ||
        !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) ||
        !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT))
      return SDValue();

    unsigned LDAlign = LD->getAlignment();
    unsigned STAlign = ST->getAlignment();
    Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
    unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy);
    if (LDAlign < ABIAlign || STAlign < ABIAlign)
      return SDValue();

    SDValue NewLD =
        DAG.getLoad(IntVT, SDLoc(Value), LD->getChain(), LD->getBasePtr(),
                    LD->getPointerInfo(), LDAlign);

    SDValue NewST =
        DAG.getStore(ST->getChain(), SDLoc(N), NewLD, ST->getBasePtr(),
                     ST->getPointerInfo(), STAlign);

    AddToWorklist(NewLD.getNode());
    AddToWorklist(NewST.getNode());
    WorklistRemover DeadNodes(*this);
    DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1));
    ++LdStFP2Int;
    return NewST;
  }

  return SDValue();
}

// This is a helper function for visitMUL to check the profitability
// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
// MulNode is the original multiply, AddNode is (add x, c1),
// and ConstNode is c2.
//
// If the (add x, c1) has multiple uses, we could increase
// the number of adds if we make this transformation.
// It would only be worth doing this if we can remove a
// multiply in the process. Check for that here.
// To illustrate:
//     (A + c1) * c3
//     (A + c2) * c3
// We're checking for cases where we have common "c3 * A" expressions.
bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode,
                                              SDValue &AddNode,
                                              SDValue &ConstNode) {
  APInt Val;

  // If the add only has one use, this would be OK to do.
  if (AddNode.getNode()->hasOneUse())
    return true;

  // Walk all the users of the constant with which we're multiplying.
  for (SDNode *Use : ConstNode->uses()) {
    if (Use == MulNode) // This use is the one we're on right now. Skip it.
      continue;

    if (Use->getOpcode() == ISD::MUL) { // We have another multiply use.
      SDNode *OtherOp;
      SDNode *MulVar = AddNode.getOperand(0).getNode();

      // OtherOp is what we're multiplying against the constant.
      if (Use->getOperand(0) == ConstNode)
        OtherOp = Use->getOperand(1).getNode();
      else
        OtherOp = Use->getOperand(0).getNode();

      // Check to see if the multiply is with the same operand of our "add".
      //
      //     ConstNode  = CONST
      //     Use = ConstNode * A  <-- visiting Use. OtherOp is A.
      //     ...
      //     AddNode  = (A + c1)  <-- MulVar is A.
      //         = AddNode * ConstNode   <-- the instruction we are visiting.
      //
      // If we make this transformation, we will have a common
      // multiply (ConstNode * A) that we can save.
      if (OtherOp == MulVar)
        return true;

      // Now check to see if a future expansion will give us a common
      // multiply.
      //
      //     ConstNode  = CONST
      //     AddNode    = (A + c1)
      //     ...   = AddNode * ConstNode <-- the instruction we are visiting.
      //     ...
      //     OtherOp = (A + c2)
      //     Use     = OtherOp * ConstNode <-- visiting Use.
      //
      // If we make this transformation, we will have a common
      // multiply (CONST * A) after we also do the same transformation
      // to the "Use" instruction.
      if (OtherOp->getOpcode() == ISD::ADD &&
          DAG.isConstantIntBuildVectorOrConstantInt(OtherOp->getOperand(1)) &&
          OtherOp->getOperand(0).getNode() == MulVar)
        return true;
    }
  }

  // Didn't find a case where this would be profitable.
  return false;
}
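
// For illustration (hypothetical DAG): with AddNode == (add %a, 10) having
// multiple uses and ConstNode == 3 also feeding (mul (add %a, 20), 3), the
// other multiply is an add-of-constant times the same constant, so both can
// be rewritten to share a single (mul %a, 3), and the fold is reported
// profitable.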

SDValue DAGCombiner::getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes,
                                         unsigned NumStores) {
  SmallVector<SDValue, 8> Chains;
  SmallPtrSet<const SDNode *, 8> Visited;
  SDLoc StoreDL(StoreNodes[0].MemNode);

  for (unsigned i = 0; i < NumStores; ++i) {
    Visited.insert(StoreNodes[i].MemNode);
  }

  // Don't include chains that are other candidate stores or duplicates.
  for (unsigned i = 0; i < NumStores; ++i) {
    if (Visited.insert(StoreNodes[i].MemNode->getChain().getNode()).second)
      Chains.push_back(StoreNodes[i].MemNode->getChain());
  }

  assert(Chains.size() > 0 && "Chain should have generated a chain");
  return DAG.getTokenFactor(StoreDL, Chains);
}

bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
    SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT, unsigned NumStores,
    bool IsConstantSrc, bool UseVector, bool UseTrunc) {
  // Make sure we have something to merge.
  if (NumStores < 2)
    return false;

  // Use the debug location of the first store node.
  SDLoc DL(StoreNodes[0].MemNode);

  int64_t ElementSizeBits = MemVT.getStoreSizeInBits();
  unsigned SizeInBits = NumStores * ElementSizeBits;
  unsigned NumMemElts = MemVT.isVector() ? MemVT.getVectorNumElements() : 1;

  EVT StoreTy;
  if (UseVector) {
    unsigned Elts = NumStores * NumMemElts;
    // Get the type for the merged vector store.
    StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
  } else
    StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);

  SDValue StoredVal;
  if (UseVector) {
    if (IsConstantSrc) {
      SmallVector<SDValue, 8> BuildVector;
      for (unsigned I = 0; I != NumStores; ++I) {
        StoreSDNode *St = cast<StoreSDNode>(StoreNodes[I].MemNode);
        SDValue Val = St->getValue();
        // If constant is of the wrong type, convert it now.
        if (MemVT != Val.getValueType()) {
          Val = peekThroughBitcasts(Val);
          // Deal with constants of wrong size.
          if (ElementSizeBits != Val.getValueSizeInBits()) {
            EVT IntMemVT =
                EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
            if (isa<ConstantFPSDNode>(Val)) {
              // Not clear how to truncate FP values.
              return false;
            } else if (auto *C = dyn_cast<ConstantSDNode>(Val))
              Val = DAG.getConstant(C->getAPIntValue()
                                        .zextOrTrunc(Val.getValueSizeInBits())
                                        .zextOrTrunc(ElementSizeBits),
                                    SDLoc(C), IntMemVT);
          }
          // Make sure the correctly sized value is bitcast to the correct type.
          Val = DAG.getBitcast(MemVT, Val);
        }
        BuildVector.push_back(Val);
      }
      StoredVal = DAG.getNode(MemVT.isVector() ? ISD::CONCAT_VECTORS
                                               : ISD::BUILD_VECTOR,
                              DL, StoreTy, BuildVector);
    } else {
      SmallVector<SDValue, 8> Ops;
      for (unsigned i = 0; i < NumStores; ++i) {
        StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
        SDValue Val = peekThroughBitcasts(St->getValue());
        // All operands of BUILD_VECTOR / CONCAT_VECTOR must be of
        // type MemVT. If the underlying value is not the correct
        // type, but it is an extraction of an appropriate vector we
        // can recast Val to be of the correct type. This may require
        // converting between EXTRACT_VECTOR_ELT and
        // EXTRACT_SUBVECTOR.
        if ((MemVT != Val.getValueType()) &&
            (Val.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
             Val.getOpcode() == ISD::EXTRACT_SUBVECTOR)) {
          EVT MemVTScalarTy = MemVT.getScalarType();
          // We may need to add a bitcast here to get types to line up.
          if (MemVTScalarTy != Val.getValueType().getScalarType()) {
            Val = DAG.getBitcast(MemVT, Val);
          } else {
            unsigned OpC = MemVT.isVector() ? ISD::EXTRACT_SUBVECTOR
                                            : ISD::EXTRACT_VECTOR_ELT;
            SDValue Vec = Val.getOperand(0);
            SDValue Idx = Val.getOperand(1);
            Val = DAG.getNode(OpC, SDLoc(Val), MemVT, Vec, Idx);
          }
        }
        Ops.push_back(Val);
      }

      // Build the extracted vector elements back into a vector.
      StoredVal = DAG.getNode(MemVT.isVector() ? ISD::CONCAT_VECTORS
                                               : ISD::BUILD_VECTOR,
                              DL, StoreTy, Ops);
    }
  } else {
    // We should always use a vector store when merging extracted vector
    // elements, so this path implies a store of constants.
    assert(IsConstantSrc && "Merged vector elements should use vector store");

    APInt StoreInt(SizeInBits, 0);

    // Construct a single integer constant which is made of the smaller
    // constant inputs.
    bool IsLE = DAG.getDataLayout().isLittleEndian();
    for (unsigned i = 0; i < NumStores; ++i) {
      unsigned Idx = IsLE ? (NumStores - 1 - i) : i;
      StoreSDNode *St  = cast<StoreSDNode>(StoreNodes[Idx].MemNode);

      SDValue Val = St->getValue();
      Val = peekThroughBitcasts(Val);
      StoreInt <<= ElementSizeBits;
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
        StoreInt |= C->getAPIntValue()
                        .zextOrTrunc(ElementSizeBits)
                        .zextOrTrunc(SizeInBits);
      } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
        StoreInt |= C->getValueAPF()
                        .bitcastToAPInt()
                        .zextOrTrunc(ElementSizeBits)
                        .zextOrTrunc(SizeInBits);
        // If FP truncation is necessary, give up for now.
        if (MemVT.getSizeInBits() != ElementSizeBits)
          return false;
      } else {
        llvm_unreachable("Invalid constant element type");
      }
    }

    // Create the new Load and Store operations.
    StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
  }

  LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
  SDValue NewChain = getMergeStoreChains(StoreNodes, NumStores);

  // Make sure we use a trunc store if it's necessary to be legal.
  SDValue NewStore;
  if (!UseTrunc) {
    NewStore = DAG.getStore(NewChain, DL, StoredVal, FirstInChain->getBasePtr(),
                            FirstInChain->getPointerInfo(),
                            FirstInChain->getAlignment());
  } else { // Must be realized as a trunc store
    EVT LegalizedStoredValTy =
        TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());
    unsigned LegalizedStoreSize = LegalizedStoredValTy.getSizeInBits();
    ConstantSDNode *C = cast<ConstantSDNode>(StoredVal);
    SDValue ExtendedStoreVal =
        DAG.getConstant(C->getAPIntValue().zextOrTrunc(LegalizedStoreSize), DL,
                        LegalizedStoredValTy);
    NewStore = DAG.getTruncStore(
        NewChain, DL, ExtendedStoreVal, FirstInChain->getBasePtr(),
        FirstInChain->getPointerInfo(), StoredVal.getValueType() /*TVT*/,
        FirstInChain->getAlignment(),
        FirstInChain->getMemOperand()->getFlags());
  }

  // Replace all merged stores with the new store.
  for (unsigned i = 0; i < NumStores; ++i)
    CombineTo(StoreNodes[i].MemNode, NewStore);

  AddToWorklist(NewChain.getNode());
  return true;
}
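
// Worked example (illustrative, little-endian): merging
//   store i16 0x1234, %p
//   store i16 0x5678, %p + 2
// builds StoreInt == 0x56781234 and emits the single store
//   store i32 0x56781234, %p
// whose in-memory bytes (34 12 78 56) match the two original stores.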

void DAGCombiner::getStoreMergeCandidates(
    StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes,
    SDNode *&RootNode) {
  // This holds the base pointer, index, and the offset in bytes from the base
  // pointer.
  BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
  EVT MemVT = St->getMemoryVT();

  SDValue Val = peekThroughBitcasts(St->getValue());
  // We must have a base and an offset.
  if (!BasePtr.getBase().getNode())
    return;

  // Do not handle stores to undef base pointers.
  if (BasePtr.getBase().isUndef())
    return;

  bool IsConstantSrc = isa<ConstantSDNode>(Val) || isa<ConstantFPSDNode>(Val);
  bool IsExtractVecSrc = (Val.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
                          Val.getOpcode() == ISD::EXTRACT_SUBVECTOR);
  bool IsLoadSrc = isa<LoadSDNode>(Val);
  BaseIndexOffset LBasePtr;
  // Match on the load's base pointer, if relevant.
  EVT LoadVT;
  if (IsLoadSrc) {
    auto *Ld = cast<LoadSDNode>(Val);
    LBasePtr = BaseIndexOffset::match(Ld, DAG);
    LoadVT = Ld->getMemoryVT();
    // Load and store should be the same type.
    if (MemVT != LoadVT)
      return;
    // Loads must only have one use.
    if (!Ld->hasNUsesOfValue(1, 0))
      return;
    // The memory operands must not be volatile/indexed/atomic.
    // TODO: May be able to relax for unordered atomics (see D66309)
    if (!Ld->isSimple() || Ld->isIndexed())
      return;
  }
  auto CandidateMatch = [&](StoreSDNode *Other, BaseIndexOffset &Ptr,
                            int64_t &Offset) -> bool {
    // The memory operands must not be volatile/indexed/atomic.
    // TODO: May be able to relax for unordered atomics (see D66309)
    if (!Other->isSimple() || Other->isIndexed())
      return false;
    // Don't mix temporal stores with non-temporal stores.
    if (St->isNonTemporal() != Other->isNonTemporal())
      return false;
    SDValue OtherBC = peekThroughBitcasts(Other->getValue());
    // Allow merging constants of different types as integers.
    bool NoTypeMatch = (MemVT.isInteger()) ? !MemVT.bitsEq(Other->getMemoryVT())
                                           : Other->getMemoryVT() != MemVT;
    if (IsLoadSrc) {
      if (NoTypeMatch)
        return false;
      // The Load's Base Ptr must also match
      if (LoadSDNode *OtherLd = dyn_cast<LoadSDNode>(OtherBC)) {
        BaseIndexOffset LPtr = BaseIndexOffset::match(OtherLd, DAG);
        if (LoadVT != OtherLd->getMemoryVT())
          return false;
        // Loads must only have one use.
        if (!OtherLd->hasNUsesOfValue(1, 0))
          return false;
        // The memory operands must not be volatile/indexed/atomic.
        // TODO: May be able to relax for unordered atomics (see D66309)
        if (!OtherLd->isSimple() || OtherLd->isIndexed())
          return false;
        // Don't mix temporal loads with non-temporal loads.
        if (cast<LoadSDNode>(Val)->isNonTemporal() != OtherLd->isNonTemporal())
          return false;
        if (!(LBasePtr.equalBaseIndex(LPtr, DAG)))
          return false;
      } else
        return false;
    }
    if (IsConstantSrc) {
      if (NoTypeMatch)
        return false;
      if (!(isa<ConstantSDNode>(OtherBC) || isa<ConstantFPSDNode>(OtherBC)))
        return false;
    }
    if (IsExtractVecSrc) {
      // Do not merge truncated stores here.
      if (Other->isTruncatingStore())
        return false;
      if (!MemVT.bitsEq(OtherBC.getValueType()))
        return false;
      if (OtherBC.getOpcode() != ISD::EXTRACT_VECTOR_ELT &&
          OtherBC.getOpcode() != ISD::EXTRACT_SUBVECTOR)
        return false;
    }
    Ptr = BaseIndexOffset::match(Other, DAG);
    return (BasePtr.equalBaseIndex(Ptr, DAG, Offset));
  };

  // Check whether the pair of StoreNode and RootNode has already bailed out
  // of the dependence check more times than the limit allows.
  auto OverLimitInDependenceCheck = [&](SDNode *StoreNode,
                                        SDNode *RootNode) -> bool {
    auto RootCount = StoreRootCountMap.find(StoreNode);
    if (RootCount != StoreRootCountMap.end() &&
        RootCount->second.first == RootNode &&
        RootCount->second.second > StoreMergeDependenceLimit)
      return true;
    return false;
  };

  // We are looking for a root node which is an ancestor to all mergeable
  // stores. We search up through a load, to our root and then down
  // through all children. For instance, we will find Store{1,2,3} if
  // St is Store1, Store2, or Store3 where the root is not a load,
  // which is always true for non-volatile ops. TODO: Expand
  // the search to find all valid candidates through multiple layers of loads.
  //
  // Root
  // |-------|-------|
  // Load    Load    Store3
  // |       |
  // Store1   Store2
  //
  // FIXME: We should be able to climb and
  // descend TokenFactors to find candidates as well.

  RootNode = St->getChain().getNode();

  unsigned NumNodesExplored = 0;
  if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(RootNode)) {
    RootNode = Ldn->getChain().getNode();
    for (auto I = RootNode->use_begin(), E = RootNode->use_end();
         I != E && NumNodesExplored < 1024; ++I, ++NumNodesExplored)
      if (I.getOperandNo() == 0 && isa<LoadSDNode>(*I)) // walk down chain
        for (auto I2 = (*I)->use_begin(), E2 = (*I)->use_end(); I2 != E2; ++I2)
          if (I2.getOperandNo() == 0)
            if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I2)) {
              BaseIndexOffset Ptr;
              int64_t PtrDiff;
              if (CandidateMatch(OtherST, Ptr, PtrDiff) &&
                  !OverLimitInDependenceCheck(OtherST, RootNode))
                StoreNodes.push_back(MemOpLink(OtherST, PtrDiff));
            }
  } else
    for (auto I = RootNode->use_begin(), E = RootNode->use_end();
         I != E && NumNodesExplored < 1024; ++I, ++NumNodesExplored)
      if (I.getOperandNo() == 0)
        if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) {
          BaseIndexOffset Ptr;
          int64_t PtrDiff;
          if (CandidateMatch(OtherST, Ptr, PtrDiff) &&
              !OverLimitInDependenceCheck(OtherST, RootNode))
            StoreNodes.push_back(MemOpLink(OtherST, PtrDiff));
        }
}

// We need to check that merging these stores does not cause a loop in
// the DAG. Any store candidate may depend on another candidate
// indirectly through its operand (we already consider dependencies
// through the chain). Check in parallel by searching up from
// non-chain operands of candidates.
bool DAGCombiner::checkMergeStoreCandidatesForDependencies(
    SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores,
    SDNode *RootNode) {
  // FIXME: We should be able to truncate a full search of
  // predecessors by doing a BFS and keeping tabs on the originating
  // stores from which worklist nodes come, in a similar way to
  // TokenFactor simplification.

  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 8> Worklist;

  // RootNode is a predecessor to all candidates, so we need not search
  // past it. Add RootNode (peeking through TokenFactors). Do not count
  // these towards the size check.

  Worklist.push_back(RootNode);
  while (!Worklist.empty()) {
    auto N = Worklist.pop_back_val();
    if (!Visited.insert(N).second)
      continue; // Already present in Visited.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (SDValue Op : N->ops())
        Worklist.push_back(Op.getNode());
    }
  }

  // Don't count pruning nodes towards max.
  unsigned int Max = 1024 + Visited.size();
  // Search Ops of store candidates.
  for (unsigned i = 0; i < NumStores; ++i) {
    SDNode *N = StoreNodes[i].MemNode;
    // Of the 4 Store Operands:
    //   * Chain (Op 0) -> We have already considered these
    //                     in candidate selection, so they can be
    //                     safely ignored.
    //   * Value (Op 1) -> Cycles may happen (e.g. through load chains).
    //   * Address (Op 2) -> Merged addresses may only vary by a fixed constant,
    //                       but aren't necessarily from the same base node, so
    //                       cycles are possible (e.g. via an indexed store).
    //   * (Op 3) -> Represents the pre- or post-indexing offset (or undef for
    //               non-indexed stores). Not constant on all targets (e.g. ARM)
    //               and so can participate in a cycle.
    for (unsigned j = 1; j < N->getNumOperands(); ++j)
      Worklist.push_back(N->getOperand(j).getNode());
  }
  // Search through DAG. We can stop early if we find a store node.
  for (unsigned i = 0; i < NumStores; ++i)
    if (SDNode::hasPredecessorHelper(StoreNodes[i].MemNode, Visited, Worklist,
                                     Max)) {
      // If the search bails out, record the StoreNode and RootNode in
      // StoreRootCountMap. If we have seen the pair more times than the
      // limit, we won't add the StoreNode to the StoreNodes set again.
      if (Visited.size() >= Max) {
        auto &RootCount = StoreRootCountMap[StoreNodes[i].MemNode];
        if (RootCount.first == RootNode)
          RootCount.second++;
        else
          RootCount = {RootNode, 1};
      }
      return false;
    }
  return true;
}
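
// For illustration (hypothetical cycle): if candidate store S1's stored value
// is computed from a load whose address depends on candidate store S2, then
// S2 is a non-chain predecessor of S1; merging both into one store would make
// the merged node its own predecessor, which the search above rejects.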

bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
  if (OptLevel == CodeGenOpt::None || !EnableStoreMerging)
    return false;

  EVT MemVT = St->getMemoryVT();
  int64_t ElementSizeBytes = MemVT.getStoreSize();
  unsigned NumMemElts = MemVT.isVector() ? MemVT.getVectorNumElements() : 1;

  if (MemVT.getSizeInBits() * 2 > MaximumLegalStoreInBits)
    return false;

  bool NoVectors = DAG.getMachineFunction().getFunction().hasFnAttribute(
      Attribute::NoImplicitFloat);

  // This function cannot currently deal with non-byte-sized memory sizes.
  if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
    return false;

  if (!MemVT.isSimple())
    return false;

  // Perform an early exit check. Do not bother looking at stored values that
  // are not constants, loads, or extracted vector elements.
  SDValue StoredVal = peekThroughBitcasts(St->getValue());
  bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
  bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
                       isa<ConstantFPSDNode>(StoredVal);
  bool IsExtractVecSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
                          StoredVal.getOpcode() == ISD::EXTRACT_SUBVECTOR);
  bool IsNonTemporalStore = St->isNonTemporal();
  bool IsNonTemporalLoad =
      IsLoadSrc && cast<LoadSDNode>(StoredVal)->isNonTemporal();

  if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecSrc)
    return false;

  SmallVector<MemOpLink, 8> StoreNodes;
  SDNode *RootNode;
  // Find potential store merge candidates by searching through chain sub-DAG
  getStoreMergeCandidates(St, StoreNodes, RootNode);

  // Check if there is anything to merge.
  if (StoreNodes.size() < 2)
    return false;

  // Sort the memory operands according to their distance from the
  // base pointer.
  llvm::sort(StoreNodes, [](MemOpLink LHS, MemOpLink RHS) {
    return LHS.OffsetFromBase < RHS.OffsetFromBase;
  });

  // Store merging attempts to merge the lowest-addressed stores first. This
  // generally works out: if the first merge succeeds, the remaining stores
  // are checked afterwards. However, when a non-mergeable store comes first,
  // e.g., {p[-2], p[0], p[1], p[2], p[3]}, we would fail and miss the
  // subsequent mergeable stores. To prevent this, we prune such stores from
  // the front of StoreNodes here.

  bool RV = false;
  while (StoreNodes.size() > 1) {
    size_t StartIdx = 0;
    while ((StartIdx + 1 < StoreNodes.size()) &&
           StoreNodes[StartIdx].OffsetFromBase + ElementSizeBytes !=
               StoreNodes[StartIdx + 1].OffsetFromBase)
      ++StartIdx;

    // Bail if we don't have enough candidates to merge.
    if (StartIdx + 1 >= StoreNodes.size())
      return RV;

    if (StartIdx)
      StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + StartIdx);

    // Scan the memory operations on the chain and find the first
    // non-consecutive store memory address.
    unsigned NumConsecutiveStores = 1;
    int64_t StartAddress = StoreNodes[0].OffsetFromBase;
    // Check that the addresses are consecutive starting from the second
    // element in the list of stores.
    for (unsigned i = 1, e = StoreNodes.size(); i < e; ++i) {
      int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
      if (CurrAddress - StartAddress != (ElementSizeBytes * i))
        break;
      NumConsecutiveStores = i + 1;
    }
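
    // For illustration (hypothetical offsets): with 4-byte elements and
    // sorted offsets {0, 4, 8, 20}, the scan above stops at offset 20 and
    // NumConsecutiveStores == 3.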

    if (NumConsecutiveStores < 2) {
      StoreNodes.erase(StoreNodes.begin(),
                       StoreNodes.begin() + NumConsecutiveStores);
      continue;
    }

    // After sorting and pruning, StoreNodes[0] is the node with the lowest
    // store address.
    LLVMContext &Context = *DAG.getContext();
    const DataLayout &DL = DAG.getDataLayout();

    // Store the constants into memory as one consecutive store.
    if (IsConstantSrc) {
      while (NumConsecutiveStores >= 2) {
        LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
        unsigned FirstStoreAS = FirstInChain->getAddressSpace();
        unsigned FirstStoreAlign = FirstInChain->getAlignment();
        unsigned LastLegalType = 1;
        unsigned LastLegalVectorType = 1;
        bool LastIntegerTrunc = false;
        bool NonZero = false;
        unsigned FirstZeroAfterNonZero = NumConsecutiveStores;
        for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
          StoreSDNode *ST = cast<StoreSDNode>(StoreNodes[i].MemNode);
          SDValue StoredVal = ST->getValue();
          bool IsElementZero = false;
          if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal))
            IsElementZero = C->isNullValue();
          else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal))
            IsElementZero = C->getConstantFPValue()->isNullValue();
          if (IsElementZero) {
            if (NonZero && FirstZeroAfterNonZero == NumConsecutiveStores)
              FirstZeroAfterNonZero = i;
          }
          NonZero |= !IsElementZero;

          // Find a legal type for the constant store.
          unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
          EVT StoreTy = EVT::getIntegerVT(Context, SizeInBits);
          bool IsFast = false;

          // Break early when size is too large to be legal.
          if (StoreTy.getSizeInBits() > MaximumLegalStoreInBits)
            break;

          if (TLI.isTypeLegal(StoreTy) &&
              TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
              TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                     *FirstInChain->getMemOperand(), &IsFast) &&
              IsFast) {
            LastIntegerTrunc = false;
            LastLegalType = i + 1;
            // Or check whether a truncstore is legal.
          } else if (TLI.getTypeAction(Context, StoreTy) ==
                     TargetLowering::TypePromoteInteger) {
            EVT LegalizedStoredValTy =
                TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
            if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
                TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
                TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                       *FirstInChain->getMemOperand(),
                                       &IsFast) &&
                IsFast) {
              LastIntegerTrunc = true;
              LastLegalType = i + 1;
            }
          }

          // We only use vectors if the constant is known to be zero or the
          // target allows it and the function is not marked with the
          // noimplicitfloat attribute.
          if ((!NonZero ||
               TLI.storeOfVectorConstantIsCheap(MemVT, i + 1, FirstStoreAS)) &&
              !NoVectors) {
            // Find a legal type for the vector store.
            unsigned Elts = (i + 1) * NumMemElts;
            EVT Ty = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
            if (TLI.isTypeLegal(Ty) && TLI.isTypeLegal(MemVT) &&
                TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
                TLI.allowsMemoryAccess(
                    Context, DL, Ty, *FirstInChain->getMemOperand(), &IsFast) &&
                IsFast)
              LastLegalVectorType = i + 1;
          }
        }

        bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors;
        unsigned NumElem = (UseVector) ? LastLegalVectorType : LastLegalType;

        // Check if we found a legal integer type that creates a meaningful
        // merge.
        if (NumElem < 2) {
          // We know that candidate stores are in order and of correct
          // shape. While there is no mergeable sequence from the
          // beginning, one may start later in the sequence. The only
          // reason a merge of size N could have failed where another of
          // the same size would not have, is if the alignment has
          // improved or we've dropped a non-zero value. Drop as many
          // candidates as we can here.
          unsigned NumSkip = 1;
          while (
              (NumSkip < NumConsecutiveStores) &&
              (NumSkip < FirstZeroAfterNonZero) &&
              (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
            NumSkip++;

          StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
          NumConsecutiveStores -= NumSkip;
          continue;
        }

        // Check that we can merge these candidates without causing a cycle.
        if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumElem,
                                                      RootNode)) {
          StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
          NumConsecutiveStores -= NumElem;
          continue;
        }

        RV |= MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumElem, true,
                                              UseVector, LastIntegerTrunc);

        // Remove merged stores for next iteration.
        StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
        NumConsecutiveStores -= NumElem;
      }
      continue;
    }

    // When extracting multiple vector elements, try to store them
    // in one vector store rather than a sequence of scalar stores.
    if (IsExtractVecSrc) {
      // Loop over the consecutive stores on success.
      while (NumConsecutiveStores >= 2) {
        LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
        unsigned FirstStoreAS = FirstInChain->getAddressSpace();
        unsigned FirstStoreAlign = FirstInChain->getAlignment();
        unsigned NumStoresToMerge = 1;
        for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
          // Find a legal type for the vector store.
          unsigned Elts = (i + 1) * NumMemElts;
          EVT Ty =
              EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
          bool IsFast;

          // Break early when size is too large to be legal.
          if (Ty.getSizeInBits() > MaximumLegalStoreInBits)
            break;

          if (TLI.isTypeLegal(Ty) &&
              TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
              TLI.allowsMemoryAccess(Context, DL, Ty,
                                     *FirstInChain->getMemOperand(), &IsFast) &&
              IsFast)
            NumStoresToMerge = i + 1;
        }

        // Check if we found a legal vector type that creates a meaningful
        // merge.
        if (NumStoresToMerge < 2) {
          // We know that candidate stores are in order and of correct
          // shape. While there is no mergeable sequence from the
          // beginning, one may start later in the sequence. The only
          // reason a merge of size N could have failed where another of
          // the same size would not have, is if the alignment has
          // improved. Drop as many candidates as we can here.
          unsigned NumSkip = 1;
          while (
              (NumSkip < NumConsecutiveStores) &&
              (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
            NumSkip++;

          StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
          NumConsecutiveStores -= NumSkip;
          continue;
        }

        // Check that we can merge these candidates without causing a cycle.
        if (!checkMergeStoreCandidatesForDependencies(
                StoreNodes, NumStoresToMerge, RootNode)) {
          StoreNodes.erase(StoreNodes.begin(),
                           StoreNodes.begin() + NumStoresToMerge);
          NumConsecutiveStores -= NumStoresToMerge;
          continue;
        }

        RV |= MergeStoresOfConstantsOrVecElts(
            StoreNodes, MemVT, NumStoresToMerge, false, true, false);

        StoreNodes.erase(StoreNodes.begin(),
                         StoreNodes.begin() + NumStoresToMerge);
        NumConsecutiveStores -= NumStoresToMerge;
      }
      continue;
    }

    // Below we handle the case of multiple consecutive stores that
    // come from multiple consecutive loads. We merge them into a single
    // wide load and a single wide store.

    // Look for load nodes which are used by the stored values.
    SmallVector<MemOpLink, 8> LoadNodes;

    // Find acceptable loads. Loads need to have the same chain (token factor),
    // must not be zero-extending, volatile, or indexed, and they must be
    // consecutive.
    BaseIndexOffset LdBasePtr;

    for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
      SDValue Val = peekThroughBitcasts(St->getValue());
      LoadSDNode *Ld = cast<LoadSDNode>(Val);

      BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld, DAG);
      // If this is not the first ptr that we check.
      int64_t LdOffset = 0;
      if (LdBasePtr.getBase().getNode()) {
        // The base ptr must be the same.
        if (!LdBasePtr.equalBaseIndex(LdPtr, DAG, LdOffset))
          break;
      } else {
        // Remember the first base pointer; all later base pointers must
        // match it.
        LdBasePtr = LdPtr;
      }

      // We found a potential memory operand to merge.
      LoadNodes.push_back(MemOpLink(Ld, LdOffset));
    }

    while (NumConsecutiveStores >= 2 && LoadNodes.size() >= 2) {
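      // Each iteration either merges a legal prefix of the remaining
      // candidates or drops leading candidates that cannot start a merge.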
      // If we have load/store pair instructions and we only have two values,
      // don't bother merging.
      unsigned RequiredAlignment;
      if (LoadNodes.size() == 2 &&
          TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
          StoreNodes[0].MemNode->getAlignment() >= RequiredAlignment) {
        StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 2);
        LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + 2);
        break;
      }
      LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
      unsigned FirstStoreAS = FirstInChain->getAddressSpace();
      unsigned FirstStoreAlign = FirstInChain->getAlignment();
      LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
      unsigned FirstLoadAlign = FirstLoad->getAlignment();

      // Scan the memory operations on the chain and find the first
      // non-consecutive load memory address. This variable holds the index
      // into the load node array.

      unsigned LastConsecutiveLoad = 1;

      // These variables refer to sizes, not indices, in the array.
      unsigned LastLegalVectorType = 1;
      unsigned LastLegalIntegerType = 1;
      bool isDereferenceable = true;
      bool DoIntegerTruncate = false;
      StartAddress = LoadNodes[0].OffsetFromBase;
      SDValue FirstChain = FirstLoad->getChain();
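      // Walk forward through the loads, stopping at the first one that is not
      // consecutive or not on the same chain, while tracking the widest legal
      // vector and integer types that cover the stores seen so far.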
      for (unsigned i = 1; i < LoadNodes.size(); ++i) {
        // All loads must share the same chain.
        if (LoadNodes[i].MemNode->getChain() != FirstChain)
          break;

        int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
        if (CurrAddress - StartAddress != (ElementSizeBytes * i))
          break;
        LastConsecutiveLoad = i;

        if (isDereferenceable && !LoadNodes[i].MemNode->isDereferenceable())
          isDereferenceable = false;

        // Find a legal type for the vector store.
        unsigned Elts = (i + 1) * NumMemElts;
        EVT StoreTy = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);

        // Break early when size is too large to be legal.
        if (StoreTy.getSizeInBits() > MaximumLegalStoreInBits)
          break;

        bool IsFastSt, IsFastLd;
        if (TLI.isTypeLegal(StoreTy) &&
            TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
            TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                   *FirstInChain->getMemOperand(), &IsFastSt) &&
            IsFastSt &&
            TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                   *FirstLoad->getMemOperand(), &IsFastLd) &&
            IsFastLd) {
          LastLegalVectorType = i + 1;
        }

        // Find a legal type for the integer store.
        unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
        StoreTy = EVT::getIntegerVT(Context, SizeInBits);
        if (TLI.isTypeLegal(StoreTy) &&
            TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
            TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                   *FirstInChain->getMemOperand(), &IsFastSt) &&
            IsFastSt &&
            TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                   *FirstLoad->getMemOperand(), &IsFastLd) &&
            IsFastLd) {
          LastLegalIntegerType = i + 1;
          DoIntegerTruncate = false;
          // Or check whether a truncstore and extload is legal.
        } else if (TLI.getTypeAction(Context, StoreTy) ==
                   TargetLowering::TypePromoteInteger) {
          EVT LegalizedStoredValTy = TLI.getTypeToTransformTo(Context, StoreTy);
          if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
              TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
              TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValTy,
                                 StoreTy) &&
              TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValTy,
                                 StoreTy) &&
              TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValTy, StoreTy) &&
              TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                     *FirstInChain->getMemOperand(),
                                     &IsFastSt) &&
              IsFastSt &&
              TLI.allowsMemoryAccess(Context, DL, StoreTy,
                                     *FirstLoad->getMemOperand(), &IsFastLd) &&
              IsFastLd) {
            LastLegalIntegerType = i + 1;
            DoIntegerTruncate = true;
          }
        }
      }

      // Only use vector types if the vector type is larger than the integer
      // type. If they are the same, use integers.
      bool UseVectorTy =
          LastLegalVectorType > LastLegalIntegerType && !NoVectors;
      unsigned LastLegalType =
          std::max(LastLegalVectorType, LastLegalIntegerType);

      // We add +1 here because the LastXXX variables refer to an array index
      // while NumElem refers to a count of elements.
      unsigned NumElem =
          std::min(NumConsecutiveStores, LastConsecutiveLoad + 1);
      NumElem = std::min(LastLegalType, NumElem);

      if (NumElem < 2) {
        // We know that candidate stores are in order and of correct
        // shape. While there is no mergeable sequence from the
        // beginning, one may start later in the sequence. The only
        // reason a merge of size N could have failed where another of
        // the same size would not have is if the alignment of either
        // the load or the store has improved. Drop as many candidates
        // as we can here.
        unsigned NumSkip = 1;
        while ((NumSkip < LoadNodes.size()) &&
               (LoadNodes[NumSkip].MemNode->getAlignment() <= FirstLoadAlign) &&
               (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
          NumSkip++;
        StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
        LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumSkip);
        NumConsecutiveStores -= NumSkip;
        continue;
      }

      // Check that we can merge these candidates without causing a cycle.
      if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumElem,
                                                    RootNode)) {
        StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
        LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumElem);
        NumConsecutiveStores -= NumElem;
        continue;
      }

      // Find if it is better to use vectors or integers to load and store
      // to memory.
      EVT JointMemOpVT;
      if (UseVectorTy) {
        // Find a legal type for the vector store.
        unsigned Elts = NumElem * NumMemElts;
        JointMemOpVT = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
      } else {
        unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
        JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits);
      }

      SDLoc LoadDL(LoadNodes[0].MemNode);
      SDLoc StoreDL(StoreNodes[0].MemNode);

      // The merged loads are required to have the same incoming chain, so
      // using the first's chain is acceptable.

      SDValue NewStoreChain = getMergeStoreChains(StoreNodes, NumElem);
      AddToWorklist(NewStoreChain.getNode());

      MachineMemOperand::Flags LdMMOFlags =
          isDereferenceable ? MachineMemOperand::MODereferenceable
                            : MachineMemOperand::MONone;
      if (IsNonTemporalLoad)
        LdMMOFlags |= MachineMemOperand::MONonTemporal;

      MachineMemOperand::Flags StMMOFlags =
          IsNonTemporalStore ? MachineMemOperand::MONonTemporal
                             : MachineMemOperand::MONone;

      SDValue NewLoad, NewStore;
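      // Emit either a plain wide load and store, or, for the promoted-integer
      // case, an extending load followed by a truncating store.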
      if (UseVectorTy || !DoIntegerTruncate) {
        NewLoad =
            DAG.getLoad(JointMemOpVT, LoadDL, FirstLoad->getChain(),
                        FirstLoad->getBasePtr(), FirstLoad->getPointerInfo(),
                        FirstLoadAlign, LdMMOFlags);
        NewStore = DAG.getStore(
            NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(),
            FirstInChain->getPointerInfo(), FirstStoreAlign, StMMOFlags);
      } else { // This must be the truncstore/extload case
        EVT ExtendedTy =
            TLI.getTypeToTransformTo(*DAG.getContext(), JointMemOpVT);
        NewLoad = DAG.getExtLoad(ISD::EXTLOAD, LoadDL, ExtendedTy,
                                 FirstLoad->getChain(), FirstLoad->getBasePtr(),
                                 FirstLoad->getPointerInfo(), JointMemOpVT,
                                 FirstLoadAlign, LdMMOFlags);
        NewStore = DAG.getTruncStore(NewStoreChain, StoreDL, NewLoad,
                                     FirstInChain->getBasePtr(),
                                     FirstInChain->getPointerInfo(),
                                     JointMemOpVT, FirstInChain->getAlignment(),
                                     FirstInChain->getMemOperand()->getFlags());
      }

      // Transfer chain users from old loads to the new load.
      for (unsigned i = 0; i < NumElem; ++i) {
        LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
        DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
                                      SDValue(NewLoad.getNode(), 1));
      }

      // Replace all the stores with the new store. Recursively remove the
      // corresponding values if they are no longer used.
      for (unsigned i = 0; i < NumElem; ++i) {
        SDValue Val = StoreNodes[i].MemNode->getOperand(1);
        CombineTo(StoreNodes[i].MemNode, NewStore);
        if (Val.getNode()->use_empty())
          recursivelyDeleteUnusedNodes(Val.getNode());
      }

      RV = true;
      StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
      LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumElem);
      NumConsecutiveStores -= NumElem;
    }
  }
  return RV;
}

SDValue DAGCombiner::replaceStoreChain(StoreSDNode *ST, SDValue BetterChain) {
  SDLoc SL(ST);
  SDValue ReplStore;

  // Replace the chain to avoid dependency.
  if (ST->isTruncatingStore()) {
    ReplStore = DAG.getTruncStore(BetterChain, SL, ST->getValue(),
                                  ST->getBasePtr(), ST->getMemoryVT(),
                                  ST->getMemOperand());
  } else {
    ReplStore = DAG.getStore(BetterChain, SL, ST->getValue(), ST->getBasePtr(),
                             ST->getMemOperand());
  }

  // Create token to keep both nodes around.
  SDValue Token = DAG.getNode(ISD::TokenFactor, SL,
                              MVT::Other, ST->getChain(), ReplStore);

  // Make sure the new and old chains are cleaned up.
  AddToWorklist(Token.getNode());

  // Don't add users to work list.
  return CombineTo(ST, Token, false);
}

SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {
  SDValue Value = ST->getValue();
  if (Value.getOpcode() == ISD::TargetConstantFP)
    return SDValue();

  SDLoc DL(ST);

  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();

  const ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Value);

  // NOTE: If the original store is volatile, this transform must not increase
  // the number of stores.  For example, on x86-32 an f64 can be stored in one
  // processor operation but an i64 (which is not legal) requires two.  So the
  // transform should not be done in this case.

  SDValue Tmp;
  switch (CFP->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unknown FP type");
  case MVT::f16:    // We don't do this for these yet.
  case MVT::f80:
  case MVT::f128:
  case MVT::ppcf128:
    return SDValue();
  case MVT::f32:
    if ((isTypeLegal(MVT::i32) && !LegalOperations && ST->isSimple()) ||
        TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
      Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
                            bitcastToAPInt().getZExtValue(), SDLoc(CFP),
                            MVT::i32);
      return DAG.getStore(Chain, DL, Tmp, Ptr, ST->getMemOperand());
    }

    return SDValue();
  case MVT::f64:
    if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
         ST->isSimple()) ||
        TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
      Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                            getZExtValue(), SDLoc(CFP), MVT::i64);
      return DAG.getStore(Chain, DL, Tmp,
                          Ptr, ST->getMemOperand());
    }

    if (ST->isSimple() &&
        TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
      // Many FP stores are not made apparent until after legalize, e.g. for
      // argument passing.  Since this is so common, custom legalize the
      // 64-bit integer store into two 32-bit stores.
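      // For example, on a little-endian target, 'store double 1.0, Ptr'
      // (bit pattern 0x3FF0000000000000) becomes 'store i32 0, Ptr' followed
      // by 'store i32 0x3FF00000, Ptr+4'.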
      uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
      SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32);
      SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32);
      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      unsigned Alignment = ST->getAlignment();
      MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
      AAMDNodes AAInfo = ST->getAAInfo();

      SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
                                 ST->getAlignment(), MMOFlags, AAInfo);
      Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
                        DAG.getConstant(4, DL, Ptr.getValueType()));
      Alignment = MinAlign(Alignment, 4U);
      SDValue St1 = DAG.getStore(Chain, DL, Hi, Ptr,
                                 ST->getPointerInfo().getWithOffset(4),
                                 Alignment, MMOFlags, AAInfo);
      return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                         St0, St1);
    }

    return SDValue();
  }
}

SDValue DAGCombiner::visitSTORE(SDNode *N) {
  StoreSDNode *ST  = cast<StoreSDNode>(N);
  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  SDValue Ptr   = ST->getBasePtr();

  // If this is a store of a bit convert, store the input value if the
  // resultant store does not need a higher alignment than the original.
  if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
      ST->isUnindexed()) {
    EVT SVT = Value.getOperand(0).getValueType();
    // If the store is volatile, we only want to change the store type if the
    // resulting store is legal. Otherwise we might increase the number of
    // memory accesses. We don't care if the original type was legal or not
    // as we assume software couldn't rely on the number of accesses of an
    // illegal type.
    // TODO: May be able to relax for unordered atomics (see D66309)
    if (((!LegalOperations && ST->isSimple()) ||
         TLI.isOperationLegal(ISD::STORE, SVT)) &&
        TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT,
                                     DAG, *ST->getMemOperand())) {
      return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
                          ST->getPointerInfo(), ST->getAlignment(),
                          ST->getMemOperand()->getFlags(), ST->getAAInfo());
    }
  }

  // Turn 'store undef, Ptr' -> nothing.
  if (Value.isUndef() && ST->isUnindexed())
    return Chain;

  // Try to infer better alignment information than the store already has.
  if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
    if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
      if (Align > ST->getAlignment() && ST->getSrcValueOffset() % Align == 0) {
        SDValue NewStore =
            DAG.getTruncStore(Chain, SDLoc(N), Value, Ptr, ST->getPointerInfo(),
                              ST->getMemoryVT(), Align,
                              ST->getMemOperand()->getFlags(), ST->getAAInfo());
        // NewStore will always be N as we are only refining the alignment
        assert(NewStore.getNode() == N);
        (void)NewStore;
      }
    }
  }

  // Try transforming a pair floating point load / store ops to integer
  // load / store ops.
  if (SDValue NewST = TransformFPLoadStorePair(N))
    return NewST;

  // Try transforming several stores into STORE (BSWAP).
  if (SDValue Store = MatchStoreCombine(ST))
    return Store;

  if (ST->isUnindexed()) {
    // Walk up chain skipping non-aliasing memory nodes, on this store and any
    // adjacent stores.
    if (findBetterNeighborChains(ST)) {
      // replaceStoreChain uses CombineTo, which handled all of the worklist
      // manipulation. Return the original node to not do anything else.
      return SDValue(ST, 0);
    }
    Chain = ST->getChain();
  }

  // FIXME: is there such a thing as a truncating indexed store?
  if (ST->isTruncatingStore() && ST->isUnindexed() &&
      Value.getValueType().isInteger() &&
      (!isa<ConstantSDNode>(Value) ||
       !cast<ConstantSDNode>(Value)->isOpaque())) {
    APInt TruncDemandedBits =
        APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
                             ST->getMemoryVT().getScalarSizeInBits());

    // See if we can simplify the input to this truncstore with knowledge that
    // only the low bits are being used.  For example:
    // "truncstore (or (shl x, 8), y), i8"  -> "truncstore y, i8"
    AddToWorklist(Value.getNode());
    if (SDValue Shorter = DAG.GetDemandedBits(Value, TruncDemandedBits))
      return DAG.getTruncStore(Chain, SDLoc(N), Shorter, Ptr, ST->getMemoryVT(),
                               ST->getMemOperand());

    // Otherwise, see if we can simplify the operation with
    // SimplifyDemandedBits, which only works if the value has a single use.
    if (SimplifyDemandedBits(Value, TruncDemandedBits)) {
      // Re-visit the store if anything changed and the store hasn't been
      // merged with another node (in which case N is deleted).
      // SimplifyDemandedBits will add Value's node back to the worklist if
      // necessary, but we also need to re-visit the Store node itself.
      if (N->getOpcode() != ISD::DELETED_NODE)
        AddToWorklist(N);
      return SDValue(N, 0);
    }
  }

  // If this is a load followed by a store to the same location, then the store
  // is dead/noop.
  // TODO: Can relax for unordered atomics (see D66309)
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
    if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
        ST->isUnindexed() && ST->isSimple() &&
        // There can't be any side effects between the load and store, such as
        // a call or store.
        Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
      // The store is dead, remove it.
      return Chain;
    }
  }

  // TODO: Can relax for unordered atomics (see D66309)
  if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) {
    if (ST->isUnindexed() && ST->isSimple() &&
        ST1->isUnindexed() && ST1->isSimple()) {
      if (ST1->getBasePtr() == Ptr && ST1->getValue() == Value &&
          ST->getMemoryVT() == ST1->getMemoryVT()) {
        // If this is a store followed by a store with the same value to the
        // same location, then the store is dead/noop.
        return Chain;
      }

      if (OptLevel != CodeGenOpt::None && ST1->hasOneUse() &&
          !ST1->getBasePtr().isUndef()) {
        const BaseIndexOffset STBase = BaseIndexOffset::match(ST, DAG);
        const BaseIndexOffset ChainBase = BaseIndexOffset::match(ST1, DAG);
        unsigned STBitSize = ST->getMemoryVT().getSizeInBits();
        unsigned ChainBitSize = ST1->getMemoryVT().getSizeInBits();
        // If the preceding store writes to a subset of the current store's
        // location and no other node is chained to that store, we can
        // effectively drop it. Do not remove stores to undef as they may
        // be used as data sinks.
        if (STBase.contains(DAG, STBitSize, ChainBase, ChainBitSize)) {
          CombineTo(ST1, ST1->getChain());
          return SDValue();
        }

        // If ST stores to a subset of preceding store's write set, we may be
        // able to fold ST's value into the preceding stored value. As we know
        // the other uses of ST1's chain are unconcerned with ST, this folding
        // will not affect those nodes.
        int64_t BitOffset;
        if (ChainBase.contains(DAG, ChainBitSize, STBase, STBitSize,
                               BitOffset)) {
          SDValue ChainValue = ST1->getValue();
          if (auto *C1 = dyn_cast<ConstantSDNode>(ChainValue)) {
            if (auto *C = dyn_cast<ConstantSDNode>(Value)) {
              APInt Val = C1->getAPIntValue();
              APInt InsertVal = C->getAPIntValue().zextOrTrunc(STBitSize);
              // FIXME: Handle Big-endian mode.
              if (!DAG.getDataLayout().isBigEndian()) {
                Val.insertBits(InsertVal, BitOffset);
                SDValue NewSDVal =
                    DAG.getConstant(Val, SDLoc(C), ChainValue.getValueType(),
                                    C1->isTargetOpcode(), C1->isOpaque());
                SDNode *NewST1 = DAG.UpdateNodeOperands(
                    ST1, ST1->getChain(), NewSDVal, ST1->getOperand(2),
                    ST1->getOperand(3));
                return CombineTo(ST, SDValue(NewST1, 0));
              }
            }
          }
        } // End ST subset of ST1 case.
      }
    }
  }

  // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
  // truncating store.  We can do this even if this is already a truncstore.
  if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
      && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                            ST->getMemoryVT())) {
    return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0),
                             Ptr, ST->getMemoryVT(), ST->getMemOperand());
  }

  // Always perform this optimization before types are legal. If the target
  // prefers, also try this after legalization to catch stores that were created
  // by intrinsics or other nodes.
  if (!LegalTypes || (TLI.mergeStoresAfterLegalization(ST->getMemoryVT()))) {
    while (true) {
      // There can be multiple store sequences on the same chain.
      // Keep trying to merge store sequences until we are unable to do so
      // or until we merge the last store on the chain.
      bool Changed = MergeConsecutiveStores(ST);
      if (!Changed) break;
      // Return N as merge only uses CombineTo and no worklist clean
      // up is necessary.
      if (N->getOpcode() == ISD::DELETED_NODE || !isa<StoreSDNode>(N))
        return SDValue(N, 0);
    }
  }

  // Try transforming N to an indexed store.
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDValue(N, 0);

  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  //
  // Make sure to do this only after attempting to merge stores in order to
  //  avoid changing the types of some subset of stores due to visit order,
  //  preventing their merging.
  if (isa<ConstantFPSDNode>(ST->getValue())) {
    if (SDValue NewSt = replaceStoreOfFPConstant(ST))
      return NewSt;
  }

  if (SDValue NewSt = splitMergedValStore(ST))
    return NewSt;

  return ReduceLoadOpStoreWidth(N);
}

SDValue DAGCombiner::visitLIFETIME_END(SDNode *N) {
  const auto *LifetimeEnd = cast<LifetimeSDNode>(N);
  if (!LifetimeEnd->hasOffset())
    return SDValue();

  const BaseIndexOffset LifetimeEndBase(N->getOperand(1), SDValue(),
                                        LifetimeEnd->getOffset(), false);

  // We walk up the chains to find stores.
  SmallVector<SDValue, 8> Chains = {N->getOperand(0)};
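  // Walk through TokenFactors and lifetime markers that provably don't alias
  // this node; any simple store found along the way is a removal candidate.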
  while (!Chains.empty()) {
    SDValue Chain = Chains.back();
    Chains.pop_back();
    if (!Chain.hasOneUse())
      continue;
    switch (Chain.getOpcode()) {
    case ISD::TokenFactor:
      for (unsigned Nops = Chain.getNumOperands(); Nops;)
        Chains.push_back(Chain.getOperand(--Nops));
      break;
    case ISD::LIFETIME_START:
    case ISD::LIFETIME_END:
      // We can forward past any lifetime start/end that can be proven not to
      // alias the node.
      if (!isAlias(Chain.getNode(), N))
        Chains.push_back(Chain.getOperand(0));
      break;
    case ISD::STORE: {
      StoreSDNode *ST = dyn_cast<StoreSDNode>(Chain);
      // TODO: Can relax for unordered atomics (see D66309)
      if (!ST->isSimple() || ST->isIndexed())
        continue;
      const BaseIndexOffset StoreBase = BaseIndexOffset::match(ST, DAG);
      // If we store purely within object bounds just before its lifetime ends,
      // we can remove the store.
      if (LifetimeEndBase.contains(DAG, LifetimeEnd->getSize() * 8, StoreBase,
                                   ST->getMemoryVT().getStoreSizeInBits())) {
        LLVM_DEBUG(dbgs() << "\nRemoving store:"; StoreBase.dump();
                   dbgs() << "\nwithin LIFETIME_END of : ";
                   LifetimeEndBase.dump(); dbgs() << "\n");
        CombineTo(ST, ST->getChain());
        return SDValue(N, 0);
      }
    }
    }
  }
  return SDValue();
}

/// For the instruction sequence of store below, F and I values
/// are bundled together as an i64 value before being stored into memory.
/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
///   (store (or (zext (bitcast F to i32) to i64),
///              (shl (zext I to i64), 32)), addr)  -->
///   (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting other merged stores can also be beneficial, e.g.:
/// For pair of {i32, i32}, i64 store --> two i32 stores.
/// For pair of {i32, i16}, i64 store --> two i32 stores.
/// For pair of {i16, i16}, i32 store --> two i16 stores.
/// For pair of {i16, i8},  i32 store --> two i16 stores.
/// For pair of {i8, i8},   i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kind of splitting is
/// supported.
///
/// These store patterns are commonly seen in the simple code snippet below
/// when only std::make_pair(...) is SROA-transformed before being inlined
/// into hoo().
///   void goo(const std::pair<int, float> &);
///   void hoo() {
///     ...
///     goo(std::make_pair(tmp, ftmp));
///     ...
///   }
///
SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
  if (OptLevel == CodeGenOpt::None)
    return SDValue();

  // Can't change the number of memory accesses for a volatile store or break
  // atomicity for an atomic one.
  if (!ST->isSimple())
    return SDValue();

  SDValue Val = ST->getValue();
  SDLoc DL(ST);

  // Match OR operand.
  if (!Val.getValueType().isScalarInteger() || Val.getOpcode() != ISD::OR)
    return SDValue();

  // Match SHL operand and get Lower and Higher parts of Val.
  SDValue Op1 = Val.getOperand(0);
  SDValue Op2 = Val.getOperand(1);
  SDValue Lo, Hi;
  if (Op1.getOpcode() != ISD::SHL) {
    std::swap(Op1, Op2);
    if (Op1.getOpcode() != ISD::SHL)
      return SDValue();
  }
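  // At this point Op1 is the SHL feeding the high half and Op2 feeds the
  // low half of the value.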
  Lo = Op2;
  Hi = Op1.getOperand(0);
  if (!Op1.hasOneUse())
    return SDValue();

  // Match shift amount to HalfValBitSize.
  unsigned HalfValBitSize = Val.getValueSizeInBits() / 2;
  ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1));
  if (!ShAmt || ShAmt->getAPIntValue() != HalfValBitSize)
    return SDValue();

  // Lo and Hi must be zero-extended from integers whose size is at most
  // HalfValBitSize.
  if (Lo.getOpcode() != ISD::ZERO_EXTEND || !Lo.hasOneUse() ||
      !Lo.getOperand(0).getValueType().isScalarInteger() ||
      Lo.getOperand(0).getValueSizeInBits() > HalfValBitSize ||
      Hi.getOpcode() != ISD::ZERO_EXTEND || !Hi.hasOneUse() ||
      !Hi.getOperand(0).getValueType().isScalarInteger() ||
      Hi.getOperand(0).getValueSizeInBits() > HalfValBitSize)
    return SDValue();

  // Use the EVT of low and high parts before bitcast as the input
  // of target query.
  EVT LowTy = (Lo.getOperand(0).getOpcode() == ISD::BITCAST)
                  ? Lo.getOperand(0).getValueType()
                  : Lo.getValueType();
  EVT HighTy = (Hi.getOperand(0).getOpcode() == ISD::BITCAST)
                   ? Hi.getOperand(0).getValueType()
                   : Hi.getValueType();
  if (!TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
    return SDValue();

  // Start to split store.
  unsigned Alignment = ST->getAlignment();
  MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
  AAMDNodes AAInfo = ST->getAAInfo();

  // Change the sizes of Lo and Hi's value types to HalfValBitSize.
  EVT VT = EVT::getIntegerVT(*DAG.getContext(), HalfValBitSize);
  Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Lo.getOperand(0));
  Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Hi.getOperand(0));

  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  // Lower value store.
  SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
                             ST->getAlignment(), MMOFlags, AAInfo);
  Ptr =
      DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
                  DAG.getConstant(HalfValBitSize / 8, DL, Ptr.getValueType()));
  // Higher value store.
  SDValue St1 =
      DAG.getStore(St0, DL, Hi, Ptr,
                   ST->getPointerInfo().getWithOffset(HalfValBitSize / 8),
                   Alignment / 2, MMOFlags, AAInfo);
  return St1;
}

/// Convert a disguised subvector insertion into a shuffle:
SDValue DAGCombiner::combineInsertEltToShuffle(SDNode *N, unsigned InsIndex) {
  SDValue InsertVal = N->getOperand(1);
  SDValue Vec = N->getOperand(0);

  // (insert_vector_elt (vector_shuffle X, Y), (extract_vector_elt X, N), InsIndex)
  //   --> (vector_shuffle X, Y)
  if (Vec.getOpcode() == ISD::VECTOR_SHUFFLE && Vec.hasOneUse() &&
      InsertVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      isa<ConstantSDNode>(InsertVal.getOperand(1))) {
    ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Vec.getNode());
    ArrayRef<int> Mask = SVN->getMask();

    SDValue X = Vec.getOperand(0);
    SDValue Y = Vec.getOperand(1);

    // Vec's operand 0 is using indices from 0 to N-1 and
    // operand 1 from N to 2N - 1, where N is the number of
    // elements in the vectors.
    int XOffset = -1;
    if (InsertVal.getOperand(0) == X) {
      XOffset = 0;
    } else if (InsertVal.getOperand(0) == Y) {
      XOffset = X.getValueType().getVectorNumElements();
    }

    if (XOffset != -1) {
      SmallVector<int, 16> NewMask(Mask.begin(), Mask.end());

      auto *ExtrIndex = cast<ConstantSDNode>(InsertVal.getOperand(1));
      NewMask[InsIndex] = XOffset + ExtrIndex->getZExtValue();
      assert(NewMask[InsIndex] <
                 (int)(2 * Vec.getValueType().getVectorNumElements()) &&
             NewMask[InsIndex] >= 0 && "NewMask[InsIndex] is out of bounds");

      SDValue LegalShuffle =
              TLI.buildLegalVectorShuffle(Vec.getValueType(), SDLoc(N), X,
                                          Y, NewMask, DAG);
      if (LegalShuffle)
        return LegalShuffle;
    }
  }

  // insert_vector_elt V, (bitcast X from vector type), IdxC -->
  // bitcast(shuffle (bitcast V), (extended X), Mask)
  // Note: We do not use an insert_subvector node because that requires a
  // legal subvector type.
  if (InsertVal.getOpcode() != ISD::BITCAST || !InsertVal.hasOneUse() ||
      !InsertVal.getOperand(0).getValueType().isVector())
    return SDValue();

  SDValue SubVec = InsertVal.getOperand(0);
  SDValue DestVec = N->getOperand(0);
  EVT SubVecVT = SubVec.getValueType();
  EVT VT = DestVec.getValueType();
  unsigned NumSrcElts = SubVecVT.getVectorNumElements();
  unsigned ExtendRatio = VT.getSizeInBits() / SubVecVT.getSizeInBits();
  unsigned NumMaskVals = ExtendRatio * NumSrcElts;

  // Step 1: Create a shuffle mask that implements this insert operation. The
  // vector that we are inserting into will be operand 0 of the shuffle, so
  // those elements are just 'i'. The inserted subvector is in the first
  // positions of operand 1 of the shuffle. Example:
  // insert v4i32 V, (v2i16 X), 2 --> shuffle v8i16 V', X', {0,1,2,3,8,9,6,7}
  SmallVector<int, 16> Mask(NumMaskVals);
  for (unsigned i = 0; i != NumMaskVals; ++i) {
    if (i / NumSrcElts == InsIndex)
      Mask[i] = (i % NumSrcElts) + NumMaskVals;
    else
      Mask[i] = i;
  }

  // Bail out if the target cannot handle the shuffle we want to create.
  EVT SubVecEltVT = SubVecVT.getVectorElementType();
  EVT ShufVT = EVT::getVectorVT(*DAG.getContext(), SubVecEltVT, NumMaskVals);
  if (!TLI.isShuffleMaskLegal(Mask, ShufVT))
    return SDValue();

  // Step 2: Create a wide vector from the inserted source vector by appending
  // undefined elements. This is the same size as our destination vector.
  SDLoc DL(N);
  SmallVector<SDValue, 8> ConcatOps(ExtendRatio, DAG.getUNDEF(SubVecVT));
  ConcatOps[0] = SubVec;
  SDValue PaddedSubV = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShufVT, ConcatOps);

  // Step 3: Shuffle in the padded subvector.
  SDValue DestVecBC = DAG.getBitcast(ShufVT, DestVec);
  SDValue Shuf = DAG.getVectorShuffle(ShufVT, DL, DestVecBC, PaddedSubV, Mask);
  AddToWorklist(PaddedSubV.getNode());
  AddToWorklist(DestVecBC.getNode());
  AddToWorklist(Shuf.getNode());
  return DAG.getBitcast(VT, Shuf);
}

SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
  SDValue InVec = N->getOperand(0);
  SDValue InVal = N->getOperand(1);
  SDValue EltNo = N->getOperand(2);
  SDLoc DL(N);

  EVT VT = InVec.getValueType();
  unsigned NumElts = VT.getVectorNumElements();

  // Remove redundant insertions:
  // (insert_vector_elt x (extract_vector_elt x idx) idx) -> x
  if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      InVec == InVal.getOperand(0) && EltNo == InVal.getOperand(1))
    return InVec;

  auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
  if (!IndexC) {
    // If this is a variable insert into an undef vector, it might be better
    // to splat:
    // inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
    if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT)) {
      SmallVector<SDValue, 8> Ops(NumElts, InVal);
      return DAG.getBuildVector(VT, DL, Ops);
    }
    return SDValue();
  }

  // We must know which element is being inserted for folds below here.
  unsigned Elt = IndexC->getZExtValue();
  if (SDValue Shuf = combineInsertEltToShuffle(N, Elt))
    return Shuf;

  // Canonicalize insert_vector_elt dag nodes.
  // Example:
  // (insert_vector_elt (insert_vector_elt A, Idx0), Idx1)
  // -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0)
  //
  // Do this only if the child insert_vector node has one use; also
  // do this only if indices are both constants and Idx1 < Idx0.
  if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse()
      && isa<ConstantSDNode>(InVec.getOperand(2))) {
    unsigned OtherElt = InVec.getConstantOperandVal(2);
    if (Elt < OtherElt) {
      // Swap nodes.
      SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
                                  InVec.getOperand(0), InVal, EltNo);
      AddToWorklist(NewOp.getNode());
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()),
                         VT, NewOp, InVec.getOperand(1), InVec.getOperand(2));
    }
  }

  // If we can't generate a legal BUILD_VECTOR, exit
  if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
    return SDValue();

  // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
  // be converted to a BUILD_VECTOR).  Fill in the Ops vector with the
  // vector elements.
  SmallVector<SDValue, 8> Ops;
  // Do not combine these two vectors if the output vector will not replace
  // the input vector.
  if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) {
    Ops.append(InVec.getNode()->op_begin(),
               InVec.getNode()->op_end());
  } else if (InVec.isUndef()) {
    Ops.append(NumElts, DAG.getUNDEF(InVal.getValueType()));
  } else {
    return SDValue();
  }
  assert(Ops.size() == NumElts && "Unexpected vector size");

  // Insert the element
  if (Elt < Ops.size()) {
    // All the operands of BUILD_VECTOR must have the same type;
    // we enforce that here.
    EVT OpVT = Ops[0].getValueType();
    Ops[Elt] = OpVT.isInteger() ? DAG.getAnyExtOrTrunc(InVal, DL, OpVT) : InVal;
  }

  // Return the new vector
  return DAG.getBuildVector(VT, DL, Ops);
}

SDValue DAGCombiner::scalarizeExtractedVectorLoad(SDNode *EVE, EVT InVecVT,
                                                  SDValue EltNo,
                                                  LoadSDNode *OriginalLoad) {
  assert(OriginalLoad->isSimple());
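  // Only simple (non-volatile, non-atomic) loads are candidates for
  // narrowing to a scalar element load.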

  EVT ResultVT = EVE->getValueType(0);
  EVT VecEltVT = InVecVT.getVectorElementType();
  unsigned Align = OriginalLoad->getAlignment();
  unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
      VecEltVT.getTypeForEVT(*DAG.getContext()));

  if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
    return SDValue();

  ISD::LoadExtType ExtTy = ResultVT.bitsGT(VecEltVT) ?
    ISD::NON_EXTLOAD : ISD::EXTLOAD;
  if (!TLI.shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT))
    return SDValue();

  Align = NewAlign;

  SDValue NewPtr = OriginalLoad->getBasePtr();
  SDValue Offset;
  EVT PtrType = NewPtr.getValueType();
  MachinePointerInfo MPI;
  SDLoc DL(EVE);
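  // For a constant index, fold the element's byte offset into the pointer
  // info; for a variable index, compute 'Ptr + EltNo * EltSize' dynamically.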
  if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) {
    int Elt = ConstEltNo->getZExtValue();
    unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8;
    Offset = DAG.getConstant(PtrOff, DL, PtrType);
    MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff);
  } else {
    Offset = DAG.getZExtOrTrunc(EltNo, DL, PtrType);
    Offset = DAG.getNode(
        ISD::MUL, DL, PtrType, Offset,
        DAG.getConstant(VecEltVT.getStoreSize(), DL, PtrType));
    // Discard the pointer info except the address space because the memory
    // operand can't represent this new access since the offset is variable.
    MPI = MachinePointerInfo(OriginalLoad->getPointerInfo().getAddrSpace());
  }
  NewPtr = DAG.getNode(ISD::ADD, DL, PtrType, NewPtr, Offset);

  // The replacement we need to do here is a little tricky: we need to
  // replace an extractelement of a load with a load.
  // Use ReplaceAllUsesOfValuesWith to do the replacement.
  // Note that this replacement assumes that the extractvalue is the only
  // use of the load; that's okay because we don't want to perform this
  // transformation in other cases anyway.
  SDValue Load;
  SDValue Chain;
  if (ResultVT.bitsGT(VecEltVT)) {
    // If the result type of vextract is wider than the load, then issue an
    // extending load instead.
    ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, ResultVT,
                                                  VecEltVT)
                                   ? ISD::ZEXTLOAD
                                   : ISD::EXTLOAD;
    Load = DAG.getExtLoad(ExtType, SDLoc(EVE), ResultVT,
                          OriginalLoad->getChain(), NewPtr, MPI, VecEltVT,
                          Align, OriginalLoad->getMemOperand()->getFlags(),
                          OriginalLoad->getAAInfo());
    Chain = Load.getValue(1);
  } else {
    Load = DAG.getLoad(VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr,
                       MPI, Align, OriginalLoad->getMemOperand()->getFlags(),
                       OriginalLoad->getAAInfo());
    Chain = Load.getValue(1);
    if (ResultVT.bitsLT(VecEltVT))
      Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
    else
      Load = DAG.getBitcast(ResultVT, Load);
  }
  WorklistRemover DeadNodes(*this);
  SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
  SDValue To[] = { Load, Chain };
  DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
  // Make sure to revisit this node to clean it up; it will usually be dead.
  AddToWorklist(EVE);
  // Since we're explicitly calling ReplaceAllUses, add the new node to the
  // worklist explicitly as well.
  AddUsersToWorklist(Load.getNode()); // Add users too
  AddToWorklist(Load.getNode());
  ++OpsNarrowed;
  return SDValue(EVE, 0);
}

/// Transform a vector binary operation into a scalar binary operation by moving
/// the math/logic after an extract element of a vector.
static SDValue scalarizeExtractedBinop(SDNode *ExtElt, SelectionDAG &DAG,
                                       bool LegalOperations) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Vec = ExtElt->getOperand(0);
  SDValue Index = ExtElt->getOperand(1);
  auto *IndexC = dyn_cast<ConstantSDNode>(Index);
  if (!IndexC || !TLI.isBinOp(Vec.getOpcode()) || !Vec.hasOneUse() ||
      Vec.getNode()->getNumValues() != 1)
    return SDValue();

  // Targets may want to avoid this to prevent an expensive register transfer.
  if (!TLI.shouldScalarizeBinop(Vec))
    return SDValue();

  // Extracting an element of a vector constant is constant-folded, so this
  // transform is just replacing a vector op with a scalar op while moving the
  // extract.
  SDValue Op0 = Vec.getOperand(0);
  SDValue Op1 = Vec.getOperand(1);
  if (isAnyConstantBuildVector(Op0, true) ||
      isAnyConstantBuildVector(Op1, true)) {
    // extractelt (binop X, C), IndexC --> binop (extractelt X, IndexC), C'
    // extractelt (binop C, X), IndexC --> binop C', (extractelt X, IndexC)
    SDLoc DL(ExtElt);
    EVT VT = ExtElt->getValueType(0);
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Index);
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op1, Index);
    return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1);
  }

  return SDValue();
}

SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
  SDValue VecOp = N->getOperand(0);
  SDValue Index = N->getOperand(1);
  EVT ScalarVT = N->getValueType(0);
  EVT VecVT = VecOp.getValueType();
  if (VecOp.isUndef())
    return DAG.getUNDEF(ScalarVT);

  // (extract_vector_elt (insert_vector_elt vec, val, idx), idx) -> val
  //
  // This only really matters if the index is non-constant since other combines
  // on the constant elements already work.
  SDLoc DL(N);
  if (VecOp.getOpcode() == ISD::INSERT_VECTOR_ELT &&
      Index == VecOp.getOperand(2)) {
    SDValue Elt = VecOp.getOperand(1);
    return VecVT.isInteger() ? DAG.getAnyExtOrTrunc(Elt, DL, ScalarVT) : Elt;
  }

  // (vextract (scalar_to_vector val), 0) -> val
  if (VecOp.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    // Check if the result type doesn't match the inserted element type. A
    // SCALAR_TO_VECTOR may truncate the inserted element and the
    // EXTRACT_VECTOR_ELT may widen the extracted vector.
    SDValue InOp = VecOp.getOperand(0);
    if (InOp.getValueType() != ScalarVT) {
      assert(InOp.getValueType().isInteger() && ScalarVT.isInteger());
      return DAG.getSExtOrTrunc(InOp, DL, ScalarVT);
    }
    return InOp;
  }

  // extract_vector_elt of out-of-bounds element -> UNDEF
  auto *IndexC = dyn_cast<ConstantSDNode>(Index);
  unsigned NumElts = VecVT.getVectorNumElements();
  if (IndexC && IndexC->getAPIntValue().uge(NumElts))
    return DAG.getUNDEF(ScalarVT);

  // extract_vector_elt (build_vector x, y), 1 -> y
  if (IndexC && VecOp.getOpcode() == ISD::BUILD_VECTOR &&
      TLI.isTypeLegal(VecVT) &&
      (VecOp.hasOneUse() || TLI.aggressivelyPreferBuildVectorSources(VecVT))) {
    SDValue Elt = VecOp.getOperand(IndexC->getZExtValue());
    EVT InEltVT = Elt.getValueType();

    // Sometimes build_vector's scalar input types do not match result type.
    if (ScalarVT == InEltVT)
      return Elt;

    // TODO: It may be useful to truncate if free if the build_vector implicitly
    // converts.
  }

  // TODO: These transforms should not require the 'hasOneUse' restriction, but
  // there are regressions on multiple targets without it. We can end up with a
  // mess of scalar and vector code if we reduce only part of the DAG to scalar.
  if (IndexC && VecOp.getOpcode() == ISD::BITCAST && VecVT.isInteger() &&
      VecOp.hasOneUse()) {
    // The vector index of the LSBs of the source depends on the endianness.
    bool IsLE = DAG.getDataLayout().isLittleEndian();
    unsigned ExtractIndex = IndexC->getZExtValue();
    // extract_elt (v2i32 (bitcast i64:x)), BCTruncElt -> i32 (trunc i64:x)
    unsigned BCTruncElt = IsLE ? 0 : NumElts - 1;
    SDValue BCSrc = VecOp.getOperand(0);
    if (ExtractIndex == BCTruncElt && BCSrc.getValueType().isScalarInteger())
      return DAG.getNode(ISD::TRUNCATE, DL, ScalarVT, BCSrc);

    if (LegalTypes && BCSrc.getValueType().isInteger() &&
        BCSrc.getOpcode() == ISD::SCALAR_TO_VECTOR) {
      // ext_elt (bitcast (scalar_to_vec i64 X to v2i64) to v4i32), TruncElt -->
      // trunc i64 X to i32
      SDValue X = BCSrc.getOperand(0);
      assert(X.getValueType().isScalarInteger() && ScalarVT.isScalarInteger() &&
             "Extract element and scalar to vector can't change element type "
             "from FP to integer.");
      unsigned XBitWidth = X.getValueSizeInBits();
      unsigned VecEltBitWidth = VecVT.getScalarSizeInBits();
      BCTruncElt = IsLE ? 0 : XBitWidth / VecEltBitWidth - 1;

      // An extract element return value type can be wider than its vector
      // operand element type. In that case, the high bits are undefined, so
      // it's possible that we may need to extend rather than truncate.
      if (ExtractIndex == BCTruncElt && XBitWidth > VecEltBitWidth) {
        assert(XBitWidth % VecEltBitWidth == 0 &&
               "Scalar bitwidth must be a multiple of vector element bitwidth");
        return DAG.getAnyExtOrTrunc(X, DL, ScalarVT);
      }
    }
  }

  if (SDValue BO = scalarizeExtractedBinop(N, DAG, LegalOperations))
    return BO;

  // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
  // We only perform this optimization before the op legalization phase because
  // we may introduce new vector instructions which are not backed by TD
  // patterns. For example on AVX, extracting elements from a wide vector
  // without using extract_subvector. However, if we can find an underlying
  // scalar value, then we can always use that.
  if (IndexC && VecOp.getOpcode() == ISD::VECTOR_SHUFFLE) {
    auto *Shuf = cast<ShuffleVectorSDNode>(VecOp);
    // Find the new index to extract from.
    int OrigElt = Shuf->getMaskElt(IndexC->getZExtValue());

    // Extracting an undef index is undef.
    if (OrigElt == -1)
      return DAG.getUNDEF(ScalarVT);

    // Select the right vector half to extract from.
    SDValue SVInVec;
    if (OrigElt < (int)NumElts) {
      SVInVec = VecOp.getOperand(0);
    } else {
      SVInVec = VecOp.getOperand(1);
      OrigElt -= NumElts;
    }

    if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue InOp = SVInVec.getOperand(OrigElt);
      if (InOp.getValueType() != ScalarVT) {
        assert(InOp.getValueType().isInteger() && ScalarVT.isInteger());
        InOp = DAG.getSExtOrTrunc(InOp, DL, ScalarVT);
      }

      return InOp;
    }

    // FIXME: We should handle recursing on other vector shuffles and
    // scalar_to_vector here as well.

    if (!LegalOperations ||
        // FIXME: Should really be just isOperationLegalOrCustom.
        TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, VecVT) ||
        TLI.isOperationExpand(ISD::VECTOR_SHUFFLE, VecVT)) {
      EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, SVInVec,
                         DAG.getConstant(OrigElt, DL, IndexTy));
    }
  }

  // If only EXTRACT_VECTOR_ELT nodes use the source vector we can
  // simplify it based on the (valid) extraction indices.
  if (llvm::all_of(VecOp->uses(), [&](SDNode *Use) {
        return Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
               Use->getOperand(0) == VecOp &&
               isa<ConstantSDNode>(Use->getOperand(1));
      })) {
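    // Union the lanes demanded by every constant-index extract of this vector.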
    APInt DemandedElts = APInt::getNullValue(NumElts);
    for (SDNode *Use : VecOp->uses()) {
      auto *CstElt = cast<ConstantSDNode>(Use->getOperand(1));
      if (CstElt->getAPIntValue().ult(NumElts))
        DemandedElts.setBit(CstElt->getZExtValue());
    }
    if (SimplifyDemandedVectorElts(VecOp, DemandedElts, true)) {
      // We simplified the vector operand of this extract element. If this
      // extract is not dead, visit it again so it is folded properly.
      if (N->getOpcode() != ISD::DELETED_NODE)
        AddToWorklist(N);
      return SDValue(N, 0);
    }
  }

  // Everything under here is trying to match an extract of a loaded value.
  // If the result of the load has to be truncated, then it's not necessarily
  // profitable.
  bool BCNumEltsChanged = false;
  EVT ExtVT = VecVT.getVectorElementType();
  EVT LVT = ExtVT;
  if (ScalarVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, ScalarVT))
    return SDValue();

  if (VecOp.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!VecOp.hasOneUse())
      return SDValue();

    EVT BCVT = VecOp.getOperand(0).getValueType();
    if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
      return SDValue();
    if (NumElts != BCVT.getVectorNumElements())
      BCNumEltsChanged = true;
    VecOp = VecOp.getOperand(0);
    ExtVT = BCVT.getVectorElementType();
  }

  // extract (vector load $addr), i --> load $addr + i * size
  if (!LegalOperations && !IndexC && VecOp.hasOneUse() &&
      ISD::isNormalLoad(VecOp.getNode()) &&
      !Index->hasPredecessor(VecOp.getNode())) {
    auto *VecLoad = dyn_cast<LoadSDNode>(VecOp);
    if (VecLoad && VecLoad->isSimple())
      return scalarizeExtractedVectorLoad(N, VecVT, Index, VecLoad);
  }

  // Perform only after legalization to ensure build_vector / vector_shuffle
  // optimizations have already been done.
  if (!LegalOperations || !IndexC)
    return SDValue();

  // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
  // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)
  int Elt = IndexC->getZExtValue();
  LoadSDNode *LN0 = nullptr;
  if (ISD::isNormalLoad(VecOp.getNode())) {
    LN0 = cast<LoadSDNode>(VecOp);
  } else if (VecOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
             VecOp.getOperand(0).getValueType() == ExtVT &&
             ISD::isNormalLoad(VecOp.getOperand(0).getNode())) {
    // Don't duplicate a load with other uses.
    if (!VecOp.hasOneUse())
      return SDValue();

    LN0 = cast<LoadSDNode>(VecOp.getOperand(0));
  }
  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(VecOp)) {
    // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
    // =>
    // (load $addr+1*size)

    // Don't duplicate a load with other uses.
    if (!VecOp.hasOneUse())
      return SDValue();

    // If the bit convert changed the number of elements, it is unsafe
    // to examine the mask.
    if (BCNumEltsChanged)
      return SDValue();

    // Select the input vector, guarding against out of range extract vector.
    int Idx = (Elt > (int)NumElts) ? -1 : Shuf->getMaskElt(Elt);
    VecOp = (Idx < (int)NumElts) ? VecOp.getOperand(0) : VecOp.getOperand(1);

    if (VecOp.getOpcode() == ISD::BITCAST) {
      // Don't duplicate a load with other uses.
      if (!VecOp.hasOneUse())
        return SDValue();

      VecOp = VecOp.getOperand(0);
    }
    if (ISD::isNormalLoad(VecOp.getNode())) {
      LN0 = cast<LoadSDNode>(VecOp);
      Elt = (Idx < (int)NumElts) ? Idx : Idx - (int)NumElts;
      Index = DAG.getConstant(Elt, DL, Index.getValueType());
    }
  }

  // Make sure we found a simple (non-volatile, non-atomic) load and that
  // the extractelement is the only use of the loaded value.
  if (!LN0 || !LN0->hasNUsesOfValue(1,0) || !LN0->isSimple())
    return SDValue();

  // If Idx was -1 above, Elt is going to be -1, so just return undef.
  if (Elt == -1)
    return DAG.getUNDEF(LVT);

  return scalarizeExtractedVectorLoad(N, VecVT, Index, LN0);
}

// Simplify (build_vec (ext )) to (bitcast (build_vec ))
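// For example, on a little-endian target:
//   (v2i32 build_vector (zext i16 %x to i32), (zext i16 %y to i32))
//     -> (v2i32 bitcast (v4i16 build_vector %x, 0, %y, 0))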
SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
  // We perform this optimization post type-legalization because
  // the type-legalizer often scalarizes integer-promoted vectors.
  // Performing this optimization before may create bit-casts which
  // will be type-legalized to complex code sequences.
  // We perform this optimization only before the operation legalizer because we
  // may introduce illegal operations.
  if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
    return SDValue();

  unsigned NumInScalars = N->getNumOperands();
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // Check to see if this is a BUILD_VECTOR of a bunch of values
  // which come from any_extend or zero_extend nodes. If so, we can create
  // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
  // optimizations. We do not handle sign-extend because we can't fill the sign
  // using shuffles.
  EVT SourceType = MVT::Other;
  bool AllAnyExt = true;

  for (unsigned i = 0; i != NumInScalars; ++i) {
    SDValue In = N->getOperand(i);
    // Ignore undef inputs.
    if (In.isUndef()) continue;

    bool AnyExt  = In.getOpcode() == ISD::ANY_EXTEND;
    bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;

    // Abort if the element is not an extension.
    if (!ZeroExt && !AnyExt) {
      SourceType = MVT::Other;
      break;
    }

    // The input is a ZeroExt or AnyExt. Check the original type.
    EVT InTy = In.getOperand(0).getValueType();

    // Check that all of the widened source types are the same.
    if (SourceType == MVT::Other)
      // First time.
      SourceType = InTy;
    else if (InTy != SourceType) {
      // Multiple incoming types. Abort.
      SourceType = MVT::Other;
      break;
    }

    // Check if all of the extends are ANY_EXTENDs.
    AllAnyExt &= AnyExt;
  }

  // In order to have valid types, all of the inputs must be extended from the
  // same source type and all of the inputs must be any or zero extend.
  // Scalar sizes must be a power of two.
  EVT OutScalarTy = VT.getScalarType();
  bool ValidTypes = SourceType != MVT::Other &&
                 isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
                 isPowerOf2_32(SourceType.getSizeInBits());

  // Create a new simpler BUILD_VECTOR sequence which other optimizations can
  // turn into a single shuffle instruction.
  if (!ValidTypes)
    return SDValue();

  bool isLE = DAG.getDataLayout().isLittleEndian();
  unsigned ElemRatio = OutScalarTy.getSizeInBits() / SourceType.getSizeInBits();
  assert(ElemRatio > 1 && "Invalid element size ratio");
  SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType)
                             : DAG.getConstant(0, DL, SourceType);

  unsigned NewBVElems = ElemRatio * VT.getVectorNumElements();
  SmallVector<SDValue, 8> Ops(NewBVElems, Filler);

  // Populate the new build_vector
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Cast = N->getOperand(i);
    assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
            Cast.getOpcode() == ISD::ZERO_EXTEND ||
            Cast.isUndef()) && "Invalid cast opcode");
    SDValue In;
    if (Cast.isUndef())
      In = DAG.getUNDEF(SourceType);
    else
      In = Cast->getOperand(0);
    unsigned Index = isLE ? (i * ElemRatio) :
                            (i * ElemRatio + (ElemRatio - 1));

    assert(Index < Ops.size() && "Invalid index");
    Ops[Index] = In;
  }

  // The type of the new BUILD_VECTOR node.
  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
  assert(VecVT.getSizeInBits() == VT.getSizeInBits() &&
         "Invalid vector size");
  // Check if the new vector type is legal.
  if (!isTypeLegal(VecVT) ||
      (!TLI.isOperationLegal(ISD::BUILD_VECTOR, VecVT) &&
       TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)))
    return SDValue();

  // Make the new BUILD_VECTOR.
  SDValue BV = DAG.getBuildVector(VecVT, DL, Ops);

  // The new BUILD_VECTOR node has the potential to be further optimized.
  AddToWorklist(BV.getNode());
  // Bitcast to the desired type.
  return DAG.getBitcast(VT, BV);
}

SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
                                           ArrayRef<int> VectorMask,
                                           SDValue VecIn1, SDValue VecIn2,
                                           unsigned LeftIdx, bool DidSplitVec) {
  MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
  SDValue ZeroIdx = DAG.getConstant(0, DL, IdxTy);

  EVT VT = N->getValueType(0);
  EVT InVT1 = VecIn1.getValueType();
  EVT InVT2 = VecIn2.getNode() ? VecIn2.getValueType() : InVT1;

  unsigned NumElems = VT.getVectorNumElements();
  unsigned ShuffleNumElems = NumElems;

  // If we artificially split a vector in two already, then the offsets in the
  // operands will all be based off of VecIn1, even those in VecIn2.
  unsigned Vec2Offset = DidSplitVec ? 0 : InVT1.getVectorNumElements();

  // We can't generate a shuffle node with mismatched input and output types.
  // Try to make the types match the type of the output.
  if (InVT1 != VT || InVT2 != VT) {
    if ((VT.getSizeInBits() % InVT1.getSizeInBits() == 0) && InVT1 == InVT2) {
      // If the output vector size is a multiple of the (equal) input sizes,
      // we can concatenate the inputs and pad the rest with undefs.
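      // e.g. VT = v8i32 with InVT1 = InVT2 = v2i32 gives NumConcats = 4:
      //   concat_vectors VecIn1, VecIn2, undef:v2i32, undef:v2i32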
      unsigned NumConcats = VT.getSizeInBits() / InVT1.getSizeInBits();
      assert(NumConcats >= 2 && "Concat needs at least two inputs!");
      SmallVector<SDValue, 2> ConcatOps(NumConcats, DAG.getUNDEF(InVT1));
      ConcatOps[0] = VecIn1;
      ConcatOps[1] = VecIn2 ? VecIn2 : DAG.getUNDEF(InVT1);
      VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
      VecIn2 = SDValue();
    } else if (InVT1.getSizeInBits() == VT.getSizeInBits() * 2) {
      if (!TLI.isExtractSubvectorCheap(VT, InVT1, NumElems))
        return SDValue();

      if (!VecIn2.getNode()) {
        // If we only have one input vector, and it's twice the size of the
        // output, split it in two.
        VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1,
                             DAG.getConstant(NumElems, DL, IdxTy));
        VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1, ZeroIdx);
        // Since we now have shorter input vectors, adjust the offset of the
        // second vector's start.
        Vec2Offset = NumElems;
      } else if (InVT2.getSizeInBits() <= InVT1.getSizeInBits()) {
        // VecIn1 is wider than the output, and we have another, possibly
        // smaller input. Pad the smaller input with undefs, shuffle at the
        // input vector width, and extract the output.
        // The shuffle type is different from VT, so check legality again.
        if (LegalOperations &&
            !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, InVT1))
          return SDValue();

        // Legalizing INSERT_SUBVECTOR is tricky - you basically have to
        // lower it back into a BUILD_VECTOR. So if the inserted type is
        // illegal, don't even try.
        if (InVT1 != InVT2) {
          if (!TLI.isTypeLegal(InVT2))
            return SDValue();
          VecIn2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT1,
                               DAG.getUNDEF(InVT1), VecIn2, ZeroIdx);
        }
        ShuffleNumElems = NumElems * 2;
      } else {
        // Both VecIn1 and VecIn2 are wider than the output, and VecIn2 is wider
        // than VecIn1. We can't handle this for now - this case will disappear
        // when we start sorting the vectors by type.
        return SDValue();
      }
    } else if (InVT2.getSizeInBits() * 2 == VT.getSizeInBits() &&
               InVT1.getSizeInBits() == VT.getSizeInBits()) {
      SmallVector<SDValue, 2> ConcatOps(2, DAG.getUNDEF(InVT2));
      ConcatOps[0] = VecIn2;
      VecIn2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
    } else {
      // TODO: Support cases where the length mismatch isn't exactly by a
      // factor of 2.
      // TODO: Move this check upwards, so that if we have bad type
      // mismatches, we don't create any DAG nodes.
      return SDValue();
    }
  }

  // Initialize mask to undef.
  SmallVector<int, 8> Mask(ShuffleNumElems, -1);

  // Only need to run up to the number of elements actually used, not the
  // total number of elements in the shuffle - if we are shuffling a wider
  // vector, the high lanes should be set to undef.
  for (unsigned i = 0; i != NumElems; ++i) {
    if (VectorMask[i] <= 0)
      continue;

    unsigned ExtIndex = N->getOperand(i).getConstantOperandVal(1);
    if (VectorMask[i] == (int)LeftIdx) {
      Mask[i] = ExtIndex;
    } else if (VectorMask[i] == (int)LeftIdx + 1) {
      Mask[i] = Vec2Offset + ExtIndex;
    }
  }

  // The types of the input vectors may have changed above.
  InVT1 = VecIn1.getValueType();

  // If we already have a VecIn2, it should have the same type as VecIn1.
  // If we don't, get an undef/zero vector of the appropriate type.
  VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(InVT1);
  assert(InVT1 == VecIn2.getValueType() && "Unexpected second input type.");

  SDValue Shuffle = DAG.getVectorShuffle(InVT1, DL, VecIn1, VecIn2, Mask);
  if (ShuffleNumElems > NumElems)
    Shuffle = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuffle, ZeroIdx);

  return Shuffle;
}

static SDValue reduceBuildVecToShuffleWithZero(SDNode *BV, SelectionDAG &DAG) {
  assert(BV->getOpcode() == ISD::BUILD_VECTOR && "Expected build vector");

  // First, determine where the build vector is not undef.
  // TODO: We could extend this to handle zero elements as well as undefs.
  int NumBVOps = BV->getNumOperands();
  int ZextElt = -1;
  for (int i = 0; i != NumBVOps; ++i) {
    SDValue Op = BV->getOperand(i);
    if (Op.isUndef())
      continue;
    if (ZextElt == -1)
      ZextElt = i;
    else
      return SDValue();
  }
  // Bail out if there's no non-undef element.
  if (ZextElt == -1)
    return SDValue();

  // The build vector contains some number of undef elements and exactly
  // one other element. That other element must be a zero-extended scalar
  // extracted from a vector at a constant index to turn this into a shuffle.
  // Also, require that the build vector does not implicitly truncate/extend
  // its elements.
  // TODO: This could be enhanced to allow ANY_EXTEND as well as ZERO_EXTEND.
  EVT VT = BV->getValueType(0);
  SDValue Zext = BV->getOperand(ZextElt);
  if (Zext.getOpcode() != ISD::ZERO_EXTEND || !Zext.hasOneUse() ||
      Zext.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isa<ConstantSDNode>(Zext.getOperand(0).getOperand(1)) ||
      Zext.getValueSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  // The size of the zero-extended value must be a multiple of the source
  // size, and we must be building a vector of the same size as the source of
  // the extract element.
  SDValue Extract = Zext.getOperand(0);
  unsigned DestSize = Zext.getValueSizeInBits();
  unsigned SrcSize = Extract.getValueSizeInBits();
  if (DestSize % SrcSize != 0 ||
      Extract.getOperand(0).getValueSizeInBits() != VT.getSizeInBits())
    return SDValue();

  // Create a shuffle mask that will combine the extracted element with zeros
  // and undefs.
  int ZextRatio = DestSize / SrcSize;
  int NumMaskElts = NumBVOps * ZextRatio;
  SmallVector<int, 32> ShufMask(NumMaskElts, -1);
  for (int i = 0; i != NumMaskElts; ++i) {
    if (i / ZextRatio == ZextElt) {
      // The low bits of the (potentially translated) extracted element map to
      // the source vector. The high bits map to zero. We will use a zero vector
      // as the 2nd source operand of the shuffle, so use the 1st element of
      // that vector (mask value is number-of-elements) for the high bits.
      if (i % ZextRatio == 0)
        ShufMask[i] = Extract.getConstantOperandVal(1);
      else
        ShufMask[i] = NumMaskElts;
    }

    // Undef elements of the build vector remain undef because we initialize
    // the shuffle mask with -1.
  }

  // buildvec undef, ..., (zext (extractelt V, IndexC)), undef... -->
  // bitcast (shuffle V, ZeroVec, VectorMask)
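  // An illustrative example: for a v4i32 build vector with V = v8i16,
  // ZextElt = 1, IndexC = 2, and ZextRatio = 2, the mask works out to
  // <u,u,2,8,u,u,u,u>, giving:
  //   bitcast (v8i16 shuffle<u,u,2,8,u,u,u,u> V, zero) to v4i32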
  SDLoc DL(BV);
  EVT VecVT = Extract.getOperand(0).getValueType();
  SDValue ZeroVec = DAG.getConstant(0, DL, VecVT);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Shuf = TLI.buildLegalVectorShuffle(VecVT, DL, Extract.getOperand(0),
                                             ZeroVec, ShufMask, DAG);
  if (!Shuf)
    return SDValue();
  return DAG.getBitcast(VT, Shuf);
}

// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
// operations. If the types of the vectors we're extracting from allow it,
// turn this into a vector_shuffle node.
SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
  if (!isTypeLegal(VT))
    return SDValue();

  if (SDValue V = reduceBuildVecToShuffleWithZero(N, DAG))
    return V;

  // May only combine to shuffle after legalize if shuffle is legal.
  if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, VT))
    return SDValue();

  bool UsesZeroVector = false;
  unsigned NumElems = N->getNumOperands();

  // Record, for each element of the newly built vector, which input vector
  // that element comes from. -1 stands for undef, 0 for the zero vector,
  // and positive values for the input vectors.
  // VectorMask maps each element to its vector number, and VecIn maps vector
  // numbers to their initial SDValues.
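  // e.g. for (build_vector (extractelt t1, 0), (i32 0), undef,
  //           (extractelt t2, 1)),
  // VectorMask is [1, 0, -1, 2] and VecIn is [SDValue(), t1, t2].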

  SmallVector<int, 8> VectorMask(NumElems, -1);
  SmallVector<SDValue, 8> VecIn;
  VecIn.push_back(SDValue());

  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue Op = N->getOperand(i);

    if (Op.isUndef())
      continue;

    // See if we can use a blend with a zero vector.
    // TODO: Should we generalize this to a blend with an arbitrary constant
    // vector?
    if (isNullConstant(Op) || isNullFPConstant(Op)) {
      UsesZeroVector = true;
      VectorMask[i] = 0;
      continue;
    }

    // Not an undef or zero. If the input is something other than an
    // EXTRACT_VECTOR_ELT with an in-range constant index, bail out.
    if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(Op.getOperand(1)))
      return SDValue();
    SDValue ExtractedFromVec = Op.getOperand(0);

    const APInt &ExtractIdx = Op.getConstantOperandAPInt(1);
    if (ExtractIdx.uge(ExtractedFromVec.getValueType().getVectorNumElements()))
      return SDValue();

    // All inputs must have the same element type as the output.
    if (VT.getVectorElementType() !=
        ExtractedFromVec.getValueType().getVectorElementType())
      return SDValue();

    // Have we seen this input vector before?
    // The vectors are expected to be tiny (usually 1 or 2 elements), so using
    // a map back from SDValues to numbers isn't worth it.
    unsigned Idx = std::distance(
        VecIn.begin(), std::find(VecIn.begin(), VecIn.end(), ExtractedFromVec));
    if (Idx == VecIn.size())
      VecIn.push_back(ExtractedFromVec);

    VectorMask[i] = Idx;
  }

  // If we didn't find at least one input vector, bail out.
  if (VecIn.size() < 2)
    return SDValue();

  // If all the operands of the BUILD_VECTOR extract from the same vector,
  // split that vector based on the maximum vector access index, and adjust
  // VectorMask and VecIn accordingly.
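  // e.g. building a v2i32 from elements 1 and 9 of a v16i32 gives
  // MaxIndex = 9 and NearestPow2 = 16; assuming v8i32 is legal, the source
  // is split into two v8i32 halves, and each mask entry becomes 1 (low half)
  // or 2 (high half).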
  bool DidSplitVec = false;
  if (VecIn.size() == 2) {
    unsigned MaxIndex = 0;
    unsigned NearestPow2 = 0;
    SDValue Vec = VecIn.back();
    EVT InVT = Vec.getValueType();
    MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
    SmallVector<unsigned, 8> IndexVec(NumElems, 0);

    for (unsigned i = 0; i < NumElems; i++) {
      if (VectorMask[i] <= 0)
        continue;
      unsigned Index = N->getOperand(i).getConstantOperandVal(1);
      IndexVec[i] = Index;
      MaxIndex = std::max(MaxIndex, Index);
    }

    NearestPow2 = PowerOf2Ceil(MaxIndex);
    if (InVT.isSimple() && NearestPow2 > 2 && MaxIndex < NearestPow2 &&
        NumElems * 2 < NearestPow2) {
      unsigned SplitSize = NearestPow2 / 2;
      EVT SplitVT = EVT::getVectorVT(*DAG.getContext(),
                                     InVT.getVectorElementType(), SplitSize);
      if (TLI.isTypeLegal(SplitVT)) {
        SDValue VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
                                     DAG.getConstant(SplitSize, DL, IdxTy));
        SDValue VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
                                     DAG.getConstant(0, DL, IdxTy));
        VecIn.pop_back();
        VecIn.push_back(VecIn1);
        VecIn.push_back(VecIn2);
        DidSplitVec = true;

        for (unsigned i = 0; i < NumElems; i++) {
          if (VectorMask[i] <= 0)
            continue;
          VectorMask[i] = (IndexVec[i] < SplitSize) ? 1 : 2;
        }
      }
    }
  }

  // TODO: We want to sort the vectors by descending length, so that adjacent
  // pairs have similar length, and the longer vector is always first in the
  // pair.

  // TODO: Should this fire if some of the input vectors have an illegal type
  // (like it does now), or should we let legalization run its course first?

  // Shuffle phase:
  // Take pairs of vectors, and shuffle them so that the result has elements
  // from these vectors in the correct places.
  // For example, given:
  // t10: i32 = extract_vector_elt t1, Constant:i64<0>
  // t11: i32 = extract_vector_elt t2, Constant:i64<0>
  // t12: i32 = extract_vector_elt t3, Constant:i64<0>
  // t13: i32 = extract_vector_elt t1, Constant:i64<1>
  // t14: v4i32 = BUILD_VECTOR t10, t11, t12, t13
  // We will generate:
  // t20: v4i32 = vector_shuffle<0,4,u,1> t1, t2
  // t21: v4i32 = vector_shuffle<u,u,0,u> t3, undef
  SmallVector<SDValue, 4> Shuffles;
  for (unsigned In = 0, Len = (VecIn.size() / 2); In < Len; ++In) {
    unsigned LeftIdx = 2 * In + 1;
    SDValue VecLeft = VecIn[LeftIdx];
    SDValue VecRight =
        (LeftIdx + 1) < VecIn.size() ? VecIn[LeftIdx + 1] : SDValue();

    if (SDValue Shuffle = createBuildVecShuffle(DL, N, VectorMask, VecLeft,
                                                VecRight, LeftIdx, DidSplitVec))
      Shuffles.push_back(Shuffle);
    else
      return SDValue();
  }

  // If we need the zero vector as an "ingredient" in the blend tree, add it
  // to the list of shuffles.
  if (UsesZeroVector)
    Shuffles.push_back(VT.isInteger() ? DAG.getConstant(0, DL, VT)
                                      : DAG.getConstantFP(0.0, DL, VT));

  // If we only have one shuffle, we're done.
  if (Shuffles.size() == 1)
    return Shuffles[0];

  // Update the vector mask to point to the post-shuffle vectors.
  for (int &Vec : VectorMask)
    if (Vec == 0)
      Vec = Shuffles.size() - 1;
    else
      Vec = (Vec - 1) / 2;

  // More than one shuffle. Generate a binary tree of blends, e.g. if from
  // the previous step we got the set of shuffles t10, t11, t12, t13, we will
  // generate:
  // t10: v8i32 = vector_shuffle<0,8,u,u,u,u,u,u> t1, t2
  // t11: v8i32 = vector_shuffle<u,u,0,8,u,u,u,u> t3, t4
  // t12: v8i32 = vector_shuffle<u,u,u,u,0,8,u,u> t5, t6
  // t13: v8i32 = vector_shuffle<u,u,u,u,u,u,0,8> t7, t8
  // t20: v8i32 = vector_shuffle<0,1,10,11,u,u,u,u> t10, t11
  // t21: v8i32 = vector_shuffle<u,u,u,u,4,5,14,15> t12, t13
  // t30: v8i32 = vector_shuffle<0,1,2,3,12,13,14,15> t20, t21

  // Make sure the initial size of the shuffle list is even.
  if (Shuffles.size() % 2)
    Shuffles.push_back(DAG.getUNDEF(VT));

  for (unsigned CurSize = Shuffles.size(); CurSize > 1; CurSize /= 2) {
    if (CurSize % 2) {
      Shuffles[CurSize] = DAG.getUNDEF(VT);
      CurSize++;
    }
    for (unsigned In = 0, Len = CurSize / 2; In < Len; ++In) {
      int Left = 2 * In;
      int Right = 2 * In + 1;
      SmallVector<int, 8> Mask(NumElems, -1);
      for (unsigned i = 0; i != NumElems; ++i) {
        if (VectorMask[i] == Left) {
          Mask[i] = i;
          VectorMask[i] = In;
        } else if (VectorMask[i] == Right) {
          Mask[i] = i + NumElems;
          VectorMask[i] = In;
        }
      }

      Shuffles[In] =
          DAG.getVectorShuffle(VT, DL, Shuffles[Left], Shuffles[Right], Mask);
    }
  }
  return Shuffles[0];
}

// Try to turn a build vector of zero extends of extract vector elts into a
// vector zero extend and possibly an extract subvector.
// TODO: Support sign extend?
// TODO: Allow undef elements?
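// An illustrative example:
//   (v4i32 build_vector (zext (extractelt X:v8i16, 4)),
//                       (zext (extractelt X, 5)),
//                       (zext (extractelt X, 6)),
//                       (zext (extractelt X, 7)))
//   --> (v4i32 zero_extend (v4i16 extract_subvector X, 4))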
SDValue DAGCombiner::convertBuildVecZextToZext(SDNode *N) {
  if (LegalOperations)
    return SDValue();

  EVT VT = N->getValueType(0);

  bool FoundZeroExtend = false;
  SDValue Op0 = N->getOperand(0);
  auto checkElem = [&](SDValue Op) -> int64_t {
    unsigned Opc = Op.getOpcode();
    FoundZeroExtend |= (Opc == ISD::ZERO_EXTEND);
    if ((Opc == ISD::ZERO_EXTEND || Opc == ISD::ANY_EXTEND) &&
        Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        Op0.getOperand(0).getOperand(0) == Op.getOperand(0).getOperand(0))
      if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(0).getOperand(1)))
        return C->getZExtValue();
    return -1;
  };

  // Make sure the first element matches
  // (zext (extract_vector_elt X, C))
  int64_t Offset = checkElem(Op0);
  if (Offset < 0)
    return SDValue();

  unsigned NumElems = N->getNumOperands();
  SDValue In = Op0.getOperand(0).getOperand(0);
  EVT InSVT = In.getValueType().getScalarType();
  EVT InVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumElems);

  // Don't create an illegal input type after type legalization.
  if (LegalTypes && !TLI.isTypeLegal(InVT))
    return SDValue();

  // Ensure all the elements come from the same vector and are adjacent.
  for (unsigned i = 1; i != NumElems; ++i) {
    if ((Offset + i) != checkElem(N->getOperand(i)))
      return SDValue();
  }

  SDLoc DL(N);
  In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InVT, In,
                   Op0.getOperand(0).getOperand(1));
  return DAG.getNode(FoundZeroExtend ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND, DL,
                     VT, In);
}

SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);

  // A vector built entirely of undefs is undef.
  if (ISD::allOperandsUndef(N))
    return DAG.getUNDEF(VT);

  // If this is a splat of a bitcast from another vector, change to a
  // concat_vector.
  // For example:
  //   (build_vector (i64 (bitcast (v2i32 X))), (i64 (bitcast (v2i32 X)))) ->
  //     (v2i64 (bitcast (concat_vectors (v2i32 X), (v2i32 X))))
  //
  // If X is a build_vector itself, the concat can become a larger build_vector.
  // TODO: Maybe this is useful for non-splat too?
  if (!LegalOperations) {
    if (SDValue Splat = cast<BuildVectorSDNode>(N)->getSplatValue()) {
      Splat = peekThroughBitcasts(Splat);
      EVT SrcVT = Splat.getValueType();
      if (SrcVT.isVector()) {
        unsigned NumElts = N->getNumOperands() * SrcVT.getVectorNumElements();
        EVT NewVT = EVT::getVectorVT(*DAG.getContext(),
                                     SrcVT.getVectorElementType(), NumElts);
        if (!LegalTypes || TLI.isTypeLegal(NewVT)) {
          SmallVector<SDValue, 8> Ops(N->getNumOperands(), Splat);
          SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N),
                                       NewVT, Ops);
          return DAG.getBitcast(VT, Concat);
        }
      }
    }
  }

  // A splat of a single element is a SPLAT_VECTOR if supported on the target.
  if (TLI.getOperationAction(ISD::SPLAT_VECTOR, VT) != TargetLowering::Expand)
    if (SDValue V = cast<BuildVectorSDNode>(N)->getSplatValue()) {
      assert(!V.isUndef() && "Splat of undef should have been handled earlier");
      return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V);
    }

  // Check if we can express the BUILD_VECTOR via a subvector extract.
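  // e.g. (v2i32 build_vector (extractelt X:v4i32, 2), (extractelt X, 3))
  //        --> (v2i32 extract_subvector X, 2)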
  if (!LegalTypes && (N->getNumOperands() > 1)) {
    SDValue Op0 = N->getOperand(0);
    auto checkElem = [&](SDValue Op) -> uint64_t {
      if ((Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) &&
          (Op0.getOperand(0) == Op.getOperand(0)))
        if (auto CNode = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
          return CNode->getZExtValue();
      return -1;
    };

    int Offset = checkElem(Op0);
    for (unsigned i = 0; i < N->getNumOperands(); ++i) {
      if (Offset + i != checkElem(N->getOperand(i))) {
        Offset = -1;
        break;
      }
    }

    if ((Offset == 0) &&
        (Op0.getOperand(0).getValueType() == N->getValueType(0)))
      return Op0.getOperand(0);
    // The extract index must be a multiple of the output vector length.
    if ((Offset != -1) &&
        ((Offset % N->getValueType(0).getVectorNumElements()) == 0))
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), N->getValueType(0),
                         Op0.getOperand(0), Op0.getOperand(1));
  }

  if (SDValue V = convertBuildVecZextToZext(N))
    return V;

  if (SDValue V = reduceBuildVecExtToExtBuildVec(N))
    return V;

  if (SDValue V = reduceBuildVecToShuffle(N))
    return V;

  return SDValue();
}

static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT OpVT = N->getOperand(0).getValueType();

  // If the operands are legal vectors, leave them alone.
  if (TLI.isTypeLegal(OpVT))
    return SDValue();

  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SmallVector<SDValue, 8> Ops;

  EVT SVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
  SDValue ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);

  // Keep track of what we encounter.
  bool AnyInteger = false;
  bool AnyFP = false;
  for (const SDValue &Op : N->ops()) {
    if (ISD::BITCAST == Op.getOpcode() &&
        !Op.getOperand(0).getValueType().isVector())
      Ops.push_back(Op.getOperand(0));
    else if (ISD::UNDEF == Op.getOpcode())
      Ops.push_back(ScalarUndef);
    else
      return SDValue();

    // Note whether we encounter an integer or floating point scalar.
    // If it's neither, bail out; it could be something weird like x86mmx.
    EVT LastOpVT = Ops.back().getValueType();
    if (LastOpVT.isFloatingPoint())
      AnyFP = true;
    else if (LastOpVT.isInteger())
      AnyInteger = true;
    else
      return SDValue();
  }

  // If any of the operands is a floating point scalar bitcast to a vector,
  // use floating point types throughout, and bitcast everything.
  // Replace UNDEFs by another scalar UNDEF node, of the final desired type.
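  // e.g. (assuming v2i32 is not a legal operand type):
  //   concat (v2i32 bitcast (i64 A)), (v2i32 bitcast (f64 B))
  //     --> v4i32 bitcast (v2f64 build_vector (f64 bitcast A), B)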
  if (AnyFP) {
    SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
    ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
    if (AnyInteger) {
      for (SDValue &Op : Ops) {
        if (Op.getValueType() == SVT)
          continue;
        if (Op.isUndef())
          Op = ScalarUndef;
        else
          Op = DAG.getBitcast(SVT, Op);
      }
    }
  }

  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
                               VT.getSizeInBits() / SVT.getSizeInBits());
  return DAG.getBitcast(VT, DAG.getBuildVector(VecVT, DL, Ops));
}

// Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR
// operations. If so, and if the EXTRACT_SUBVECTOR vector inputs come from at
// most two distinct vectors the same size as the result, attempt to turn this
// into a legal shuffle.
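// An illustrative example:
//   concat (extract_subvector X:v4i32, 2), (extract_subvector Y:v4i32, 0)
//     --> vector_shuffle<2,3,4,5> X, Y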
static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  EVT OpVT = N->getOperand(0).getValueType();
  int NumElts = VT.getVectorNumElements();
  int NumOpElts = OpVT.getVectorNumElements();

  SDValue SV0 = DAG.getUNDEF(VT), SV1 = DAG.getUNDEF(VT);
  SmallVector<int, 8> Mask;

  for (SDValue Op : N->ops()) {
    Op = peekThroughBitcasts(Op);

    // UNDEF nodes convert to UNDEF shuffle mask values.
    if (Op.isUndef()) {
      Mask.append((unsigned)NumOpElts, -1);
      continue;
    }

    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
      return SDValue();

    // What vector are we extracting the subvector from and at what index?
    SDValue ExtVec = Op.getOperand(0);

    // We want the EVT of the original extraction to correctly scale the
    // extraction index.
    EVT ExtVT = ExtVec.getValueType();
    ExtVec = peekThroughBitcasts(ExtVec);

    // UNDEF nodes convert to UNDEF shuffle mask values.
    if (ExtVec.isUndef()) {
      Mask.append((unsigned)NumOpElts, -1);
      continue;
    }

    if (!isa<ConstantSDNode>(Op.getOperand(1)))
      return SDValue();
    int ExtIdx = Op.getConstantOperandVal(1);

    // Ensure that we are extracting a subvector from a vector the same
    // size as the result.
    if (ExtVT.getSizeInBits() != VT.getSizeInBits())
      return SDValue();

    // Scale the subvector index to account for any bitcast.
    int NumExtElts = ExtVT.getVectorNumElements();
    if (0 == (NumExtElts % NumElts))
      ExtIdx /= (NumExtElts / NumElts);
    else if (0 == (NumElts % NumExtElts))
      ExtIdx *= (NumElts / NumExtElts);
    else
      return SDValue();

    // At most we can reference 2 inputs in the final shuffle.
    if (SV0.isUndef() || SV0 == ExtVec) {
      SV0 = ExtVec;
      for (int i = 0; i != NumOpElts; ++i)
        Mask.push_back(i + ExtIdx);
    } else if (SV1.isUndef() || SV1 == ExtVec) {
      SV1 = ExtVec;
      for (int i = 0; i != NumOpElts; ++i)
        Mask.push_back(i + ExtIdx + NumElts);
    } else {
      return SDValue();
    }
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  return TLI.buildLegalVectorShuffle(VT, SDLoc(N), DAG.getBitcast(VT, SV0),
                                     DAG.getBitcast(VT, SV1), Mask, DAG);
}

SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
  // If we only have one input vector, we don't need to do any concatenation.
  if (N->getNumOperands() == 1)
    return N->getOperand(0);

  // Check if all of the operands are undefs.
  EVT VT = N->getValueType(0);
  if (ISD::allOperandsUndef(N))
    return DAG.getUNDEF(VT);

  // Optimize concat_vectors where all but the first of the vectors are undef.
  if (std::all_of(std::next(N->op_begin()), N->op_end(), [](const SDValue &Op) {
        return Op.isUndef();
      })) {
    SDValue In = N->getOperand(0);
    assert(In.getValueType().isVector() && "Must concat vectors");

    // If the input is a concat_vectors, just make a larger concat by padding
    // with smaller undefs.
    if (In.getOpcode() == ISD::CONCAT_VECTORS && In.hasOneUse()) {
      unsigned NumOps = N->getNumOperands() * In.getNumOperands();
      SmallVector<SDValue, 4> Ops(In->op_begin(), In->op_end());
      Ops.resize(NumOps, DAG.getUNDEF(Ops[0].getValueType()));
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
    }

    SDValue Scalar = peekThroughOneUseBitcasts(In);

    // concat_vectors(scalar_to_vector(scalar), undef) ->
    //     scalar_to_vector(scalar)
    if (!LegalOperations && Scalar.getOpcode() == ISD::SCALAR_TO_VECTOR &&
         Scalar.hasOneUse()) {
      EVT SVT = Scalar.getValueType().getVectorElementType();
      if (SVT == Scalar.getOperand(0).getValueType())
        Scalar = Scalar.getOperand(0);
    }

    // concat_vectors(scalar, undef) -> scalar_to_vector(scalar)
    if (!Scalar.getValueType().isVector()) {
      // If the bitcast type isn't legal, it might be a trunc of a legal type;
      // look through the trunc so we can still do the transform:
      //   concat_vectors(trunc(scalar), undef) -> scalar_to_vector(scalar)
      if (Scalar->getOpcode() == ISD::TRUNCATE &&
          !TLI.isTypeLegal(Scalar.getValueType()) &&
          TLI.isTypeLegal(Scalar->getOperand(0).getValueType()))
        Scalar = Scalar->getOperand(0);

      EVT SclTy = Scalar.getValueType();

      if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
        return SDValue();

      // Bail out if the vector size is not a multiple of the scalar size.
      if (VT.getSizeInBits() % SclTy.getSizeInBits())
        return SDValue();

      unsigned VNTNumElms = VT.getSizeInBits() / SclTy.getSizeInBits();
      if (VNTNumElms < 2)
        return SDValue();

      EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy, VNTNumElms);
      if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType()))
        return SDValue();

      SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), NVT, Scalar);
      return DAG.getBitcast(VT, Res);
    }
  }

  // Fold any combination of BUILD_VECTOR or UNDEF nodes into one BUILD_VECTOR.
  // We have already tested above for an UNDEF only concatenation.
  // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
  // -> (BUILD_VECTOR A, B, ..., C, D, ...)
  auto IsBuildVectorOrUndef = [](const SDValue &Op) {
    return ISD::UNDEF == Op.getOpcode() || ISD::BUILD_VECTOR == Op.getOpcode();
  };
  if (llvm::all_of(N->ops(), IsBuildVectorOrUndef)) {
    SmallVector<SDValue, 8> Opnds;
    EVT SVT = VT.getScalarType();

    EVT MinVT = SVT;
    if (!SVT.isFloatingPoint()) {
      // If the BUILD_VECTORs are built from integers, they may have different
      // operand types. Get the smallest type and truncate all operands to it.
      bool FoundMinVT = false;
      for (const SDValue &Op : N->ops())
        if (ISD::BUILD_VECTOR == Op.getOpcode()) {
          EVT OpSVT = Op.getOperand(0).getValueType();
          MinVT = (!FoundMinVT || OpSVT.bitsLE(MinVT)) ? OpSVT : MinVT;
          FoundMinVT = true;
        }
      assert(FoundMinVT && "Concat vector type mismatch");
    }

    for (const SDValue &Op : N->ops()) {
      EVT OpVT = Op.getValueType();
      unsigned NumElts = OpVT.getVectorNumElements();

      if (ISD::UNDEF == Op.getOpcode())
        Opnds.append(NumElts, DAG.getUNDEF(MinVT));

      if (ISD::BUILD_VECTOR == Op.getOpcode()) {
        if (SVT.isFloatingPoint()) {
          assert(SVT == OpVT.getScalarType() && "Concat vector type mismatch");
          Opnds.append(Op->op_begin(), Op->op_begin() + NumElts);
        } else {
          for (unsigned i = 0; i != NumElts; ++i)
            Opnds.push_back(
                DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinVT, Op.getOperand(i)));
        }
      }
    }

    assert(VT.getVectorNumElements() == Opnds.size() &&
           "Concat vector type mismatch");
    return DAG.getBuildVector(VT, SDLoc(N), Opnds);
  }

  // Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
  if (SDValue V = combineConcatVectorOfScalars(N, DAG))
    return V;

  // Fold CONCAT_VECTORS of EXTRACT_SUBVECTOR (or undef) to VECTOR_SHUFFLE.
  if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT))
    if (SDValue V = combineConcatVectorOfExtracts(N, DAG))
      return V;

  // Type legalization of vectors and DAG canonicalization of VECTOR_SHUFFLE
  // nodes often generate nop CONCAT_VECTORS nodes.
  // Scan the CONCAT_VECTORS operands and look for concat operations that
  // place the incoming vectors at the exact same location.
  SDValue SingleSource = SDValue();
  unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements();

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);

    if (Op.isUndef())
      continue;

    // Check if this is the identity extract:
    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
      return SDValue();

    // Find the single incoming vector for the extract_subvector.
    if (SingleSource.getNode()) {
      if (Op.getOperand(0) != SingleSource)
        return SDValue();
    } else {
      SingleSource = Op.getOperand(0);

      // Check that the source type is the same as the result type. If not,
      // this concat may extend the vector, so we cannot optimize it away.
      if (SingleSource.getValueType() != N->getValueType(0))
        return SDValue();
    }

    auto *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    // The extract index must be constant.
    if (!CS)
      return SDValue();

    // Check that we are reading from the identity index.
    unsigned IdentityIndex = i * PartNumElem;
    if (CS->getAPIntValue() != IdentityIndex)
      return SDValue();
  }

  if (SingleSource.getNode())
    return SingleSource;

  return SDValue();
}

// Helper that peeks through INSERT_SUBVECTOR/CONCAT_VECTORS to find
// if the subvector can be sourced for free.
static SDValue getSubVectorSrc(SDValue V, SDValue Index, EVT SubVT) {
  if (V.getOpcode() == ISD::INSERT_SUBVECTOR &&
      V.getOperand(1).getValueType() == SubVT && V.getOperand(2) == Index) {
    return V.getOperand(1);
  }
  auto *IndexC = dyn_cast<ConstantSDNode>(Index);
  if (IndexC && V.getOpcode() == ISD::CONCAT_VECTORS &&
      V.getOperand(0).getValueType() == SubVT &&
      (IndexC->getZExtValue() % SubVT.getVectorNumElements()) == 0) {
    uint64_t SubIdx = IndexC->getZExtValue() / SubVT.getVectorNumElements();
    return V.getOperand(SubIdx);
  }
  return SDValue();
}

static SDValue narrowInsertExtractVectorBinOp(SDNode *Extract,
                                              SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue BinOp = Extract->getOperand(0);
  unsigned BinOpcode = BinOp.getOpcode();
  if (!TLI.isBinOp(BinOpcode) || BinOp.getNode()->getNumValues() != 1)
    return SDValue();

  EVT VecVT = BinOp.getValueType();
  SDValue Bop0 = BinOp.getOperand(0), Bop1 = BinOp.getOperand(1);
  if (VecVT != Bop0.getValueType() || VecVT != Bop1.getValueType())
    return SDValue();

  SDValue Index = Extract->getOperand(1);
  EVT SubVT = Extract->getValueType(0);
  if (!TLI.isOperationLegalOrCustom(BinOpcode, SubVT))
    return SDValue();

  SDValue Sub0 = getSubVectorSrc(Bop0, Index, SubVT);
  SDValue Sub1 = getSubVectorSrc(Bop1, Index, SubVT);

  // TODO: We could handle the case where only 1 operand is being inserted by
  //       creating an extract of the other operand, but that requires checking
  //       number of uses and/or costs.
  if (!Sub0 || !Sub1)
    return SDValue();

  // We are inserting both operands of the wide binop only to extract back
  // to the narrow vector size. Eliminate all of the insert/extract:
  // ext (binop (ins ?, X, Index), (ins ?, Y, Index)), Index --> binop X, Y
  return DAG.getNode(BinOpcode, SDLoc(Extract), SubVT, Sub0, Sub1,
                     BinOp->getFlags());
}

/// If we are extracting a subvector produced by a wide binary operator try
/// to use a narrow binary operator and/or avoid concatenation and extraction.
static SDValue narrowExtractedVectorBinOp(SDNode *Extract, SelectionDAG &DAG) {
  // TODO: Refactor with the caller (visitEXTRACT_SUBVECTOR), so we can share
  // some of these bailouts with other transforms.

  if (SDValue V = narrowInsertExtractVectorBinOp(Extract, DAG))
    return V;

  // The extract index must be a constant, so we can map it to a concat operand.
  auto *ExtractIndexC = dyn_cast<ConstantSDNode>(Extract->getOperand(1));
  if (!ExtractIndexC)
    return SDValue();

  // We are looking for an optionally bitcasted wide vector binary operator
  // feeding an extract subvector.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue BinOp = peekThroughBitcasts(Extract->getOperand(0));
  unsigned BOpcode = BinOp.getOpcode();
  if (!TLI.isBinOp(BOpcode) || BinOp.getNode()->getNumValues() != 1)
    return SDValue();

  // The binop result must be a vector type, so we can extract some fraction
  // of it.
  EVT WideBVT = BinOp.getValueType();
  if (!WideBVT.isVector())
    return SDValue();

  EVT VT = Extract->getValueType(0);
  unsigned ExtractIndex = ExtractIndexC->getZExtValue();
  assert(ExtractIndex % VT.getVectorNumElements() == 0 &&
         "Extract index is not a multiple of the vector length.");

  // Bail out if this is not a proper multiple width extraction.
  unsigned WideWidth = WideBVT.getSizeInBits();
  unsigned NarrowWidth = VT.getSizeInBits();
  if (WideWidth % NarrowWidth != 0)
    return SDValue();

  // Bail out if we are extracting a fraction of a single operation. This can
  // occur because we potentially looked through a bitcast of the binop.
  unsigned NarrowingRatio = WideWidth / NarrowWidth;
  unsigned WideNumElts = WideBVT.getVectorNumElements();
  if (WideNumElts % NarrowingRatio != 0)
    return SDValue();

  // Bail out if the target does not support a narrower version of the binop.
  EVT NarrowBVT = EVT::getVectorVT(*DAG.getContext(), WideBVT.getScalarType(),
                                   WideNumElts / NarrowingRatio);
  if (!TLI.isOperationLegalOrCustomOrPromote(BOpcode, NarrowBVT))
    return SDValue();

  // If extraction is cheap, we don't need to look at the binop operands
  // for concat ops. The narrow binop alone makes this transform profitable.
  // We can't just reuse the original extract index operand because we may have
  // bitcasted.
  unsigned ConcatOpNum = ExtractIndex / VT.getVectorNumElements();
  unsigned ExtBOIdx = ConcatOpNum * NarrowBVT.getVectorNumElements();
  EVT ExtBOIdxVT = Extract->getOperand(1).getValueType();
  if (TLI.isExtractSubvectorCheap(NarrowBVT, WideBVT, ExtBOIdx) &&
      BinOp.hasOneUse() && Extract->getOperand(0)->hasOneUse()) {
    // extract (binop B0, B1), N --> binop (extract B0, N), (extract B1, N)
    SDLoc DL(Extract);
    SDValue NewExtIndex = DAG.getConstant(ExtBOIdx, DL, ExtBOIdxVT);
    SDValue X = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
                            BinOp.getOperand(0), NewExtIndex);
    SDValue Y = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
                            BinOp.getOperand(1), NewExtIndex);
    SDValue NarrowBinOp = DAG.getNode(BOpcode, DL, NarrowBVT, X, Y,
                                      BinOp.getNode()->getFlags());
    return DAG.getBitcast(VT, NarrowBinOp);
  }

  // Only handle the case where we are doubling and then halving. A larger ratio
  // may require more than two narrow binops to replace the wide binop.
  if (NarrowingRatio != 2)
    return SDValue();

  // TODO: The motivating case for this transform is an x86 AVX1 target. That
  // target has temptingly almost legal versions of bitwise logic ops in 256-bit
  // flavors, but no other 256-bit integer support. This could be extended to
  // handle any binop, but that may require fixing/adding other folds to avoid
  // codegen regressions.
  if (BOpcode != ISD::AND && BOpcode != ISD::OR && BOpcode != ISD::XOR)
    return SDValue();

  // We need at least one concatenation operation of a binop operand to make
  // this transform worthwhile. The concat must double the input vector sizes.
  auto GetSubVector = [ConcatOpNum](SDValue V) -> SDValue {
    if (V.getOpcode() == ISD::CONCAT_VECTORS && V.getNumOperands() == 2)
      return V.getOperand(ConcatOpNum);
    return SDValue();
  };
  SDValue SubVecL = GetSubVector(peekThroughBitcasts(BinOp.getOperand(0)));
  SDValue SubVecR = GetSubVector(peekThroughBitcasts(BinOp.getOperand(1)));

  if (SubVecL || SubVecR) {
    // If a binop operand was not the result of a concat, we must extract a
    // half-sized operand for our new narrow binop:
    // extract (binop (concat X1, X2), (concat Y1, Y2)), N --> binop XN, YN
    // extract (binop (concat X1, X2), Y), N --> binop XN, (extract Y, IndexC)
    // extract (binop X, (concat Y1, Y2)), N --> binop (extract X, IndexC), YN
    SDLoc DL(Extract);
    SDValue IndexC = DAG.getConstant(ExtBOIdx, DL, ExtBOIdxVT);
    SDValue X = SubVecL ? DAG.getBitcast(NarrowBVT, SubVecL)
                        : DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
                                      BinOp.getOperand(0), IndexC);

    SDValue Y = SubVecR ? DAG.getBitcast(NarrowBVT, SubVecR)
                        : DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
                                      BinOp.getOperand(1), IndexC);

    SDValue NarrowBinOp = DAG.getNode(BOpcode, DL, NarrowBVT, X, Y);
    return DAG.getBitcast(VT, NarrowBinOp);
  }

  return SDValue();
}

/// If we are extracting a subvector from a wide vector load, convert to a
/// narrow load to eliminate the extraction:
/// (extract_subvector (load wide vector)) --> (load narrow vector)
static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {
  // TODO: Add support for big-endian. The offset calculation must be adjusted.
  if (DAG.getDataLayout().isBigEndian())
    return SDValue();

  auto *Ld = dyn_cast<LoadSDNode>(Extract->getOperand(0));
  auto *ExtIdx = dyn_cast<ConstantSDNode>(Extract->getOperand(1));
  if (!Ld || Ld->getExtensionType() || !Ld->isSimple() || !ExtIdx)
    return SDValue();

  // Allow targets to opt-out.
  EVT VT = Extract->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.shouldReduceLoadWidth(Ld, Ld->getExtensionType(), VT))
    return SDValue();

  // The narrow load will be offset from the base address of the old load if
  // we are extracting from something besides index 0 (little-endian).
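  // e.g. extracting a v2i32 at index 2 from a v8i32 load reads from
  // Offset = 2 * 4 = 8 bytes past the original base address.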
  SDLoc DL(Extract);
  SDValue BaseAddr = Ld->getOperand(1);
  unsigned Offset = ExtIdx->getZExtValue() * VT.getScalarType().getStoreSize();

  // TODO: Use "BaseIndexOffset" to make this more effective.
  SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(Ld->getMemOperand(), Offset,
                                                   VT.getStoreSize());
  SDValue NewLd = DAG.getLoad(VT, DL, Ld->getChain(), NewAddr, MMO);
  DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
  return NewLd;
}

SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode *N) {
  EVT NVT = N->getValueType(0);
  SDValue V = N->getOperand(0);

  // Extract from UNDEF is UNDEF.
  if (V.isUndef())
    return DAG.getUNDEF(NVT);

  if (TLI.isOperationLegalOrCustomOrPromote(ISD::LOAD, NVT))
    if (SDValue NarrowLoad = narrowExtractedVectorLoad(N, DAG))
      return NarrowLoad;

  // Combine an extract of an extract into a single extract_subvector.
  // ext (ext X, C), 0 --> ext X, C
  SDValue Index = N->getOperand(1);
  if (isNullConstant(Index) && V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      V.hasOneUse() && isa<ConstantSDNode>(V.getOperand(1))) {
    if (TLI.isExtractSubvectorCheap(NVT, V.getOperand(0).getValueType(),
                                    V.getConstantOperandVal(1)) &&
        TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NVT)) {
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT, V.getOperand(0),
                         V.getOperand(1));
    }
  }

  // Try to move vector bitcast after extract_subv by scaling extraction index:
  // extract_subv (bitcast X), Index --> bitcast (extract_subv X, Index')
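  // e.g. (v2i64 extract_subvector (v4i64 bitcast X:v8i32), 2)
  //        --> (v2i64 bitcast (v4i32 extract_subvector X, 4))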
  if (isa<ConstantSDNode>(Index) && V.getOpcode() == ISD::BITCAST &&
      V.getOperand(0).getValueType().isVector()) {
    SDValue SrcOp = V.getOperand(0);
    EVT SrcVT = SrcOp.getValueType();
    unsigned SrcNumElts = SrcVT.getVectorNumElements();
    unsigned DestNumElts = V.getValueType().getVectorNumElements();
    if ((SrcNumElts % DestNumElts) == 0) {
      unsigned SrcDestRatio = SrcNumElts / DestNumElts;
      unsigned NewExtNumElts = NVT.getVectorNumElements() * SrcDestRatio;
      EVT NewExtVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
                                      NewExtNumElts);
      if (TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NewExtVT)) {
        unsigned IndexValScaled = N->getConstantOperandVal(1) * SrcDestRatio;
        SDLoc DL(N);
        SDValue NewIndex = DAG.getIntPtrConstant(IndexValScaled, DL);
        SDValue NewExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewExtVT,
                                         V.getOperand(0), NewIndex);
        return DAG.getBitcast(NVT, NewExtract);
      }
    }
    // TODO - handle (DestNumElts % SrcNumElts) == 0
  }

  // Combine:
  //    (extract_subvec (concat V1, V2, ...), i)
  // Into:
  //    Vi if possible
  // Only operand 0 is checked, as 'concat' assumes all of its inputs have
  // the same type.
  if (V.getOpcode() == ISD::CONCAT_VECTORS && isa<ConstantSDNode>(Index) &&
      V.getOperand(0).getValueType() == NVT) {
    unsigned Idx = N->getConstantOperandVal(1);
    unsigned NumElems = NVT.getVectorNumElements();
    assert((Idx % NumElems) == 0 &&
           "IDX in concat is not a multiple of the result vector length.");
    return V->getOperand(Idx / NumElems);
  }

  V = peekThroughBitcasts(V);

  // If the input is a build vector, try to make a smaller build vector.
  if (V.getOpcode() == ISD::BUILD_VECTOR) {
    if (auto *IdxC = dyn_cast<ConstantSDNode>(Index)) {
      EVT InVT = V.getValueType();
      unsigned ExtractSize = NVT.getSizeInBits();
      unsigned EltSize = InVT.getScalarSizeInBits();
      // Only do this if we won't split any elements.
      if (ExtractSize % EltSize == 0) {
        unsigned NumElems = ExtractSize / EltSize;
        EVT EltVT = InVT.getVectorElementType();
        EVT ExtractVT = NumElems == 1 ? EltVT
                                      : EVT::getVectorVT(*DAG.getContext(),
                                                         EltVT, NumElems);
        if ((Level < AfterLegalizeDAG ||
             (NumElems == 1 ||
              TLI.isOperationLegal(ISD::BUILD_VECTOR, ExtractVT))) &&
            (!LegalTypes || TLI.isTypeLegal(ExtractVT))) {
          unsigned IdxVal = IdxC->getZExtValue();
          IdxVal *= NVT.getScalarSizeInBits();
          IdxVal /= EltSize;

          if (NumElems == 1) {
            SDValue Src = V->getOperand(IdxVal);
            // BUILD_VECTOR operands may be implicitly wider than the element
            // type, so truncate to the scalar element type, not to InVT.
            if (EltVT != Src.getValueType())
              Src = DAG.getNode(ISD::TRUNCATE, SDLoc(N), EltVT, Src);
            return DAG.getBitcast(NVT, Src);
          }

          // Extract the pieces from the original build_vector.
          SDValue BuildVec = DAG.getBuildVector(
              ExtractVT, SDLoc(N), V->ops().slice(IdxVal, NumElems));
          return DAG.getBitcast(NVT, BuildVec);
        }
      }
    }
  }

  if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
    // Handle only simple case where vector being inserted and vector
    // being extracted are of same size.
    EVT SmallVT = V.getOperand(1).getValueType();
    if (!NVT.bitsEq(SmallVT))
      return SDValue();

    // Only handle cases where both indexes are constants.
    auto *ExtIdx = dyn_cast<ConstantSDNode>(Index);
    auto *InsIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
    if (InsIdx && ExtIdx) {
      // Combine:
      //    (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
      // Into:
      //    indices are equal or bit offsets are equal => V1
      //    otherwise => (extract_subvec V1, ExtIdx)
      if (InsIdx->getZExtValue() * SmallVT.getScalarSizeInBits() ==
          ExtIdx->getZExtValue() * NVT.getScalarSizeInBits())
        return DAG.getBitcast(NVT, V.getOperand(1));
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT,
          DAG.getBitcast(N->getOperand(0).getValueType(), V.getOperand(0)),
          Index);
    }
  }

  if (SDValue NarrowBOp = narrowExtractedVectorBinOp(N, DAG))
    return NarrowBOp;

  if (SimplifyDemandedVectorElts(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

/// Try to convert a wide shuffle of concatenated vectors into 2 narrow shuffles
/// followed by concatenation. Narrow vector ops may have better performance
/// than wide ops, and this can unlock further narrowing of other vector ops.
/// Targets can invert this transform later if it is not profitable.
static SDValue foldShuffleOfConcatUndefs(ShuffleVectorSDNode *Shuf,
                                         SelectionDAG &DAG) {
  SDValue N0 = Shuf->getOperand(0), N1 = Shuf->getOperand(1);
  if (N0.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
      N1.getOpcode() != ISD::CONCAT_VECTORS || N1.getNumOperands() != 2 ||
      !N0.getOperand(1).isUndef() || !N1.getOperand(1).isUndef())
    return SDValue();

  // Split the wide shuffle mask into halves. Any mask element that is accessing
  // operand 1 is offset down to account for narrowing of the vectors.
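  // e.g. for a v8i32 shuffle<0,8,1,9,u,u,3,11>, the halves become
  // Mask0 = <0,4,1,5> and Mask1 = <u,u,3,7>.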
  ArrayRef<int> Mask = Shuf->getMask();
  EVT VT = Shuf->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  unsigned HalfNumElts = NumElts / 2;
  SmallVector<int, 16> Mask0(HalfNumElts, -1);
  SmallVector<int, 16> Mask1(HalfNumElts, -1);
  for (unsigned i = 0; i != NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    int M = Mask[i] < (int)NumElts ? Mask[i] : Mask[i] - (int)HalfNumElts;
    if (i < HalfNumElts)
      Mask0[i] = M;
    else
      Mask1[i - HalfNumElts] = M;
  }

  // Ask the target if this is a valid transform.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
                                HalfNumElts);
  if (!TLI.isShuffleMaskLegal(Mask0, HalfVT) ||
      !TLI.isShuffleMaskLegal(Mask1, HalfVT))
    return SDValue();

  // shuffle (concat X, undef), (concat Y, undef), Mask -->
  // concat (shuffle X, Y, Mask0), (shuffle X, Y, Mask1)
  SDValue X = N0.getOperand(0), Y = N1.getOperand(0);
  SDLoc DL(Shuf);
  SDValue Shuf0 = DAG.getVectorShuffle(HalfVT, DL, X, Y, Mask0);
  SDValue Shuf1 = DAG.getVectorShuffle(HalfVT, DL, X, Y, Mask1);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Shuf0, Shuf1);
}

// Tries to turn a shuffle of two CONCAT_VECTORS into a single concat, or to
// turn a shuffle of a single concat into a simpler shuffle followed by a
// concat.
static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
  ArrayRef<int> Mask = SVN->getMask();

  SmallVector<SDValue, 4> Ops;
  EVT ConcatVT = N0.getOperand(0).getValueType();
  unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements();
  unsigned NumConcats = NumElts / NumElemsPerConcat;

  auto IsUndefMaskElt = [](int i) { return i == -1; };

  // Special case: shuffle(concat(A,B)) can be more efficiently represented
  // as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high
  // half vector elements.
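  // e.g. v8i32 shuffle<1,4,2,5,u,u,u,u> (concat A:v4i32, B:v4i32), undef
  //        --> concat (v4i32 shuffle<1,4,2,5> A, B), (v4i32 undef)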
  if (NumElemsPerConcat * 2 == NumElts && N1.isUndef() &&
      llvm::all_of(Mask.slice(NumElemsPerConcat, NumElemsPerConcat),
                   IsUndefMaskElt)) {
    N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0),
                              N0.getOperand(1),
                              Mask.slice(0, NumElemsPerConcat));
    N1 = DAG.getUNDEF(ConcatVT);
    return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N0, N1);
  }

  // Look at every vector that's inserted. We're looking for exact
  // subvector-sized copies from a concatenated vector.
  for (unsigned I = 0; I != NumConcats; ++I) {
    unsigned Begin = I * NumElemsPerConcat;
    ArrayRef<int> SubMask = Mask.slice(Begin, NumElemsPerConcat);

    // Make sure we're dealing with a copy.
    if (llvm::all_of(SubMask, IsUndefMaskElt)) {
      Ops.push_back(DAG.getUNDEF(ConcatVT));
      continue;
    }

    int OpIdx = -1;
    for (int i = 0; i != (int)NumElemsPerConcat; ++i) {
      if (IsUndefMaskElt(SubMask[i]))
        continue;
      if ((SubMask[i] % (int)NumElemsPerConcat) != i)
        return SDValue();
      int EltOpIdx = SubMask[i] / NumElemsPerConcat;
      if (0 <= OpIdx && EltOpIdx != OpIdx)
        return SDValue();
      OpIdx = EltOpIdx;
    }
    assert(0 <= OpIdx && "Unknown concat_vectors op");

    if (OpIdx < (int)N0.getNumOperands())
      Ops.push_back(N0.getOperand(OpIdx));
    else
      Ops.push_back(N1.getOperand(OpIdx - N0.getNumOperands()));
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
}

// Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
// BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
//
// SHUFFLE(BUILD_VECTOR(), BUILD_VECTOR()) -> BUILD_VECTOR() is always
// a simplification in some sense, but it isn't appropriate in general: some
// BUILD_VECTORs are substantially cheaper than others. The general case
// of a BUILD_VECTOR requires inserting each element individually (or
// performing the equivalent in a temporary stack variable). A BUILD_VECTOR of
// all constants is a single constant pool load.  A BUILD_VECTOR where each
// element is identical is a splat.  A BUILD_VECTOR where most of the operands
// are undef lowers to a small number of element insertions.
//
// To deal with this, we currently use a bunch of mostly arbitrary heuristics.
// We don't fold shuffles where one side is a non-zero constant, and we don't
// fold shuffles if the resulting (non-splat) BUILD_VECTOR would have duplicate
// non-constant operands. This seems to work out reasonably well in practice.
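// An illustrative example of the basic fold:
//   shuffle<0,4,1,5> (build_vector a, b, u, u), (build_vector c, d, u, u)
//     --> build_vector a, c, b, d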
static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
                                       SelectionDAG &DAG,
                                       const TargetLowering &TLI) {
  EVT VT = SVN->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  SDValue N0 = SVN->getOperand(0);
  SDValue N1 = SVN->getOperand(1);

  if (!N0->hasOneUse())
    return SDValue();

  // If only one of N0,N1 is a constant, bail out if it is not ALL_ZEROS as
  // discussed above.
  if (!N1.isUndef()) {
    if (!N1->hasOneUse())
      return SDValue();

    bool N0AnyConst = isAnyConstantBuildVector(N0);
    bool N1AnyConst = isAnyConstantBuildVector(N1);
    if (N0AnyConst && !N1AnyConst && !ISD::isBuildVectorAllZeros(N0.getNode()))
      return SDValue();
    if (!N0AnyConst && N1AnyConst && !ISD::isBuildVectorAllZeros(N1.getNode()))
      return SDValue();
  }

  // If both inputs are splats of the same value then we can safely merge this
  // to a single BUILD_VECTOR with undef elements based on the shuffle mask.
  bool IsSplat = false;
  auto *BV0 = dyn_cast<BuildVectorSDNode>(N0);
  auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
  if (BV0 && BV1)
    if (SDValue Splat0 = BV0->getSplatValue())
      IsSplat = (Splat0 == BV1->getSplatValue());

  SmallVector<SDValue, 8> Ops;
  SmallSet<SDValue, 16> DuplicateOps;
  for (int M : SVN->getMask()) {
    SDValue Op = DAG.getUNDEF(VT.getScalarType());
    if (M >= 0) {
      int Idx = M < (int)NumElts ? M : M - NumElts;
      SDValue &S = (M < (int)NumElts ? N0 : N1);
      if (S.getOpcode() == ISD::BUILD_VECTOR) {
        Op = S.getOperand(Idx);
      } else if (S.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        SDValue Op0 = S.getOperand(0);
        Op = Idx == 0 ? Op0 : DAG.getUNDEF(Op0.getValueType());
      } else {
        // Operand can't be combined - bail out.
        return SDValue();
      }
    }

    // Don't duplicate a non-constant BUILD_VECTOR operand unless we're
    // generating a splat; semantically, this is fine, but it's likely to
    // generate low-quality code if the target can't reconstruct an appropriate
    // shuffle.
    if (!Op.isUndef() && !isa<ConstantSDNode>(Op) && !isa<ConstantFPSDNode>(Op))
      if (!IsSplat && !DuplicateOps.insert(Op).second)
        return SDValue();

    Ops.push_back(Op);
  }

  // BUILD_VECTOR requires all inputs to be of the same type; find the
  // maximum type and extend them all.
  EVT SVT = VT.getScalarType();
  if (SVT.isInteger())
    for (SDValue &Op : Ops)
      SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
  if (SVT != VT.getScalarType())
    for (SDValue &Op : Ops)
      Op = TLI.isZExtFree(Op.getValueType(), SVT)
               ? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT)
               : DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT);
  return DAG.getBuildVector(VT, SDLoc(SVN), Ops);
}

// Match shuffles that can be converted to any_vector_extend_in_reg.
// This is often generated during legalization.
// e.g. v4i32 <0,u,1,u> -> (v2i64 any_vector_extend_in_reg(v4i32 src))
// TODO Add support for ZERO_EXTEND_VECTOR_INREG when we have a test case.
static SDValue combineShuffleToVectorExtend(ShuffleVectorSDNode *SVN,
                                            SelectionDAG &DAG,
                                            const TargetLowering &TLI,
                                            bool LegalOperations) {
  EVT VT = SVN->getValueType(0);
  bool IsBigEndian = DAG.getDataLayout().isBigEndian();

  // TODO Add support for big-endian when we have a test case.
  if (!VT.isInteger() || IsBigEndian)
    return SDValue();

  unsigned NumElts = VT.getVectorNumElements();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  ArrayRef<int> Mask = SVN->getMask();
  SDValue N0 = SVN->getOperand(0);

  // shuffle<0,-1,1,-1> == (v2i64 anyextend_vector_inreg(v4i32))
  auto isAnyExtend = [&Mask, &NumElts](unsigned Scale) {
    for (unsigned i = 0; i != NumElts; ++i) {
      if (Mask[i] < 0)
        continue;
      if ((i % Scale) == 0 && Mask[i] == (int)(i / Scale))
        continue;
      return false;
    }
    return true;
  };
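  // E.g. (illustrative walkthrough): with NumElts = 4 and Scale = 2, the mask
  // <0,-1,1,-1> passes: lane 0 maps to source element 0, lane 2 maps to
  // source element 1, and the odd (extended) lanes are undef.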

  // Attempt to match a '*_extend_vector_inreg' shuffle; we only search for
  // power-of-2 extensions, as they are the most likely.
  for (unsigned Scale = 2; Scale < NumElts; Scale *= 2) {
    // Check for non power of 2 vector sizes
    if (NumElts % Scale != 0)
      continue;
    if (!isAnyExtend(Scale))
      continue;

    EVT OutSVT = EVT::getIntegerVT(*DAG.getContext(), EltSizeInBits * Scale);
    EVT OutVT = EVT::getVectorVT(*DAG.getContext(), OutSVT, NumElts / Scale);
    // Never create an illegal type. Only create unsupported operations if we
    // are pre-legalization.
    if (TLI.isTypeLegal(OutVT))
      if (!LegalOperations ||
          TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND_VECTOR_INREG, OutVT))
        return DAG.getBitcast(VT,
                              DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG,
                                          SDLoc(SVN), OutVT, N0));
  }

  return SDValue();
}

// Detect 'truncate_vector_inreg' style shuffles that pack the lower parts of
// each source element of a large type into the lowest elements of a smaller
// destination type. This is often generated during legalization.
// If the source node itself was a '*_extend_vector_inreg' node then we should
// be able to remove it.
static SDValue combineTruncationShuffle(ShuffleVectorSDNode *SVN,
                                        SelectionDAG &DAG) {
  EVT VT = SVN->getValueType(0);
  bool IsBigEndian = DAG.getDataLayout().isBigEndian();

  // TODO Add support for big-endian when we have a test case.
  if (!VT.isInteger() || IsBigEndian)
    return SDValue();

  SDValue N0 = peekThroughBitcasts(SVN->getOperand(0));

  unsigned Opcode = N0.getOpcode();
  if (Opcode != ISD::ANY_EXTEND_VECTOR_INREG &&
      Opcode != ISD::SIGN_EXTEND_VECTOR_INREG &&
      Opcode != ISD::ZERO_EXTEND_VECTOR_INREG)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  ArrayRef<int> Mask = SVN->getMask();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  unsigned ExtSrcSizeInBits = N00.getScalarValueSizeInBits();
  unsigned ExtDstSizeInBits = N0.getScalarValueSizeInBits();

  if (ExtDstSizeInBits % ExtSrcSizeInBits != 0)
    return SDValue();
  unsigned ExtScale = ExtDstSizeInBits / ExtSrcSizeInBits;

  // (v4i32 truncate_vector_inreg(v2i64)) == shuffle<0,2,-1,-1>
  // (v8i16 truncate_vector_inreg(v4i32)) == shuffle<0,2,4,6,-1,-1,-1,-1>
  // (v8i16 truncate_vector_inreg(v2i64)) == shuffle<0,4,-1,-1,-1,-1,-1,-1>
  auto isTruncate = [&Mask, &NumElts](unsigned Scale) {
    for (unsigned i = 0; i != NumElts; ++i) {
      if (Mask[i] < 0)
        continue;
      if ((i * Scale) < NumElts && Mask[i] == (int)(i * Scale))
        continue;
      return false;
    }
    return true;
  };
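  // E.g. (illustrative walkthrough): with NumElts = 4 and Scale = 2, the mask
  // <0,2,-1,-1> passes: lanes 0 and 1 take source elements 0 and 2 (the low
  // halves), and the remaining lanes are undef.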

  // At the moment we just handle the case where we've truncated back to the
  // same size as before the extension.
  // TODO: handle more extension/truncation cases as cases arise.
  if (EltSizeInBits != ExtSrcSizeInBits)
    return SDValue();

  // We can remove *extend_vector_inreg only if the truncation happens at
  // the same scale as the extension.
  if (isTruncate(ExtScale))
    return DAG.getBitcast(VT, N00);

  return SDValue();
}

// Combine shuffles of splat-shuffles of the form:
// shuffle (shuffle V, undef, splat-mask), undef, M
// If splat-mask contains undef elements, we need to be careful about
// introducing undefs in the folded mask that are not the result of composing
// the masks of the shuffles.
static SDValue combineShuffleOfSplatVal(ShuffleVectorSDNode *Shuf,
                                        SelectionDAG &DAG) {
  if (!Shuf->getOperand(1).isUndef())
    return SDValue();
  auto *Splat = dyn_cast<ShuffleVectorSDNode>(Shuf->getOperand(0));
  if (!Splat || !Splat->isSplat())
    return SDValue();

  ArrayRef<int> ShufMask = Shuf->getMask();
  ArrayRef<int> SplatMask = Splat->getMask();
  assert(ShufMask.size() == SplatMask.size() && "Mask length mismatch");

  // Prefer simplifying to the splat-shuffle, if possible. This is legal if
  // every undef mask element in the splat-shuffle has a corresponding undef
  // element in the user-shuffle's mask or if the composition of mask elements
  // would result in undef.
  // Examples for (shuffle (shuffle v, undef, SplatMask), undef, UserMask):
  // * UserMask=[0,2,u,u], SplatMask=[2,u,2,u] -> [2,2,u,u]
  //   In this case it is not legal to simplify to the splat-shuffle because we
  //   may expose to the users of the shuffle an undef element at index 1
  //   that was not there before the combine.
  // * UserMask=[0,u,2,u], SplatMask=[2,u,2,u] -> [2,u,2,u]
  //   In this case the composition of masks yields SplatMask, so it's ok to
  //   simplify to the splat-shuffle.
  // * UserMask=[3,u,2,u], SplatMask=[2,u,2,u] -> [u,u,2,u]
  //   In this case the composed mask includes all undef elements of SplatMask
  //   and in addition sets element zero to undef. It is safe to simplify to
  //   the splat-shuffle.
  auto CanSimplifyToExistingSplat = [](ArrayRef<int> UserMask,
                                       ArrayRef<int> SplatMask) {
    for (unsigned i = 0, e = UserMask.size(); i != e; ++i)
      if (UserMask[i] != -1 && SplatMask[i] == -1 &&
          SplatMask[UserMask[i]] != -1)
        return false;
    return true;
  };
  if (CanSimplifyToExistingSplat(ShufMask, SplatMask))
    return Shuf->getOperand(0);

  // Create a new shuffle with a mask that is composed of the two shuffles'
  // masks.
  SmallVector<int, 32> NewMask;
  for (int Idx : ShufMask)
    NewMask.push_back(Idx == -1 ? -1 : SplatMask[Idx]);

  return DAG.getVectorShuffle(Splat->getValueType(0), SDLoc(Splat),
                              Splat->getOperand(0), Splat->getOperand(1),
                              NewMask);
}

/// If the shuffle mask is taking exactly one element from the first vector
/// operand and passing through all other elements from the second vector
/// operand, return the index of the mask element that is choosing an element
/// from the first operand. Otherwise, return -1.
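/// For example (illustrative): Mask = <4, 1, 6, 7> with MaskSize 4 returns 1:
/// only mask element 1 selects from operand 0, and every other element passes
/// through the matching lane of operand 1.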
static int getShuffleMaskIndexOfOneElementFromOp0IntoOp1(ArrayRef<int> Mask) {
  int MaskSize = Mask.size();
  int EltFromOp0 = -1;
  // TODO: This does not match if there are undef elements in the shuffle mask.
  // Should we ignore undefs in the shuffle mask instead? The trade-off is
  // removing an instruction (a shuffle), but losing the knowledge that some
  // vector lanes are not needed.
  for (int i = 0; i != MaskSize; ++i) {
    if (Mask[i] >= 0 && Mask[i] < MaskSize) {
      // We're looking for a shuffle of exactly one element from operand 0.
      if (EltFromOp0 != -1)
        return -1;
      EltFromOp0 = i;
    } else if (Mask[i] != i + MaskSize) {
      // Nothing from operand 1 can change lanes.
      return -1;
    }
  }
  return EltFromOp0;
}

/// If a shuffle inserts exactly one element from a source vector operand into
/// another vector operand and we can access the specified element as a scalar,
/// then we can eliminate the shuffle.
static SDValue replaceShuffleOfInsert(ShuffleVectorSDNode *Shuf,
                                      SelectionDAG &DAG) {
  // First, check if we are taking one element of a vector and shuffling that
  // element into another vector.
  ArrayRef<int> Mask = Shuf->getMask();
  SmallVector<int, 16> CommutedMask(Mask.begin(), Mask.end());
  SDValue Op0 = Shuf->getOperand(0);
  SDValue Op1 = Shuf->getOperand(1);
  int ShufOp0Index = getShuffleMaskIndexOfOneElementFromOp0IntoOp1(Mask);
  if (ShufOp0Index == -1) {
    // Commute mask and check again.
    ShuffleVectorSDNode::commuteMask(CommutedMask);
    ShufOp0Index = getShuffleMaskIndexOfOneElementFromOp0IntoOp1(CommutedMask);
    if (ShufOp0Index == -1)
      return SDValue();
    // Commute operands to match the commuted shuffle mask.
    std::swap(Op0, Op1);
    Mask = CommutedMask;
  }

  // The shuffle inserts exactly one element from operand 0 into operand 1.
  // Now see if we can access that element as a scalar via a real insert element
  // instruction.
  // TODO: We can try harder to locate the element as a scalar. Examples: it
  // could be an operand of SCALAR_TO_VECTOR, BUILD_VECTOR, or a constant.
  assert(Mask[ShufOp0Index] >= 0 && Mask[ShufOp0Index] < (int)Mask.size() &&
         "Shuffle mask value must be from operand 0");
  if (Op0.getOpcode() != ISD::INSERT_VECTOR_ELT)
    return SDValue();

  auto *InsIndexC = dyn_cast<ConstantSDNode>(Op0.getOperand(2));
  if (!InsIndexC || InsIndexC->getSExtValue() != Mask[ShufOp0Index])
    return SDValue();

  // There's an existing insertelement with constant insertion index, so we
  // don't need to check the legality/profitability of a replacement operation
  // that differs at most in the constant value. The target should be able to
  // lower any of those in a similar way. If not, legalization will expand this
  // to a scalar-to-vector plus shuffle.
  //
  // Note that the shuffle may move the scalar from the position that the insert
  // element used. Therefore, our new insert element occurs at the shuffle's
  // mask index value, not the insert's index value.
  // shuffle (insertelt v1, x, C), v2, mask --> insertelt v2, x, C'
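  // E.g. (illustrative): shuffle (insertelt v1, x, 0), v2, <4, 0, 6, 7>
  //        --> insertelt v2, x, 1
  // because the shuffle moves x from lane 0 of v1 into lane 1 of the result.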
  SDValue NewInsIndex = DAG.getConstant(ShufOp0Index, SDLoc(Shuf),
                                        Op0.getOperand(2).getValueType());
  return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Shuf), Op0.getValueType(),
                     Op1, Op0.getOperand(1), NewInsIndex);
}

/// If we have a unary shuffle of a shuffle, see if it can be folded away
/// completely. This has the potential to lose undef knowledge because the first
/// shuffle may not have an undef mask element where the second one does. So
/// only call this after doing simplifications based on demanded elements.
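/// For example (illustrative): shuf (shuf0 X, Y, <0, 0, 3, 3>), undef,
/// <1, 0, 3, 2> can be replaced by the inner shuffle, because for every
/// demanded lane i, Mask0[Mask[i]] == Mask0[i].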
static SDValue simplifyShuffleOfShuffle(ShuffleVectorSDNode *Shuf) {
  // shuf (shuf0 X, Y, Mask0), undef, Mask
  auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(Shuf->getOperand(0));
  if (!Shuf0 || !Shuf->getOperand(1).isUndef())
    return SDValue();

  ArrayRef<int> Mask = Shuf->getMask();
  ArrayRef<int> Mask0 = Shuf0->getMask();
  for (int i = 0, e = (int)Mask.size(); i != e; ++i) {
    // Ignore undef elements.
    if (Mask[i] == -1)
      continue;
    assert(Mask[i] >= 0 && Mask[i] < e && "Unexpected shuffle mask value");

    // Is the element of the shuffle operand chosen by this shuffle the same as
    // the element chosen by the shuffle operand itself?
    if (Mask0[Mask[i]] != Mask0[i])
      return SDValue();
  }
  // Every element of this shuffle is identical to the result of the previous
  // shuffle, so we can replace this value.
  return Shuf->getOperand(0);
}

SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");

  // Canonicalize shuffle undef, undef -> undef
  if (N0.isUndef() && N1.isUndef())
    return DAG.getUNDEF(VT);

  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);

  // Canonicalize shuffle v, v -> v, undef
  if (N0 == N1) {
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) Idx -= NumElts;
      NewMask.push_back(Idx);
    }
    return DAG.getVectorShuffle(VT, SDLoc(N), N0, DAG.getUNDEF(VT), NewMask);
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N0.isUndef())
    return DAG.getCommutedVectorShuffle(*SVN);

  // Remove references to rhs if it is undef
  if (N1.isUndef()) {
    bool Changed = false;
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) {
        Idx = -1;
        Changed = true;
      }
      NewMask.push_back(Idx);
    }
    if (Changed)
      return DAG.getVectorShuffle(VT, SDLoc(N), N0, N1, NewMask);
  }

  if (SDValue InsElt = replaceShuffleOfInsert(SVN, DAG))
    return InsElt;

  // A shuffle of a single vector that is a splatted value can always be folded.
  if (SDValue V = combineShuffleOfSplatVal(SVN, DAG))
    return V;

  // If it is a splat, check if the argument vector is another splat or a
  // build_vector.
  if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
    int SplatIndex = SVN->getSplatIndex();
    if (N0.hasOneUse() && TLI.isExtractVecEltCheap(VT, SplatIndex) &&
        TLI.isBinOp(N0.getOpcode()) && N0.getNode()->getNumValues() == 1) {
      // splat (vector_bo L, R), Index -->
      // splat (scalar_bo (extelt L, Index), (extelt R, Index))
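      // E.g. (illustrative): splat (add L, R), 0
      //   --> splat (add (extelt L, 0), (extelt R, 0))
      // computing one scalar add instead of a full vector add.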
      SDValue L = N0.getOperand(0), R = N0.getOperand(1);
      SDLoc DL(N);
      EVT EltVT = VT.getScalarType();
      SDValue Index = DAG.getIntPtrConstant(SplatIndex, DL);
      SDValue ExtL = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, L, Index);
      SDValue ExtR = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, R, Index);
      SDValue NewBO = DAG.getNode(N0.getOpcode(), DL, EltVT, ExtL, ExtR,
                                  N0.getNode()->getFlags());
      SDValue Insert = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, NewBO);
      SmallVector<int, 16> ZeroMask(VT.getVectorNumElements(), 0);
      return DAG.getVectorShuffle(VT, DL, Insert, DAG.getUNDEF(VT), ZeroMask);
    }

    // If this is a bit convert that changes the element type of the vector but
    // not the number of vector elements, look through it.  Be careful not to
    // look through conversions that change things like v4f32 to v2f64.
    SDNode *V = N0.getNode();
    if (V->getOpcode() == ISD::BITCAST) {
      SDValue ConvInput = V->getOperand(0);
      if (ConvInput.getValueType().isVector() &&
          ConvInput.getValueType().getVectorNumElements() == NumElts)
        V = ConvInput.getNode();
    }

    if (V->getOpcode() == ISD::BUILD_VECTOR) {
      assert(V->getNumOperands() == NumElts &&
             "BUILD_VECTOR has wrong number of operands");
      SDValue Base;
      bool AllSame = true;
      for (unsigned i = 0; i != NumElts; ++i) {
        if (!V->getOperand(i).isUndef()) {
          Base = V->getOperand(i);
          break;
        }
      }
      // Splat of <u, u, u, u>, return <u, u, u, u>
      if (!Base.getNode())
        return N0;
      for (unsigned i = 0; i != NumElts; ++i) {
        if (V->getOperand(i) != Base) {
          AllSame = false;
          break;
        }
      }
      // Splat of <x, x, x, x>, return <x, x, x, x>
      if (AllSame)
        return N0;

      // Canonicalize any other splat as a build_vector.
      SDValue Splatted = V->getOperand(SplatIndex);
      SmallVector<SDValue, 8> Ops(NumElts, Splatted);
      SDValue NewBV = DAG.getBuildVector(V->getValueType(0), SDLoc(N), Ops);

      // We may have jumped through bitcasts, so the type of the
      // BUILD_VECTOR may not match the type of the shuffle.
      if (V->getValueType(0) != VT)
        NewBV = DAG.getBitcast(VT, NewBV);
      return NewBV;
    }
  }

  // Simplify source operands based on shuffle mask.
  if (SimplifyDemandedVectorElts(SDValue(N, 0)))
    return SDValue(N, 0);

  // This is intentionally placed after demanded elements simplification because
  // it could eliminate knowledge of undef elements created by this shuffle.
  if (SDValue ShufOp = simplifyShuffleOfShuffle(SVN))
    return ShufOp;

  // Match shuffles that can be converted to any_vector_extend_in_reg.
  if (SDValue V = combineShuffleToVectorExtend(SVN, DAG, TLI, LegalOperations))
    return V;

  // Combine "truncate_vector_in_reg" style shuffles.
  if (SDValue V = combineTruncationShuffle(SVN, DAG))
    return V;

  if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
      Level < AfterLegalizeVectorOps &&
      (N1.isUndef() ||
      (N1.getOpcode() == ISD::CONCAT_VECTORS &&
       N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
    if (SDValue V = partitionShuffleOfConcats(N, DAG))
      return V;
  }

  // Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
  // BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
  if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT))
    if (SDValue Res = combineShuffleOfScalars(SVN, DAG, TLI))
      return Res;

  // If this shuffle only has a single input that is a bitcasted shuffle,
  // attempt to merge the 2 shuffles and suitably bitcast the inputs/output
  // back to their original types.
  if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
      N1.isUndef() && Level < AfterLegalizeVectorOps &&
      TLI.isTypeLegal(VT)) {
    auto ScaleShuffleMask = [](ArrayRef<int> Mask, int Scale) {
      if (Scale == 1)
        return SmallVector<int, 8>(Mask.begin(), Mask.end());

      SmallVector<int, 8> NewMask;
      for (int M : Mask)
        for (int s = 0; s != Scale; ++s)
          NewMask.push_back(M < 0 ? -1 : Scale * M + s);
      return NewMask;
    };
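    // E.g. (illustrative): scaling the mask <1, -1> by 2 yields
    // <2, 3, -1, -1>: each wide element expands to Scale consecutive narrow
    // elements.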

    SDValue BC0 = peekThroughOneUseBitcasts(N0);
    if (BC0.getOpcode() == ISD::VECTOR_SHUFFLE && BC0.hasOneUse()) {
      EVT SVT = VT.getScalarType();
      EVT InnerVT = BC0->getValueType(0);
      EVT InnerSVT = InnerVT.getScalarType();

      // Determine which shuffle works with the smaller scalar type.
      EVT ScaleVT = SVT.bitsLT(InnerSVT) ? VT : InnerVT;
      EVT ScaleSVT = ScaleVT.getScalarType();

      if (TLI.isTypeLegal(ScaleVT) &&
          0 == (InnerSVT.getSizeInBits() % ScaleSVT.getSizeInBits()) &&
          0 == (SVT.getSizeInBits() % ScaleSVT.getSizeInBits())) {
        int InnerScale = InnerSVT.getSizeInBits() / ScaleSVT.getSizeInBits();
        int OuterScale = SVT.getSizeInBits() / ScaleSVT.getSizeInBits();

        // Scale the shuffle masks to the smaller scalar type.
        ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0);
        SmallVector<int, 8> InnerMask =
            ScaleShuffleMask(InnerSVN->getMask(), InnerScale);
        SmallVector<int, 8> OuterMask =
            ScaleShuffleMask(SVN->getMask(), OuterScale);

        // Merge the shuffle masks.
        SmallVector<int, 8> NewMask;
        for (int M : OuterMask)
          NewMask.push_back(M < 0 ? -1 : InnerMask[M]);

        // Test for shuffle mask legality over both commutations.
        SDValue SV0 = BC0->getOperand(0);
        SDValue SV1 = BC0->getOperand(1);
        bool LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
        if (!LegalMask) {
          std::swap(SV0, SV1);
          ShuffleVectorSDNode::commuteMask(NewMask);
          LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
        }

        if (LegalMask) {
          SV0 = DAG.getBitcast(ScaleVT, SV0);
          SV1 = DAG.getBitcast(ScaleVT, SV1);
          return DAG.getBitcast(
              VT, DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask));
        }
      }
    }
  }

  // Canonicalize shuffles according to rules:
  //  shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A)
  //  shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B)
  //  shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
  if (N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
      N0.getOpcode() != ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
      TLI.isTypeLegal(VT)) {
    // The incoming shuffle must be of the same type as the result of the
    // current shuffle.
    assert(N1->getOperand(0).getValueType() == VT &&
           "Shuffle types don't match");

    SDValue SV0 = N1->getOperand(0);
    SDValue SV1 = N1->getOperand(1);
    bool HasSameOp0 = N0 == SV0;
    bool IsSV1Undef = SV1.isUndef();
    if (HasSameOp0 || IsSV1Undef || N0 == SV1)
      // Commute the operands of this shuffle so that next rule
      // will trigger.
      return DAG.getCommutedVectorShuffle(*SVN);
  }

  // Try to fold according to rules:
  //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
  //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
  //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
  // Don't try to fold shuffles with illegal type.
  // Only fold if this shuffle is the only user of the other shuffle.
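  // E.g. (illustrative, subject to the mask-legality check below):
  //   shuffle (shuffle A, B, <0,4,1,5>), B, <0,1,6,7>
  //     --> shuffle A, B, <0,4,6,7>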
  if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && N->isOnlyUserOf(N0.getNode()) &&
      Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) {
    ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);

    // Don't try to fold splats; they're likely to simplify somehow, or they
    // might be free.
    if (OtherSV->isSplat())
      return SDValue();

    // The incoming shuffle must be of the same type as the result of the
    // current shuffle.
    assert(OtherSV->getOperand(0).getValueType() == VT &&
           "Shuffle types don't match");

    SDValue SV0, SV1;
    SmallVector<int, 4> Mask;
    // Compute the combined shuffle mask for a shuffle with SV0 as the first
    // operand, and SV1 as the second operand.
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx < 0) {
        // Propagate Undef.
        Mask.push_back(Idx);
        continue;
      }

      SDValue CurrentVec;
      if (Idx < (int)NumElts) {
        // This shuffle index refers to the inner shuffle N0. Lookup the inner
        // shuffle mask to identify which vector is actually referenced.
        Idx = OtherSV->getMaskElt(Idx);
        if (Idx < 0) {
          // Propagate Undef.
          Mask.push_back(Idx);
          continue;
        }

        CurrentVec = (Idx < (int) NumElts) ? OtherSV->getOperand(0)
                                           : OtherSV->getOperand(1);
      } else {
        // This shuffle index references an element within N1.
        CurrentVec = N1;
      }

      // Simple case where 'CurrentVec' is UNDEF.
      if (CurrentVec.isUndef()) {
        Mask.push_back(-1);
        continue;
      }

      // Canonicalize the shuffle index. We don't know yet if CurrentVec
      // will be the first or second operand of the combined shuffle.
      Idx = Idx % NumElts;
      if (!SV0.getNode() || SV0 == CurrentVec) {
        // Ok. CurrentVec is the left hand side.
        // Update the mask accordingly.
        SV0 = CurrentVec;
        Mask.push_back(Idx);
        continue;
      }

      // Bail out if we cannot convert the shuffle pair into a single shuffle.
      if (SV1.getNode() && SV1 != CurrentVec)
        return SDValue();

      // Ok. CurrentVec is the right hand side.
      // Update the mask accordingly.
      SV1 = CurrentVec;
      Mask.push_back(Idx + NumElts);
    }

    // Check if all indices in Mask are Undef. If so, propagate Undef.
    bool isUndefMask = true;
    for (unsigned i = 0; i != NumElts && isUndefMask; ++i)
      isUndefMask &= Mask[i] < 0;

    if (isUndefMask)
      return DAG.getUNDEF(VT);

    if (!SV0.getNode())
      SV0 = DAG.getUNDEF(VT);
    if (!SV1.getNode())
      SV1 = DAG.getUNDEF(VT);

    // Avoid introducing shuffles with illegal mask.
    //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
    //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
    //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
    //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, A, M2)
    //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, A, M2)
    //   shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, B, M2)
    return TLI.buildLegalVectorShuffle(VT, SDLoc(N), SV0, SV1, Mask, DAG);
  }

  if (SDValue V = foldShuffleOfConcatUndefs(SVN, DAG))
    return V;

  return SDValue();
}

SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
  SDValue InVal = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
  // with a VECTOR_SHUFFLE and possible truncate.
  if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue InVec = InVal->getOperand(0);
    SDValue EltNo = InVal->getOperand(1);
    auto InVecT = InVec.getValueType();
    if (ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo)) {
      SmallVector<int, 8> NewMask(InVecT.getVectorNumElements(), -1);
      int Elt = C0->getZExtValue();
      NewMask[0] = Elt;
      // If we have an implicit truncate, do the truncate here as long as it's
      // legal; if it's not legal, fall through and let the cases below try.
      if (VT.getScalarType() != InVal.getValueType() &&
          InVal.getValueType().isScalarInteger() &&
          isTypeLegal(VT.getScalarType())) {
        SDValue Val =
            DAG.getNode(ISD::TRUNCATE, SDLoc(InVal), VT.getScalarType(), InVal);
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Val);
      }
      if (VT.getScalarType() == InVecT.getScalarType() &&
          VT.getVectorNumElements() <= InVecT.getVectorNumElements()) {
        SDValue LegalShuffle =
          TLI.buildLegalVectorShuffle(InVecT, SDLoc(N), InVec,
                                      DAG.getUNDEF(InVecT), NewMask, DAG);
        if (LegalShuffle) {
          // If the initial vector is the correct size this shuffle is a
          // valid result.
          if (VT == InVecT)
            return LegalShuffle;
          // If not, we must truncate the vector.
          if (VT.getVectorNumElements() != InVecT.getVectorNumElements()) {
            MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
            SDValue ZeroIdx = DAG.getConstant(0, SDLoc(N), IdxTy);
            EVT SubVT =
                EVT::getVectorVT(*DAG.getContext(), InVecT.getVectorElementType(),
                                 VT.getVectorNumElements());
            return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), SubVT,
                               LegalShuffle, ZeroIdx);
          }
        }
      }
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);

  // If inserting an UNDEF, just return the original vector.
  if (N1.isUndef())
    return N0;

  // If this is an insert of an extracted vector into an undef vector, we can
  // just use the input to the extract.
  if (N0.isUndef() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      N1.getOperand(1) == N2 && N1.getOperand(0).getValueType() == VT)
    return N1.getOperand(0);

  // If we are inserting a bitcast value into an undef, with the same
  // number of elements, just use the bitcast input of the extract.
  // i.e. INSERT_SUBVECTOR UNDEF (BITCAST N1) N2 ->
  //        BITCAST (INSERT_SUBVECTOR UNDEF N1 N2)
  if (N0.isUndef() && N1.getOpcode() == ISD::BITCAST &&
      N1.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      N1.getOperand(0).getOperand(1) == N2 &&
      N1.getOperand(0).getOperand(0).getValueType().getVectorNumElements() ==
          VT.getVectorNumElements() &&
      N1.getOperand(0).getOperand(0).getValueType().getSizeInBits() ==
          VT.getSizeInBits()) {
    return DAG.getBitcast(VT, N1.getOperand(0).getOperand(0));
  }

  // If both N0 and N1 are bitcast values on which insert_subvector
  // would make sense, pull the bitcast through.
  // i.e. INSERT_SUBVECTOR (BITCAST N0) (BITCAST N1) N2 ->
  //        BITCAST (INSERT_SUBVECTOR N0 N1 N2)
  if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) {
    SDValue CN0 = N0.getOperand(0);
    SDValue CN1 = N1.getOperand(0);
    EVT CN0VT = CN0.getValueType();
    EVT CN1VT = CN1.getValueType();
    if (CN0VT.isVector() && CN1VT.isVector() &&
        CN0VT.getVectorElementType() == CN1VT.getVectorElementType() &&
        CN0VT.getVectorNumElements() == VT.getVectorNumElements()) {
      SDValue NewINSERT = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N),
                                      CN0.getValueType(), CN0, CN1, N2);
      return DAG.getBitcast(VT, NewINSERT);
    }
  }

  // Combine INSERT_SUBVECTORs where we are inserting to the same index.
  // INSERT_SUBVECTOR( INSERT_SUBVECTOR( Vec, SubOld, Idx ), SubNew, Idx )
  // --> INSERT_SUBVECTOR( Vec, SubNew, Idx )
  if (N0.getOpcode() == ISD::INSERT_SUBVECTOR &&
      N0.getOperand(1).getValueType() == N1.getValueType() &&
      N0.getOperand(2) == N2)
    return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
                       N1, N2);

  // Eliminate an intermediate insert into an undef vector:
  // insert_subvector undef, (insert_subvector undef, X, 0), N2 -->
  // insert_subvector undef, X, N2
  if (N0.isUndef() && N1.getOpcode() == ISD::INSERT_SUBVECTOR &&
      N1.getOperand(0).isUndef() && isNullConstant(N1.getOperand(2)))
    return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0,
                       N1.getOperand(1), N2);

  if (!isa<ConstantSDNode>(N2))
    return SDValue();

  uint64_t InsIdx = cast<ConstantSDNode>(N2)->getZExtValue();

  // Push subvector bitcasts to the output, adjusting the index as we go.
  // insert_subvector(bitcast(v), bitcast(s), c1)
  // -> bitcast(insert_subvector(v, s, c2))
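  // E.g. (illustrative, when the operand types line up): inserting a bitcast
  // v2i32 value into a bitcast v4i64 vector at index 1 becomes an insert of
  // the v2i32 value into the vector's v8i32 form at index 2 (the Scale = 2
  // widening case below).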
  if ((N0.isUndef() || N0.getOpcode() == ISD::BITCAST) &&
      N1.getOpcode() == ISD::BITCAST) {
    SDValue N0Src = peekThroughBitcasts(N0);
    SDValue N1Src = peekThroughBitcasts(N1);
    EVT N0SrcSVT = N0Src.getValueType().getScalarType();
    EVT N1SrcSVT = N1Src.getValueType().getScalarType();
    if ((N0.isUndef() || N0SrcSVT == N1SrcSVT) &&
        N0Src.getValueType().isVector() && N1Src.getValueType().isVector()) {
      EVT NewVT;
      SDLoc DL(N);
      SDValue NewIdx;
      MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
      LLVMContext &Ctx = *DAG.getContext();
      unsigned NumElts = VT.getVectorNumElements();
      unsigned EltSizeInBits = VT.getScalarSizeInBits();
      if ((EltSizeInBits % N1SrcSVT.getSizeInBits()) == 0) {
        unsigned Scale = EltSizeInBits / N1SrcSVT.getSizeInBits();
        NewVT = EVT::getVectorVT(Ctx, N1SrcSVT, NumElts * Scale);
        NewIdx = DAG.getConstant(InsIdx * Scale, DL, IdxVT);
      } else if ((N1SrcSVT.getSizeInBits() % EltSizeInBits) == 0) {
        unsigned Scale = N1SrcSVT.getSizeInBits() / EltSizeInBits;
        if ((NumElts % Scale) == 0 && (InsIdx % Scale) == 0) {
          NewVT = EVT::getVectorVT(Ctx, N1SrcSVT, NumElts / Scale);
          NewIdx = DAG.getConstant(InsIdx / Scale, DL, IdxVT);
        }
      }
      if (NewIdx && hasOperation(ISD::INSERT_SUBVECTOR, NewVT)) {
        SDValue Res = DAG.getBitcast(NewVT, N0Src);
        Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewVT, Res, N1Src, NewIdx);
        return DAG.getBitcast(VT, Res);
      }
    }
  }

  // Canonicalize insert_subvector dag nodes.
  // Example:
  // (insert_subvector (insert_subvector A, B, Idx0), C, Idx1)
  // -> (insert_subvector (insert_subvector A, C, Idx1), B, Idx0),
  // provided Idx1 < Idx0.
  if (N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.hasOneUse() &&
      N1.getValueType() == N0.getOperand(1).getValueType() &&
      isa<ConstantSDNode>(N0.getOperand(2))) {
    unsigned OtherIdx = N0.getConstantOperandVal(2);
    if (InsIdx < OtherIdx) {
      // Swap nodes.
      SDValue NewOp = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT,
                                  N0.getOperand(0), N1, N2);
      AddToWorklist(NewOp.getNode());
      return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N0.getNode()),
                         VT, NewOp, N0.getOperand(1), N0.getOperand(2));
    }
  }

  // If the input vector is a concatenation, and the insert replaces
  // one of the pieces, we can optimize into a single concat_vectors.
  if (N0.getOpcode() == ISD::CONCAT_VECTORS && N0.hasOneUse() &&
      N0.getOperand(0).getValueType() == N1.getValueType()) {
    unsigned Factor = N1.getValueType().getVectorNumElements();

    SmallVector<SDValue, 8> Ops(N0->op_begin(), N0->op_end());
    Ops[cast<ConstantSDNode>(N2)->getZExtValue() / Factor] = N1;

    return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
  }

  // Simplify source operands based on insertion.
  if (SimplifyDemandedVectorElts(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) {
  SDValue N0 = N->getOperand(0);

  // fold (fp_to_fp16 (fp16_to_fp op)) -> op
  if (N0->getOpcode() == ISD::FP16_TO_FP)
    return N0->getOperand(0);

  return SDValue();
}

SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {
  SDValue N0 = N->getOperand(0);

  // fold fp16_to_fp(op & 0xffff) -> fp16_to_fp(op)
  if (N0->getOpcode() == ISD::AND) {
    ConstantSDNode *AndConst = getAsNonOpaqueConstant(N0.getOperand(1));
    if (AndConst && AndConst->getAPIntValue() == 0xffff) {
      return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), N->getValueType(0),
                         N0.getOperand(0));
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitVECREDUCE(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N0.getValueType();
  unsigned Opcode = N->getOpcode();

  // VECREDUCE over 1-element vector is just an extract.
  if (VT.getVectorNumElements() == 1) {
    SDLoc dl(N);
    SDValue Res = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, VT.getVectorElementType(), N0,
        DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
    if (Res.getValueType() != N->getValueType(0))
      Res = DAG.getNode(ISD::ANY_EXTEND, dl, N->getValueType(0), Res);
    return Res;
  }

  // On a boolean vector an and/or reduction is the same as a umin/umax
  // reduction. Convert them if the latter is legal while the former isn't.
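  // (For lanes known to be all-ones or all-zeros, AND computes the unsigned
  // minimum and OR the unsigned maximum, hence the sign-bit check below.)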
  if (Opcode == ISD::VECREDUCE_AND || Opcode == ISD::VECREDUCE_OR) {
    unsigned NewOpcode = Opcode == ISD::VECREDUCE_AND
        ? ISD::VECREDUCE_UMIN : ISD::VECREDUCE_UMAX;
    if (!TLI.isOperationLegalOrCustom(Opcode, VT) &&
        TLI.isOperationLegalOrCustom(NewOpcode, VT) &&
        DAG.ComputeNumSignBits(N0) == VT.getScalarSizeInBits())
      return DAG.getNode(NewOpcode, SDLoc(N), N->getValueType(0), N0);
  }

  return SDValue();
}

/// Returns a vector_shuffle if it is able to transform an AND to a
/// vector_shuffle with the destination vector and a zero vector.
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
///      vector_shuffle V, Zero, <0, 4, 2, 4>
SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
  assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");

  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = peekThroughBitcasts(N->getOperand(1));
  SDLoc DL(N);

  // Make sure we're not running after operation legalization where it
  // may have custom lowered the vector shuffles.
  if (LegalOperations)
    return SDValue();

  if (RHS.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  EVT RVT = RHS.getValueType();
  unsigned NumElts = RHS.getNumOperands();

  // Attempt to create a valid clear mask by splitting the mask into
  // sub-elements and checking that each is either all zeros or all ones,
  // making it suitable for shuffle masking.
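  // E.g. (illustrative, little-endian): for a v2i32 mask
  // <0x0000ffff, 0xffffffff> split into 16-bit sub-elements (Split == 2), the
  // sub-masks are <0xffff, 0x0000, 0xffff, 0xffff>, giving the clear-mask
  // indices <0, 5, 2, 3>.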
  auto BuildClearMask = [&](int Split) {
    int NumSubElts = NumElts * Split;
    int NumSubBits = RVT.getScalarSizeInBits() / Split;

    SmallVector<int, 8> Indices;
    for (int i = 0; i != NumSubElts; ++i) {
      int EltIdx = i / Split;
      int SubIdx = i % Split;
      SDValue Elt = RHS.getOperand(EltIdx);
      if (Elt.isUndef()) {
        Indices.push_back(-1);
        continue;
      }

      APInt Bits;
      if (isa<ConstantSDNode>(Elt))
        Bits = cast<ConstantSDNode>(Elt)->getAPIntValue();
      else if (isa<ConstantFPSDNode>(Elt))
        Bits = cast<ConstantFPSDNode>(Elt)->getValueAPF().bitcastToAPInt();
      else
        return SDValue();

      // Extract the sub element from the constant bit mask.
      if (DAG.getDataLayout().isBigEndian()) {
        Bits.lshrInPlace((Split - SubIdx - 1) * NumSubBits);
      } else {
        Bits.lshrInPlace(SubIdx * NumSubBits);
      }

      if (Split > 1)
        Bits = Bits.trunc(NumSubBits);

      if (Bits.isAllOnesValue())
        Indices.push_back(i);
      else if (Bits == 0)
        Indices.push_back(i + NumSubElts);
      else
        return SDValue();
    }

    // Let's see if the target supports this vector_shuffle.
    EVT ClearSVT = EVT::getIntegerVT(*DAG.getContext(), NumSubBits);
    EVT ClearVT = EVT::getVectorVT(*DAG.getContext(), ClearSVT, NumSubElts);
    if (!TLI.isVectorClearMaskLegal(Indices, ClearVT))
      return SDValue();

    SDValue Zero = DAG.getConstant(0, DL, ClearVT);
    return DAG.getBitcast(VT, DAG.getVectorShuffle(ClearVT, DL,
                                                   DAG.getBitcast(ClearVT, LHS),
                                                   Zero, Indices));
  };

  // Determine maximum split level (byte level masking).
  int MaxSplit = 1;
  if (RVT.getScalarSizeInBits() % 8 == 0)
    MaxSplit = RVT.getScalarSizeInBits() / 8;

  for (int Split = 1; Split <= MaxSplit; ++Split)
    if (RVT.getScalarSizeInBits() % Split == 0)
      if (SDValue S = BuildClearMask(Split))
        return S;

  return SDValue();
}

/// If a vector binop is performed on splat values, it may be profitable to
/// extract, scalarize, and insert/splat.
static SDValue scalarizeBinOpOfSplats(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned Opcode = N->getOpcode();
  EVT VT = N->getValueType(0);
  EVT EltVT = VT.getVectorElementType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // TODO: Remove/replace the extract cost check? If the elements are available
  //       as scalars, then there may be no extract cost. Should we ask if
  //       inserting a scalar back into a vector is cheap instead?
  int Index0, Index1;
  SDValue Src0 = DAG.getSplatSourceVector(N0, Index0);
  SDValue Src1 = DAG.getSplatSourceVector(N1, Index1);
  if (!Src0 || !Src1 || Index0 != Index1 ||
      Src0.getValueType().getVectorElementType() != EltVT ||
      Src1.getValueType().getVectorElementType() != EltVT ||
      !TLI.isExtractVecEltCheap(VT, Index0) ||
      !TLI.isOperationLegalOrCustom(Opcode, EltVT))
    return SDValue();

  SDLoc DL(N);
  SDValue IndexC =
      DAG.getConstant(Index0, DL, TLI.getVectorIdxTy(DAG.getDataLayout()));
  SDValue X = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, N0, IndexC);
  SDValue Y = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, N1, IndexC);
  SDValue ScalarBO = DAG.getNode(Opcode, DL, EltVT, X, Y, N->getFlags());

  // If all lanes but 1 are undefined, no need to splat the scalar result.
  // TODO: Keep track of undefs and use that info in the general case.
  if (N0.getOpcode() == ISD::BUILD_VECTOR && N0.getOpcode() == N1.getOpcode() &&
      count_if(N0->ops(), [](SDValue V) { return !V.isUndef(); }) == 1 &&
      count_if(N1->ops(), [](SDValue V) { return !V.isUndef(); }) == 1) {
    // bo (build_vec ..undef, X, undef...), (build_vec ..undef, Y, undef...) -->
    // build_vec ..undef, (bo X, Y), undef...
    SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(), DAG.getUNDEF(EltVT));
    Ops[Index0] = ScalarBO;
    return DAG.getBuildVector(VT, DL, Ops);
  }

  // bo (splat X, Index), (splat Y, Index) --> splat (bo X, Y), Index
  SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(), ScalarBO);
  return DAG.getBuildVector(VT, DL, Ops);
}

/// Visit a binary vector operation, like ADD.
SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
  assert(N->getValueType(0).isVector() &&
         "SimplifyVBinOp only works on vectors!");

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Ops[] = {LHS, RHS};
  EVT VT = N->getValueType(0);
  unsigned Opcode = N->getOpcode();

  // See if we can constant fold the vector operation.
  if (SDValue Fold = DAG.FoldConstantVectorArithmetic(
          Opcode, SDLoc(LHS), LHS.getValueType(), Ops, N->getFlags()))
    return Fold;

  // Move unary shuffles with identical masks after a vector binop:
  // VBinOp (shuffle A, Undef, Mask), (shuffle B, Undef, Mask))
  //   --> shuffle (VBinOp A, B), Undef, Mask
  // This does not require type legality checks because we are creating the
  // same types of operations that are in the original sequence. We do have to
  // restrict ops like integer div that have immediate UB (eg, div-by-zero)
  // though. This code is adapted from the identical transform in instcombine.
  if (Opcode != ISD::UDIV && Opcode != ISD::SDIV &&
      Opcode != ISD::UREM && Opcode != ISD::SREM &&
      Opcode != ISD::UDIVREM && Opcode != ISD::SDIVREM) {
    auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(LHS);
    auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(RHS);
    if (Shuf0 && Shuf1 && Shuf0->getMask().equals(Shuf1->getMask()) &&
        LHS.getOperand(1).isUndef() && RHS.getOperand(1).isUndef() &&
        (LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) {
      SDLoc DL(N);
      SDValue NewBinOp = DAG.getNode(Opcode, DL, VT, LHS.getOperand(0),
                                     RHS.getOperand(0), N->getFlags());
      SDValue UndefV = LHS.getOperand(1);
      return DAG.getVectorShuffle(VT, DL, NewBinOp, UndefV, Shuf0->getMask());
    }
  }

  // The following pattern is likely to emerge with vector reduction ops. Moving
  // the binary operation ahead of insertion may allow using a narrower vector
  // instruction that has better performance than the wide version of the op:
  // VBinOp (ins undef, X, Z), (ins undef, Y, Z) --> ins VecC, (VBinOp X, Y), Z
  if (LHS.getOpcode() == ISD::INSERT_SUBVECTOR && LHS.getOperand(0).isUndef() &&
      RHS.getOpcode() == ISD::INSERT_SUBVECTOR && RHS.getOperand(0).isUndef() &&
      LHS.getOperand(2) == RHS.getOperand(2) &&
      (LHS.hasOneUse() || RHS.hasOneUse())) {
    SDValue X = LHS.getOperand(1);
    SDValue Y = RHS.getOperand(1);
    SDValue Z = LHS.getOperand(2);
    EVT NarrowVT = X.getValueType();
    if (NarrowVT == Y.getValueType() &&
        TLI.isOperationLegalOrCustomOrPromote(Opcode, NarrowVT)) {
      // (binop undef, undef) may not return undef, so compute that result.
      SDLoc DL(N);
      SDValue VecC =
          DAG.getNode(Opcode, DL, VT, DAG.getUNDEF(VT), DAG.getUNDEF(VT));
      SDValue NarrowBO = DAG.getNode(Opcode, DL, NarrowVT, X, Y);
      return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, VecC, NarrowBO, Z);
    }
  }

  // Make sure all but the first op are undef or constant.
  auto ConcatWithConstantOrUndef = [](SDValue Concat) {
    return Concat.getOpcode() == ISD::CONCAT_VECTORS &&
           std::all_of(std::next(Concat->op_begin()), Concat->op_end(),
                     [](const SDValue &Op) {
                       return Op.isUndef() ||
                              ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
                     });
  };

  // The following pattern is likely to emerge with vector reduction ops. Moving
  // the binary operation ahead of the concat may allow using a narrower vector
  // instruction that has better performance than the wide version of the op:
  // VBinOp (concat X, undef/constant), (concat Y, undef/constant) -->
  //   concat (VBinOp X, Y), VecC
  if (ConcatWithConstantOrUndef(LHS) && ConcatWithConstantOrUndef(RHS) &&
      (LHS.hasOneUse() || RHS.hasOneUse())) {
    EVT NarrowVT = LHS.getOperand(0).getValueType();
    if (NarrowVT == RHS.getOperand(0).getValueType() &&
        TLI.isOperationLegalOrCustomOrPromote(Opcode, NarrowVT)) {
      SDLoc DL(N);
      unsigned NumOperands = LHS.getNumOperands();
      SmallVector<SDValue, 4> ConcatOps;
      for (unsigned i = 0; i != NumOperands; ++i) {
        // This constant-folds for operands 1 and up.
        ConcatOps.push_back(DAG.getNode(Opcode, DL, NarrowVT, LHS.getOperand(i),
                                        RHS.getOperand(i)));
      }

      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
    }
  }

  if (SDValue V = scalarizeBinOpOfSplats(N, DAG))
    return V;

  return SDValue();
}

SDValue DAGCombiner::SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1,
                                    SDValue N2) {
  assert(N0.getOpcode() == ISD::SETCC &&
         "First argument must be a SetCC node!");

  SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
                                 cast<CondCodeSDNode>(N0.getOperand(2))->get());

  // If we got a simplified select_cc node back from SimplifySelectCC, then
  // break it down into a new SETCC node, and a new SELECT node, and then return
  // the SELECT node, since we were called with a SELECT node.
  if (SCC.getNode()) {
    // Check to see if we got a select_cc back (to turn into setcc/select).
    // Otherwise, just return whatever node we got back, like fabs.
    if (SCC.getOpcode() == ISD::SELECT_CC) {
      const SDNodeFlags Flags = N0.getNode()->getFlags();
      SDValue SETCC = DAG.getNode(ISD::SETCC, SDLoc(N0),
                                  N0.getValueType(),
                                  SCC.getOperand(0), SCC.getOperand(1),
                                  SCC.getOperand(4), Flags);
      AddToWorklist(SETCC.getNode());
      SDValue SelectNode = DAG.getSelect(SDLoc(SCC), SCC.getValueType(), SETCC,
                                         SCC.getOperand(2), SCC.getOperand(3));
      SelectNode->setFlags(Flags);
      return SelectNode;
    }

    return SCC;
  }
  return SDValue();
}

/// Given a SELECT or a SELECT_CC node, where LHS and RHS are the two values
/// being selected between, see if we can simplify the select.  Callers of this
/// should assume that TheSelect is deleted if this returns true.  As such, they
/// should return the appropriate thing (e.g. the node) back to the top-level of
/// the DAG combiner loop to avoid it being looked at.
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
                                    SDValue RHS) {
  // fold (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
  // The select + setcc is redundant, because fsqrt returns NaN for X < 0.
  if (const ConstantFPSDNode *NaN = isConstOrConstSplatFP(LHS)) {
    if (NaN->isNaN() && RHS.getOpcode() == ISD::FSQRT) {
      // We have: (select (setcc ?, ?, ?), NaN, (fsqrt ?))
      SDValue Sqrt = RHS;
      ISD::CondCode CC;
      SDValue CmpLHS;
      const ConstantFPSDNode *Zero = nullptr;

      if (TheSelect->getOpcode() == ISD::SELECT_CC) {
        CC = cast<CondCodeSDNode>(TheSelect->getOperand(4))->get();
        CmpLHS = TheSelect->getOperand(0);
        Zero = isConstOrConstSplatFP(TheSelect->getOperand(1));
      } else {
        // SELECT or VSELECT
        SDValue Cmp = TheSelect->getOperand(0);
        if (Cmp.getOpcode() == ISD::SETCC) {
          CC = cast<CondCodeSDNode>(Cmp.getOperand(2))->get();
          CmpLHS = Cmp.getOperand(0);
          Zero = isConstOrConstSplatFP(Cmp.getOperand(1));
        }
      }
      if (Zero && Zero->isZero() &&
          Sqrt.getOperand(0) == CmpLHS && (CC == ISD::SETOLT ||
          CC == ISD::SETULT || CC == ISD::SETLT)) {
        // We have: (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
        CombineTo(TheSelect, Sqrt);
        return true;
      }
    }
  }
  // Cannot simplify select with vector condition
  if (TheSelect->getOperand(0).getValueType().isVector()) return false;

  // If this is a select from two identical things, try to pull the operation
  // through the select.
  if (LHS.getOpcode() != RHS.getOpcode() ||
      !LHS.hasOneUse() || !RHS.hasOneUse())
    return false;

  // If this is a load and the token chain is identical, replace the select
  // of two loads with a load through a select of the address to load from.
  // This triggers in things like "select bool X, 10.0, 123.0" after the FP
  // constants have been dropped into the constant pool.
  if (LHS.getOpcode() == ISD::LOAD) {
    LoadSDNode *LLD = cast<LoadSDNode>(LHS);
    LoadSDNode *RLD = cast<LoadSDNode>(RHS);

    // Token chains must be identical.
    if (LHS.getOperand(0) != RHS.getOperand(0) ||
        // Do not let this transformation reduce the number of volatile loads.
        // Be conservative for atomics for the moment
        // TODO: This does appear to be legal for unordered atomics (see D66309)
        !LLD->isSimple() || !RLD->isSimple() ||
        // FIXME: If either is a pre/post inc/dec load,
        // we'd need to split out the address adjustment.
        LLD->isIndexed() || RLD->isIndexed() ||
        // If this is an EXTLOAD, the VT's must match.
        LLD->getMemoryVT() != RLD->getMemoryVT() ||
        // If this is an EXTLOAD, the kind of extension must match.
        (LLD->getExtensionType() != RLD->getExtensionType() &&
         // The only exception is if one of the extensions is anyext.
         LLD->getExtensionType() != ISD::EXTLOAD &&
         RLD->getExtensionType() != ISD::EXTLOAD) ||
        // FIXME: this discards src value information.  This is
        // over-conservative. It would be beneficial to be able to remember
        // both potential memory locations.  Since we are discarding
        // src value info, don't do the transformation if the memory
        // locations are not in the default address space.
        LLD->getPointerInfo().getAddrSpace() != 0 ||
        RLD->getPointerInfo().getAddrSpace() != 0 ||
        // We can't produce a CMOV of a TargetFrameIndex since we won't
        // generate the address generation required.
        LLD->getBasePtr().getOpcode() == ISD::TargetFrameIndex ||
        RLD->getBasePtr().getOpcode() == ISD::TargetFrameIndex ||
        !TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
                                      LLD->getBasePtr().getValueType()))
      return false;

    // The loads must not depend on one another.
    if (LLD->isPredecessorOf(RLD) || RLD->isPredecessorOf(LLD))
      return false;

    // Check that the select condition doesn't reach either load.  If so,
    // folding this will induce a cycle into the DAG.  If not, this is safe to
    // xform, so create a select of the addresses.

    SmallPtrSet<const SDNode *, 32> Visited;
    SmallVector<const SDNode *, 16> Worklist;

    // Always fail if LLD and RLD are not independent. TheSelect is a
    // predecessor to all Nodes in question so we need not search past it.

    Visited.insert(TheSelect);
    Worklist.push_back(LLD);
    Worklist.push_back(RLD);

    if (SDNode::hasPredecessorHelper(LLD, Visited, Worklist) ||
        SDNode::hasPredecessorHelper(RLD, Visited, Worklist))
      return false;

    SDValue Addr;
    if (TheSelect->getOpcode() == ISD::SELECT) {
      // We cannot do this optimization if any pair of {RLD, LLD} is a
      // predecessor to {RLD, LLD, CondNode}. As we've already compared the
      // Loads, we only need to check if CondNode is a successor to one of the
      // loads. We can further avoid this if there's no use of their chain
      // value.
      SDNode *CondNode = TheSelect->getOperand(0).getNode();
      Worklist.push_back(CondNode);

      if ((LLD->hasAnyUseOfValue(1) &&
           SDNode::hasPredecessorHelper(LLD, Visited, Worklist)) ||
          (RLD->hasAnyUseOfValue(1) &&
           SDNode::hasPredecessorHelper(RLD, Visited, Worklist)))
        return false;

      Addr = DAG.getSelect(SDLoc(TheSelect),
                           LLD->getBasePtr().getValueType(),
                           TheSelect->getOperand(0), LLD->getBasePtr(),
                           RLD->getBasePtr());
    } else {  // Otherwise SELECT_CC
      // We cannot do this optimization if any pair of {RLD, LLD} is a
      // predecessor to {RLD, LLD, CondLHS, CondRHS}. As we've already compared
      // the Loads, we only need to check if CondLHS/CondRHS is a successor to
      // one of the loads. We can further avoid this if there's no use of their
      // chain value.

      SDNode *CondLHS = TheSelect->getOperand(0).getNode();
      SDNode *CondRHS = TheSelect->getOperand(1).getNode();
      Worklist.push_back(CondLHS);
      Worklist.push_back(CondRHS);

      if ((LLD->hasAnyUseOfValue(1) &&
           SDNode::hasPredecessorHelper(LLD, Visited, Worklist)) ||
          (RLD->hasAnyUseOfValue(1) &&
           SDNode::hasPredecessorHelper(RLD, Visited, Worklist)))
        return false;

      Addr = DAG.getNode(ISD::SELECT_CC, SDLoc(TheSelect),
                         LLD->getBasePtr().getValueType(),
                         TheSelect->getOperand(0),
                         TheSelect->getOperand(1),
                         LLD->getBasePtr(), RLD->getBasePtr(),
                         TheSelect->getOperand(4));
    }

    SDValue Load;
    // It is safe to replace the two loads if they have different alignments,
    // but the new load must be the minimum (most restrictive) alignment of the
    // inputs.
    unsigned Alignment = std::min(LLD->getAlignment(), RLD->getAlignment());
    MachineMemOperand::Flags MMOFlags = LLD->getMemOperand()->getFlags();
    if (!RLD->isInvariant())
      MMOFlags &= ~MachineMemOperand::MOInvariant;
    if (!RLD->isDereferenceable())
      MMOFlags &= ~MachineMemOperand::MODereferenceable;
    if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
      // FIXME: Discards pointer and AA info.
      Load = DAG.getLoad(TheSelect->getValueType(0), SDLoc(TheSelect),
                         LLD->getChain(), Addr, MachinePointerInfo(), Alignment,
                         MMOFlags);
    } else {
      // FIXME: Discards pointer and AA info.
      Load = DAG.getExtLoad(
          LLD->getExtensionType() == ISD::EXTLOAD ? RLD->getExtensionType()
                                                  : LLD->getExtensionType(),
          SDLoc(TheSelect), TheSelect->getValueType(0), LLD->getChain(), Addr,
          MachinePointerInfo(), LLD->getMemoryVT(), Alignment, MMOFlags);
    }

    // Users of the select now use the result of the load.
    CombineTo(TheSelect, Load);

    // Users of the old loads now use the new load's chain.  We know the
    // old-load value is dead now.
    CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
    CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
    return true;
  }

  return false;
}

/// Try to fold an expression of the form (N0 cond N1) ? N2 : N3 to a shift and
/// bitwise 'and'.
SDValue DAGCombiner::foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0,
                                            SDValue N1, SDValue N2, SDValue N3,
                                            ISD::CondCode CC) {
  // If this is a select where the false operand is zero and the compare is a
  // check of the sign bit, see if we can perform the "gzip trick":
  // select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A
  // select_cc setgt X, 0, A, 0 -> and (not (sra X, size(X)-1)), A
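  // E.g. (illustrative) for i32: select_cc setlt X, 0, A, 0 becomes
  //   and (sra X, 31), A
  // because (sra X, 31) is all-ones exactly when X is negative.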
  EVT XType = N0.getValueType();
  EVT AType = N2.getValueType();
  if (!isNullConstant(N3) || !XType.bitsGE(AType))
    return SDValue();

  // If the comparison is testing for a positive value, we have to invert
  // the sign bit mask, so only do that transform if the target has a bitwise
  // 'and not' instruction (the invert is free).
  if (CC == ISD::SETGT && TLI.hasAndNot(N2)) {
    // (X > -1) ? A : 0
    // (X >  0) ? X : 0 <-- This is canonical signed max.
    if (!(isAllOnesConstant(N1) || (isNullConstant(N1) && N0 == N2)))
      return SDValue();
  } else if (CC == ISD::SETLT) {
    // (X <  0) ? A : 0
    // (X <  1) ? X : 0 <-- This is un-canonicalized signed min.
    if (!(isNullConstant(N1) || (isOneConstant(N1) && N0 == N2)))
      return SDValue();
  } else {
    return SDValue();
  }

  // and (sra X, size(X)-1), A -> "and (srl X, C2), A" iff A is a single-bit
  // constant.
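  // E.g. (illustrative) for i32 with A == 4 (only bit 2 set):
  // ShCt = 32 - 2 - 1 = 29, so (srl X, 29) moves the sign bit onto bit 2 and
  // the fold produces: and (srl X, 29), 4.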
  EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType());
  auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue() - 1)) == 0)) {
    unsigned ShCt = XType.getSizeInBits() - N2C->getAPIntValue().logBase2() - 1;
    SDValue ShiftAmt = DAG.getConstant(ShCt, DL, ShiftAmtTy);
    SDValue Shift = DAG.getNode(ISD::SRL, DL, XType, N0, ShiftAmt);
    AddToWorklist(Shift.getNode());

    if (XType.bitsGT(AType)) {
      Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
      AddToWorklist(Shift.getNode());
    }

    if (CC == ISD::SETGT)
      Shift = DAG.getNOT(DL, Shift, AType);

    return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
  }

  SDValue ShiftAmt = DAG.getConstant(XType.getSizeInBits() - 1, DL, ShiftAmtTy);
  SDValue Shift = DAG.getNode(ISD::SRA, DL, XType, N0, ShiftAmt);
  AddToWorklist(Shift.getNode());

  if (XType.bitsGT(AType)) {
    Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
    AddToWorklist(Shift.getNode());
  }

  if (CC == ISD::SETGT)
    Shift = DAG.getNOT(DL, Shift, AType);

  return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
}

/// Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4))"
/// where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
/// in it. This may be a win when the constant is not otherwise available
/// because it replaces two constant pool loads with one.
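///
/// Concretely (an illustration, assuming 4-byte f32 elements): the pool holds
/// { FV, TV } = { 2.0f, 1.0f }, and the select yields byte offset 4 when the
/// condition is true (loading 1.0f) or offset 0 when false (loading 2.0f).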
SDValue DAGCombiner::convertSelectOfFPConstantsToLoadOffset(
    const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2, SDValue N3,
    ISD::CondCode CC) {
  if (!TLI.reduceSelectOfFPConstantLoads(N0.getValueType()))
    return SDValue();

  // If we are before legalize types, we want the other legalization to happen
  // first (for example, to avoid messing with soft float).
  auto *TV = dyn_cast<ConstantFPSDNode>(N2);
  auto *FV = dyn_cast<ConstantFPSDNode>(N3);
  EVT VT = N2.getValueType();
  if (!TV || !FV || !TLI.isTypeLegal(VT))
    return SDValue();

  // If a constant can be materialized without loads, this does not make sense.
  if (TLI.getOperationAction(ISD::ConstantFP, VT) == TargetLowering::Legal ||
      TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0), ForCodeSize) ||
      TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0), ForCodeSize))
    return SDValue();

  // If both constants have multiple uses, then we won't need to do an extra
  // load. The values are likely around in registers for other users.
  if (!TV->hasOneUse() && !FV->hasOneUse())
    return SDValue();

  Constant *Elts[] = { const_cast<ConstantFP*>(FV->getConstantFPValue()),
                       const_cast<ConstantFP*>(TV->getConstantFPValue()) };
  Type *FPTy = Elts[0]->getType();
  const DataLayout &TD = DAG.getDataLayout();

  // Create a ConstantArray of the two constants.
  Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
  SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(DAG.getDataLayout()),
                                      TD.getPrefTypeAlignment(FPTy));
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();

  // Get offsets to the 0 and 1 elements of the array, so we can select between
  // them.
  SDValue Zero = DAG.getIntPtrConstant(0, DL);
  unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
  SDValue One = DAG.getIntPtrConstant(EltSize, SDLoc(FV));
  SDValue Cond =
      DAG.getSetCC(DL, getSetCCResultType(N0.getValueType()), N0, N1, CC);
  AddToWorklist(Cond.getNode());
  SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(), Cond, One, Zero);
  AddToWorklist(CstOffset.getNode());
  CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx, CstOffset);
  AddToWorklist(CPIdx.getNode());
  return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
                     MachinePointerInfo::getConstantPool(
                         DAG.getMachineFunction()), Alignment);
}

/// Simplify an expression of the form (N0 cond N1) ? N2 : N3
/// where 'cond' is the comparison specified by CC.
SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
                                      SDValue N2, SDValue N3, ISD::CondCode CC,
                                      bool NotExtCompare) {
  // (x ? y : y) -> y.
  if (N2 == N3) return N2;

  EVT CmpOpVT = N0.getValueType();
  EVT CmpResVT = getSetCCResultType(CmpOpVT);
  EVT VT = N2.getValueType();
  auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  auto *N3C = dyn_cast<ConstantSDNode>(N3.getNode());

  // Determine if the condition we're dealing with is constant.
  if (SDValue SCC = DAG.FoldSetCC(CmpResVT, N0, N1, CC, DL)) {
    AddToWorklist(SCC.getNode());
    if (auto *SCCC = dyn_cast<ConstantSDNode>(SCC)) {
      // fold select_cc true, x, y -> x
      // fold select_cc false, x, y -> y
      return !(SCCC->isNullValue()) ? N2 : N3;
    }
  }

  if (SDValue V =
          convertSelectOfFPConstantsToLoadOffset(DL, N0, N1, N2, N3, CC))
    return V;

  if (SDValue V = foldSelectCCToShiftAnd(DL, N0, N1, N2, N3, CC))
    return V;

  // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
  // where y has a single bit set.
  // In plain terms: we can turn the SELECT_CC into an AND when the condition
  // can be materialized as an all-ones register. Any single bit-test can be
  // materialized as an all-ones register with shift-left and
  // shift-right-arith.
  // TODO: The operation legality checks could be loosened to include "custom",
  //       but that may cause regressions for targets that do not have shift
  //       instructions.
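  //
  // A worked instance (illustrative): for i32 x with y = 4,
  // countLeadingZeros(4) == 29, so (shl x, 29) moves bit 2 into the sign
  // position and (sra ..., 31) smears it into all-ones when (x & 4) != 0 and
  // zero otherwise; the final AND then selects A or 0 accordingly.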
  if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
      N0->getValueType(0) == VT && isNullConstant(N1) && isNullConstant(N2) &&
      TLI.isOperationLegal(ISD::SHL, VT) &&
      TLI.isOperationLegal(ISD::SRA, VT)) {
    SDValue AndLHS = N0->getOperand(0);
    auto *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
      // Shift the tested bit over the sign bit.
      const APInt &AndMask = ConstAndRHS->getAPIntValue();
      SDValue ShlAmt =
        DAG.getConstant(AndMask.countLeadingZeros(), SDLoc(AndLHS),
                        getShiftAmountTy(AndLHS.getValueType()));
      SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N0), VT, AndLHS, ShlAmt);

      // Now arithmetic right shift it all the way over, so the result is either
      // all-ones, or zero.
      SDValue ShrAmt =
        DAG.getConstant(AndMask.getBitWidth() - 1, SDLoc(Shl),
                        getShiftAmountTy(Shl.getValueType()));
      SDValue Shr = DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, ShrAmt);

      return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
    }
  }

  // fold select C, 16, 0 -> shl C, 4
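  // With ZeroOrOneBooleanContent the zext of the setcc is 0 or 1, so e.g.
  // 16 * C == C << log2(16) == C << 4, which is what the code below builds.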
  bool Fold = N2C && isNullConstant(N3) && N2C->getAPIntValue().isPowerOf2();
  bool Swap = N3C && isNullConstant(N2) && N3C->getAPIntValue().isPowerOf2();

  if ((Fold || Swap) &&
      TLI.getBooleanContents(CmpOpVT) ==
          TargetLowering::ZeroOrOneBooleanContent &&
      (!LegalOperations || TLI.isOperationLegal(ISD::SETCC, CmpOpVT))) {

    if (Swap) {
      CC = ISD::getSetCCInverse(CC, CmpOpVT.isInteger());
      std::swap(N2C, N3C);
    }

    // If the caller doesn't want us to simplify this into a zext of a compare,
    // don't do it.
    if (NotExtCompare && N2C->isOne())
      return SDValue();

    SDValue Temp, SCC;
    // zext (setcc n0, n1)
    if (LegalTypes) {
      SCC = DAG.getSetCC(DL, CmpResVT, N0, N1, CC);
      if (VT.bitsLT(SCC.getValueType()))
        Temp = DAG.getZeroExtendInReg(SCC, SDLoc(N2), VT);
      else
        Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2), VT, SCC);
    } else {
      SCC = DAG.getSetCC(SDLoc(N0), MVT::i1, N0, N1, CC);
      Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2), VT, SCC);
    }

    AddToWorklist(SCC.getNode());
    AddToWorklist(Temp.getNode());

    if (N2C->isOne())
      return Temp;

    // shl setcc result by log2 n2c
    return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp,
                       DAG.getConstant(N2C->getAPIntValue().logBase2(),
                                       SDLoc(Temp),
                                       getShiftAmountTy(Temp.getValueType())));
  }

  // select_cc seteq X, 0, sizeof(X), ctlz(X) -> ctlz(X)
  // select_cc seteq X, 0, sizeof(X), ctlz_zero_undef(X) -> ctlz(X)
  // select_cc seteq X, 0, sizeof(X), cttz(X) -> cttz(X)
  // select_cc seteq X, 0, sizeof(X), cttz_zero_undef(X) -> cttz(X)
  // select_cc setne X, 0, ctlz(X), sizeof(X) -> ctlz(X)
  // select_cc setne X, 0, ctlz_zero_undef(X), sizeof(X) -> ctlz(X)
  // select_cc setne X, 0, cttz(X), sizeof(X) -> cttz(X)
  // select_cc setne X, 0, cttz_zero_undef(X), sizeof(X) -> cttz(X)
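  //
  // These folds rely on ISD::CTLZ/ISD::CTTZ being defined to return the
  // operand bit width for a zero input; e.g. for i32 X,
  // "select_cc seteq X, 0, 32, cttz(X)" computes exactly cttz(X).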
  if (N1C && N1C->isNullValue() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    SDValue ValueOnZero = N2;
    SDValue Count = N3;
    // If the condition is NE instead of EQ, swap the operands.
    if (CC == ISD::SETNE)
      std::swap(ValueOnZero, Count);
    // Check if the value on zero is a constant equal to the bits in the type.
    if (auto *ValueOnZeroC = dyn_cast<ConstantSDNode>(ValueOnZero)) {
      if (ValueOnZeroC->getAPIntValue() == VT.getSizeInBits()) {
        // If the other operand is cttz/cttz_zero_undef of N0, and cttz is
        // legal, combine to just cttz.
        if ((Count.getOpcode() == ISD::CTTZ ||
             Count.getOpcode() == ISD::CTTZ_ZERO_UNDEF) &&
            N0 == Count.getOperand(0) &&
            (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ, VT)))
          return DAG.getNode(ISD::CTTZ, DL, VT, N0);
        // If the other operand is ctlz/ctlz_zero_undef of N0, and ctlz is
        // legal, combine to just ctlz.
        if ((Count.getOpcode() == ISD::CTLZ ||
             Count.getOpcode() == ISD::CTLZ_ZERO_UNDEF) &&
            N0 == Count.getOperand(0) &&
            (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ, VT)))
          return DAG.getNode(ISD::CTLZ, DL, VT, N0);
      }
    }
  }

  return SDValue();
}

/// This is a stub for TargetLowering::SimplifySetCC.
SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                                   ISD::CondCode Cond, const SDLoc &DL,
                                   bool foldBooleans) {
  TargetLowering::DAGCombinerInfo
    DagCombineInfo(DAG, Level, false, this);
  return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
}

/// Given an ISD::SDIV node expressing a divide by constant, return a DAG
/// expression that will generate the same value by multiplying by a magic
/// number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
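///
/// Illustrative scalar sketch (for exposition; the magic constant is the
/// standard signed divide-by-3 value, not taken from this file): the DAG
/// built for "sdiv i32 %n, 3" computes the equivalent of
///   int32_t sdiv3(int32_t n) {
///     int32_t q = (int32_t)(((int64_t)0x55555556 * n) >> 32); // mulhs
///     return q + ((uint32_t)n >> 31); // correct rounding for negative n
///   }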
SDValue DAGCombiner::BuildSDIV(SDNode *N) {
  // When optimizing for minimum size, we don't want to expand a div to a mul
  // and a shift.
  if (DAG.getMachineFunction().getFunction().hasMinSize())
    return SDValue();

  SmallVector<SDNode *, 8> Built;
  if (SDValue S = TLI.BuildSDIV(N, DAG, LegalOperations, Built)) {
    for (SDNode *N : Built)
      AddToWorklist(N);
    return S;
  }

  return SDValue();
}

/// Given an ISD::SDIV node expressing a divide by constant power of 2, return a
/// DAG expression that will generate the same value by right shifting.
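///
/// For example (illustrative): "sdiv i32 %x, 4" becomes the equivalent of
///   (x + ((uint32_t)(x >> 31) >> 30)) >> 2
/// where adding divisor-1 to negative inputs makes the arithmetic shift
/// round toward zero, matching signed division semantics.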
SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) {
  ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
  if (!C)
    return SDValue();

  // Avoid division by zero.
  if (C->isNullValue())
    return SDValue();

  SmallVector<SDNode *, 8> Built;
  if (SDValue S = TLI.BuildSDIVPow2(N, C->getAPIntValue(), DAG, Built)) {
    for (SDNode *N : Built)
      AddToWorklist(N);
    return S;
  }

  return SDValue();
}

/// Given an ISD::UDIV node expressing a divide by constant, return a DAG
/// expression that will generate the same value by multiplying by a magic
/// number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
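///
/// Illustrative scalar sketch (for exposition; the magic constant is the
/// standard unsigned divide-by-3 value, not taken from this file): the DAG
/// built for "udiv i32 %n, 3" computes the equivalent of
///   uint32_t udiv3(uint32_t n) {
///     return (uint32_t)((0xAAAAAAABULL * n) >> 32) >> 1; // mulhu + srl 1
///   }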
SDValue DAGCombiner::BuildUDIV(SDNode *N) {
  // When optimizing for minimum size, we don't want to expand a div to a mul
  // and a shift.
  if (DAG.getMachineFunction().getFunction().hasMinSize())
    return SDValue();

  SmallVector<SDNode *, 8> Built;
  if (SDValue S = TLI.BuildUDIV(N, DAG, LegalOperations, Built)) {
    for (SDNode *N : Built)
      AddToWorklist(N);
    return S;
  }

  return SDValue();
}

/// Determines the LogBase2 value for a non-null input value using the
/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
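/// For example, V = 16 with i32 elements gives ctlz(16) = 27 and
/// (32 - 1) - 27 = 4 = log2(16).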
SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL) {
  EVT VT = V.getValueType();
  unsigned EltBits = VT.getScalarSizeInBits();
  SDValue Ctlz = DAG.getNode(ISD::CTLZ, DL, VT, V);
  SDValue Base = DAG.getConstant(EltBits - 1, DL, VT);
  SDValue LogBase2 = DAG.getNode(ISD::SUB, DL, VT, Base, Ctlz);
  return LogBase2;
}

/// Newton iteration for finding a zero of F: X_{i+1} = X_i - F(X_i)/F'(X_i).
/// For the reciprocal, we need to find the zero of the function:
///   F(X) = A X - 1 [which has a zero at X = 1/A]
///     =>
///   X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form
///     does not require additional intermediate precision]
/// For the last iteration, put numerator N into it to gain more precision:
///   Result = N X_i + X_i (N - N A X_i)
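///
/// To see the quadratic convergence (a derivation added for exposition):
/// with error e_i = 1 - A X_i, the update gives
///   1 - A X_{i+1} = 1 - A X_i (2 - A X_i) = (1 - A X_i)^2 = e_i^2,
/// so the number of correct bits roughly doubles per iteration.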
SDValue DAGCombiner::BuildDivEstimate(SDValue N, SDValue Op,
                                      SDNodeFlags Flags) {
  if (Level >= AfterLegalizeDAG)
    return SDValue();

  // TODO: Handle half and/or extended types?
  EVT VT = Op.getValueType();
  if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
    return SDValue();

  // If estimates are explicitly disabled for this function, we're done.
  MachineFunction &MF = DAG.getMachineFunction();
  int Enabled = TLI.getRecipEstimateDivEnabled(VT, MF);
  if (Enabled == TLI.ReciprocalEstimate::Disabled)
    return SDValue();

  // Estimates may be explicitly enabled for this type with a custom number of
  // refinement steps.
  int Iterations = TLI.getDivRefinementSteps(VT, MF);
  if (SDValue Est = TLI.getRecipEstimate(Op, DAG, Enabled, Iterations)) {
    AddToWorklist(Est.getNode());

    SDLoc DL(Op);
    if (Iterations) {
      SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);

      // Newton iterations: Est = Est + Est (N - Arg * Est)
      // If this is the last iteration, also multiply by the numerator.
      for (int i = 0; i < Iterations; ++i) {
        SDValue MulEst = Est;

        if (i == Iterations - 1) {
          MulEst = DAG.getNode(ISD::FMUL, DL, VT, N, Est, Flags);
          AddToWorklist(MulEst.getNode());
        }

        SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Op, MulEst, Flags);
        AddToWorklist(NewEst.getNode());

        NewEst = DAG.getNode(ISD::FSUB, DL, VT,
                             (i == Iterations - 1 ? N : FPOne), NewEst, Flags);
        AddToWorklist(NewEst.getNode());

        NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
        AddToWorklist(NewEst.getNode());

        Est = DAG.getNode(ISD::FADD, DL, VT, MulEst, NewEst, Flags);
        AddToWorklist(Est.getNode());
      }
    } else {
      // If no iterations are available, multiply by N.
      Est = DAG.getNode(ISD::FMUL, DL, VT, Est, N, Flags);
      AddToWorklist(Est.getNode());
    }

    return Est;
  }

  return SDValue();
}

/// Newton iteration for finding a zero of F: X_{i+1} = X_i - F(X_i)/F'(X_i).
/// For the reciprocal sqrt, we need to find the zero of the function:
///   F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
///     =>
///   X_{i+1} = X_i (1.5 - A X_i^2 / 2)
/// As a result, we precompute A/2 prior to the iteration loop.
SDValue DAGCombiner::buildSqrtNROneConst(SDValue Arg, SDValue Est,
                                         unsigned Iterations,
                                         SDNodeFlags Flags, bool Reciprocal) {
  EVT VT = Arg.getValueType();
  SDLoc DL(Arg);
  SDValue ThreeHalves = DAG.getConstantFP(1.5, DL, VT);

  // We now need 0.5 * Arg which we can write as (1.5 * Arg - Arg) so that
  // this entire sequence requires only one FP constant.
  SDValue HalfArg = DAG.getNode(ISD::FMUL, DL, VT, ThreeHalves, Arg, Flags);
  HalfArg = DAG.getNode(ISD::FSUB, DL, VT, HalfArg, Arg, Flags);

  // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
  for (unsigned i = 0; i < Iterations; ++i) {
    SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, Est, Flags);
    NewEst = DAG.getNode(ISD::FMUL, DL, VT, HalfArg, NewEst, Flags);
    NewEst = DAG.getNode(ISD::FSUB, DL, VT, ThreeHalves, NewEst, Flags);
    Est = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
  }

  // If non-reciprocal square root is requested, multiply the result by Arg.
  if (!Reciprocal)
    Est = DAG.getNode(ISD::FMUL, DL, VT, Est, Arg, Flags);

  return Est;
}

/// Newton iteration for finding a zero of F: X_{i+1} = X_i - F(X_i)/F'(X_i).
/// For the reciprocal sqrt, we need to find the zero of the function:
///   F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
///     =>
///   X_{i+1} = (-0.5 * X_i) * (A * X_i * X_i + (-3.0))
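///
/// A quick algebraic check (added for exposition): expanding
///   (-0.5 X_i) (A X_i^2 - 3.0) = 1.5 X_i - 0.5 A X_i^3
///                              = X_i (1.5 - A X_i^2 / 2)
/// shows this is the same Newton step as the one-constant form above.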
SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est,
                                         unsigned Iterations,
                                         SDNodeFlags Flags, bool Reciprocal) {
  EVT VT = Arg.getValueType();
  SDLoc DL(Arg);
  SDValue MinusThree = DAG.getConstantFP(-3.0, DL, VT);
  SDValue MinusHalf = DAG.getConstantFP(-0.5, DL, VT);

  // This routine must enter the loop below to work correctly
  // when (Reciprocal == false).
  assert(Iterations > 0 && "sqrt refinement requires at least one iteration");

  // Newton iterations for reciprocal square root:
  // E = (E * -0.5) * ((A * E) * E + -3.0)
  for (unsigned i = 0; i < Iterations; ++i) {
    SDValue AE = DAG.getNode(ISD::FMUL, DL, VT, Arg, Est, Flags);
    SDValue AEE = DAG.getNode(ISD::FMUL, DL, VT, AE, Est, Flags);
    SDValue RHS = DAG.getNode(ISD::FADD, DL, VT, AEE, MinusThree, Flags);

    // When calculating a square root at the last iteration build:
    // S = ((A * E) * -0.5) * ((A * E) * E + -3.0)
    // (notice a common subexpression)
    SDValue LHS;
    if (Reciprocal || (i + 1) < Iterations) {
      // RSQRT: LHS = (E * -0.5)
      LHS = DAG.getNode(ISD::FMUL, DL, VT, Est, MinusHalf, Flags);
    } else {
      // SQRT: LHS = (A * E) * -0.5
      LHS = DAG.getNode(ISD::FMUL, DL, VT, AE, MinusHalf, Flags);
    }

    Est = DAG.getNode(ISD::FMUL, DL, VT, LHS, RHS, Flags);
  }

  return Est;
}

/// Build code to calculate either rsqrt(Op) or sqrt(Op). In the latter case
/// Op*rsqrt(Op) is actually computed, so additional postprocessing is needed if
/// Op can be zero.
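///
/// (Spelling out why: an rsqrt estimate of 0.0 is +infinity, and
/// 0.0 * +infinity is NaN, so the sqrt path must force the result back to
/// 0.0 for zero -- and, under IEEE denormal handling, denormal -- inputs;
/// see the selects emitted below.)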
SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags,
                                           bool Reciprocal) {
  if (Level >= AfterLegalizeDAG)
    return SDValue();

  // TODO: Handle half and/or extended types?
  EVT VT = Op.getValueType();
  if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
    return SDValue();

  // If estimates are explicitly disabled for this function, we're done.
  MachineFunction &MF = DAG.getMachineFunction();
  int Enabled = TLI.getRecipEstimateSqrtEnabled(VT, MF);
  if (Enabled == TLI.ReciprocalEstimate::Disabled)
    return SDValue();

  // Estimates may be explicitly enabled for this type with a custom number of
  // refinement steps.
  int Iterations = TLI.getSqrtRefinementSteps(VT, MF);

  bool UseOneConstNR = false;
  if (SDValue Est =
      TLI.getSqrtEstimate(Op, DAG, Enabled, Iterations, UseOneConstNR,
                          Reciprocal)) {
    AddToWorklist(Est.getNode());

    if (Iterations) {
      Est = UseOneConstNR
            ? buildSqrtNROneConst(Op, Est, Iterations, Flags, Reciprocal)
            : buildSqrtNRTwoConst(Op, Est, Iterations, Flags, Reciprocal);

      if (!Reciprocal) {
        // The estimate is now completely wrong if the input was exactly 0.0 or
        // possibly a denormal. Force the answer to 0.0 for those cases.
        SDLoc DL(Op);
        EVT CCVT = getSetCCResultType(VT);
        ISD::NodeType SelOpcode = VT.isVector() ? ISD::VSELECT : ISD::SELECT;
        const Function &F = DAG.getMachineFunction().getFunction();
        Attribute Denorms = F.getFnAttribute("denormal-fp-math");
        if (Denorms.getValueAsString().equals("ieee")) {
          // fabs(X) < SmallestNormal ? 0.0 : Est
          const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
          APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem);
          SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT);
          SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
          SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op);
          SDValue IsDenorm = DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT);
          Est = DAG.getNode(SelOpcode, DL, VT, IsDenorm, FPZero, Est);
        } else {
          // X == 0.0 ? 0.0 : Est
          SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
          SDValue IsZero = DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
          Est = DAG.getNode(SelOpcode, DL, VT, IsZero, FPZero, Est);
        }
      }
    }
    return Est;
  }

  return SDValue();
}

SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags) {
  return buildSqrtEstimateImpl(Op, Flags, true);
}

SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags Flags) {
  return buildSqrtEstimateImpl(Op, Flags, false);
}

/// Return true if there is any possibility that the two addresses overlap.
bool DAGCombiner::isAlias(SDNode *Op0, SDNode *Op1) const {

  struct MemUseCharacteristics {
    bool IsVolatile;
    bool IsAtomic;
    SDValue BasePtr;
    int64_t Offset;
    Optional<int64_t> NumBytes;
    MachineMemOperand *MMO;
  };

  auto getCharacteristics = [](SDNode *N) -> MemUseCharacteristics {
    if (const auto *LSN = dyn_cast<LSBaseSDNode>(N)) {
      int64_t Offset = 0;
      if (auto *C = dyn_cast<ConstantSDNode>(LSN->getOffset()))
        Offset = (LSN->getAddressingMode() == ISD::PRE_INC)
                     ? C->getSExtValue()
                     : (LSN->getAddressingMode() == ISD::PRE_DEC)
                           ? -1 * C->getSExtValue()
                           : 0;
      return {LSN->isVolatile(), LSN->isAtomic(), LSN->getBasePtr(),
              Offset /*base offset*/,
              Optional<int64_t>(LSN->getMemoryVT().getStoreSize()),
              LSN->getMemOperand()};
    }
    if (const auto *LN = dyn_cast<LifetimeSDNode>(N))
      return {false /*isVolatile*/, /*isAtomic*/ false, LN->getOperand(1),
              (LN->hasOffset()) ? LN->getOffset() : 0,
              (LN->hasOffset()) ? Optional<int64_t>(LN->getSize())
                                : Optional<int64_t>(),
              (MachineMemOperand *)nullptr};
    // Default.
    return {false /*isVolatile*/, /*isAtomic*/ false, SDValue(),
            (int64_t)0 /*offset*/,
            Optional<int64_t>() /*size*/, (MachineMemOperand *)nullptr};
  };

  MemUseCharacteristics MUC0 = getCharacteristics(Op0),
                        MUC1 = getCharacteristics(Op1);

  // If they are to the same address, then they must be aliases.
  if (MUC0.BasePtr.getNode() && MUC0.BasePtr == MUC1.BasePtr &&
      MUC0.Offset == MUC1.Offset)
    return true;

  // If they are both volatile then they cannot be reordered.
  if (MUC0.IsVolatile && MUC1.IsVolatile)
    return true;

  // Be conservative about atomics for the moment.
  // TODO: This is way overconservative for unordered atomics (see D66309)
  if (MUC0.IsAtomic && MUC1.IsAtomic)
    return true;

  if (MUC0.MMO && MUC1.MMO) {
    if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
        (MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
      return false;
  }

  // Try to prove that there is aliasing, or that there is no aliasing. Either
  // way, we can return now. If nothing can be proved, proceed with more tests.
  bool IsAlias;
  if (BaseIndexOffset::computeAliasing(Op0, MUC0.NumBytes, Op1, MUC1.NumBytes,
                                       DAG, IsAlias))
    return IsAlias;

  // The following all rely on MMO0 and MMO1 being valid. Fail conservatively if
  // either are not known.
  if (!MUC0.MMO || !MUC1.MMO)
    return true;

  // If one operation reads from invariant memory and the other may store, they
  // cannot alias. These should really be checking the equivalent of mayWrite,
  // but it only matters for memory nodes other than load/store.
  if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
      (MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
    return false;

  // If SrcValue1 and SrcValue2 are known to have relatively large alignment
  // compared to the size and offset of the access, we may be able to prove
  // that they do not alias. This check is conservative for now to catch cases
  // created by splitting vector types.
  int64_t SrcValOffset0 = MUC0.MMO->getOffset();
  int64_t SrcValOffset1 = MUC1.MMO->getOffset();
  unsigned OrigAlignment0 = MUC0.MMO->getBaseAlignment();
  unsigned OrigAlignment1 = MUC1.MMO->getBaseAlignment();
  if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
      MUC0.NumBytes.hasValue() && MUC1.NumBytes.hasValue() &&
      *MUC0.NumBytes == *MUC1.NumBytes && OrigAlignment0 > *MUC0.NumBytes) {
    int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0;
    int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1;

    // There is no overlap between these relatively aligned accesses of
    // similar size. Return no alias.
    if ((OffAlign0 + *MUC0.NumBytes) <= OffAlign1 ||
        (OffAlign1 + *MUC1.NumBytes) <= OffAlign0)
      return false;
  }

  bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0
                   ? CombinerGlobalAA
                   : DAG.getSubtarget().useAA();
#ifndef NDEBUG
  if (CombinerAAOnlyFunc.getNumOccurrences() &&
      CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
    UseAA = false;
#endif

  if (UseAA && AA && MUC0.MMO->getValue() && MUC1.MMO->getValue()) {
    // Use alias analysis information.
    int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
    int64_t Overlap0 = *MUC0.NumBytes + SrcValOffset0 - MinOffset;
    int64_t Overlap1 = *MUC1.NumBytes + SrcValOffset1 - MinOffset;
    AliasResult AAResult = AA->alias(
        MemoryLocation(MUC0.MMO->getValue(), Overlap0,
                       UseTBAA ? MUC0.MMO->getAAInfo() : AAMDNodes()),
        MemoryLocation(MUC1.MMO->getValue(), Overlap1,
                       UseTBAA ? MUC1.MMO->getAAInfo() : AAMDNodes()));
    if (AAResult == NoAlias)
      return false;
  }

  // Otherwise we have to assume they alias.
  return true;
}

/// Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
                                   SmallVectorImpl<SDValue> &Aliases) {
  SmallVector<SDValue, 8> Chains;     // List of chains to visit.
  SmallPtrSet<SDNode *, 16> Visited;  // Visited node set.

  // Get alias information for node.
  // TODO: relax aliasing for unordered atomics (see D66309)
  const bool IsLoad = isa<LoadSDNode>(N) && cast<LoadSDNode>(N)->isSimple();

  // Starting off.
  Chains.push_back(OriginalChain);
  unsigned Depth = 0;

  // Attempt to improve chain by a single step
  std::function<bool(SDValue &)> ImproveChain = [&](SDValue &C) -> bool {
    switch (C.getOpcode()) {
    case ISD::EntryToken:
      // No need to mark EntryToken.
      C = SDValue();
      return true;
    case ISD::LOAD:
    case ISD::STORE: {
      // Get alias information for C.
      // TODO: Relax aliasing for unordered atomics (see D66309)
      bool IsOpLoad = isa<LoadSDNode>(C.getNode()) &&
                      cast<LSBaseSDNode>(C.getNode())->isSimple();
      if ((IsLoad && IsOpLoad) || !isAlias(N, C.getNode())) {
        // Look further up the chain.
        C = C.getOperand(0);
        return true;
      }
      // Alias, so stop here.
      return false;
    }

    case ISD::CopyFromReg:
      // Always forward past CopyFromReg.
      C = C.getOperand(0);
      return true;

    case ISD::LIFETIME_START:
    case ISD::LIFETIME_END: {
      // We can forward past any lifetime start/end that can be proven not to
      // alias the memory access.
      if (!isAlias(N, C.getNode())) {
        // Look further up the chain.
        C = C.getOperand(0);
        return true;
      }
      return false;
    }
    default:
      return false;
    }
  };

  // Look at each chain and determine if it is an alias.  If so, add it to the
  // aliases list.  If not, then continue up the chain looking for the next
  // candidate.
  while (!Chains.empty()) {
    SDValue Chain = Chains.pop_back_val();

    // Don't bother if we've seen Chain before.
    if (!Visited.insert(Chain.getNode()).second)
      continue;

    // For TokenFactor nodes, look at each operand and only continue up the
    // chain until we reach the depth limit.
    //
    // FIXME: The depth check could be made to return the last non-aliasing
    // chain we found before we hit a tokenfactor rather than the original
    // chain.
    if (Depth > TLI.getGatherAllAliasesMaxDepth()) {
      Aliases.clear();
      Aliases.push_back(OriginalChain);
      return;
    }

    if (Chain.getOpcode() == ISD::TokenFactor) {
      // We have to check each of the operands of the token factor for "small"
      // token factors, so we queue them up.  Adding the operands to the queue
      // (stack) in reverse order maintains the original order and increases the
      // likelihood that getNode will find a matching token factor (CSE).
      if (Chain.getNumOperands() > 16) {
        Aliases.push_back(Chain);
        continue;
      }
      for (unsigned n = Chain.getNumOperands(); n;)
        Chains.push_back(Chain.getOperand(--n));
      ++Depth;
      continue;
    }
    // Everything else
    if (ImproveChain(Chain)) {
      // Updated chain found; consider the new chain if one exists.
      if (Chain.getNode())
        Chains.push_back(Chain);
      ++Depth;
      continue;
    }
    // No improved chain is possible, so treat this node as an alias.
    Aliases.push_back(Chain);
  }
}

/// Walk up the chain, skipping non-aliasing memory nodes, looking for a better
/// chain (i.e. the first aliasing node).
SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
  if (OptLevel == CodeGenOpt::None)
    return OldChain;

  // Ops for replacing token factor.
  SmallVector<SDValue, 8> Aliases;

  // Accumulate all the aliases to this node.
  GatherAllAliases(N, OldChain, Aliases);

  // If no operands then chain to entry token.
  if (Aliases.size() == 0)
    return DAG.getEntryNode();

  // If a single operand then chain to it.  We don't need to revisit it.
  if (Aliases.size() == 1)
    return Aliases[0];

  // Construct a custom tailored token factor.
  return DAG.getTokenFactor(SDLoc(N), Aliases);
}

namespace {
// TODO: Replace with std::monostate when we move to C++17.
struct UnitT { } Unit;
bool operator==(const UnitT &, const UnitT &) { return true; }
bool operator!=(const UnitT &, const UnitT &) { return false; }
} // namespace

// This function tries to collect a bunch of potentially interesting
// nodes to improve the chains of, all at once. This might seem
// redundant, as this function gets called when visiting every store
// node, so why not let the work be done on each store as it's visited?
//
// I believe this is mainly important because MergeConsecutiveStores
// is unable to deal with merging stores of different sizes, so unless
// we improve the chains of all the potential candidates up-front
// before running MergeConsecutiveStores, it might only see some of
// the nodes that will eventually be candidates, and then not be able
// to go from a partially-merged state to the desired final
// fully-merged state.
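//
// As a concrete illustration (not from the original comment): for a chain
//   st %c, [%p+8] -> st %b, [%p+4] -> st %a, [%p+0] -> Ch
// the stores are pairwise disjoint, so each can be rewritten to use Ch (or a
// better chain found for it) directly, and a single TokenFactor over the
// rewritten stores replaces the serial chain, letting MergeConsecutiveStores
// see all of the candidates at once.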

bool DAGCombiner::parallelizeChainedStores(StoreSDNode *St) {
  SmallVector<StoreSDNode *, 8> ChainedStores;
  StoreSDNode *STChain = St;
  // Intervals records which offsets from BaseIndex have been covered. In
  // the common case, every store writes to the address immediately preceding
  // the previous one, so its interval is merged with the previous interval at
  // insertion time.

  using IMap =
      llvm::IntervalMap<int64_t, UnitT, 8, IntervalMapHalfOpenInfo<int64_t>>;
  IMap::Allocator A;
  IMap Intervals(A);

  // This holds the base pointer, index, and the offset in bytes from the base
  // pointer.
  const BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);

  // We must have a base and an offset.
  if (!BasePtr.getBase().getNode())
    return false;

  // Do not handle stores to undef base pointers.
  if (BasePtr.getBase().isUndef())
    return false;

  // Add ST's interval.
  Intervals.insert(0, (St->getMemoryVT().getSizeInBits() + 7) / 8, Unit);

  while (StoreSDNode *Chain = dyn_cast<StoreSDNode>(STChain->getChain())) {
    // If the chain has more than one use, then we can't reorder the mem ops.
    if (!SDValue(Chain, 0)->hasOneUse())
      break;
    // TODO: Relax for unordered atomics (see D66309)
    if (!Chain->isSimple() || Chain->isIndexed())
      break;

    // Find the base pointer and offset for this memory node.
    const BaseIndexOffset Ptr = BaseIndexOffset::match(Chain, DAG);
    // Check that the base pointer is the same as the original one.
    int64_t Offset;
    if (!BasePtr.equalBaseIndex(Ptr, DAG, Offset))
      break;
    int64_t Length = (Chain->getMemoryVT().getSizeInBits() + 7) / 8;
    // Make sure we don't overlap with other intervals by checking the ones to
    // the left or right before inserting.
    auto I = Intervals.find(Offset);
    // If there's a next interval, we should end before it.
    if (I != Intervals.end() && I.start() < (Offset + Length))
      break;
    // If there's a previous interval, we should start after it.
    if (I != Intervals.begin() && (--I).stop() > Offset)
      break;
    Intervals.insert(Offset, Offset + Length, Unit);

    ChainedStores.push_back(Chain);
    STChain = Chain;
  }

  // If we didn't find a chained store, exit.
  if (ChainedStores.size() == 0)
    return false;

  // Improve all chained stores (St and ChainedStores members) starting from
  // where the store chain ended and return single TokenFactor.
  SDValue NewChain = STChain->getChain();
  SmallVector<SDValue, 8> TFOps;
  for (unsigned I = ChainedStores.size(); I;) {
    StoreSDNode *S = ChainedStores[--I];
    SDValue BetterChain = FindBetterChain(S, NewChain);
    S = cast<StoreSDNode>(DAG.UpdateNodeOperands(
        S, BetterChain, S->getOperand(1), S->getOperand(2), S->getOperand(3)));
    TFOps.push_back(SDValue(S, 0));
    ChainedStores[I] = S;
  }

  // Improve St's chain. Use a new node to avoid creating a loop from CombineTo.
  SDValue BetterChain = FindBetterChain(St, NewChain);
  SDValue NewST;
  if (St->isTruncatingStore())
    NewST = DAG.getTruncStore(BetterChain, SDLoc(St), St->getValue(),
                              St->getBasePtr(), St->getMemoryVT(),
                              St->getMemOperand());
  else
    NewST = DAG.getStore(BetterChain, SDLoc(St), St->getValue(),
                         St->getBasePtr(), St->getMemOperand());

  TFOps.push_back(NewST);

  // If we improved every element of TFOps, then we've lost the dependence on
  // NewChain to successors of St and we need to add it back to TFOps. Do so at
  // the beginning to keep relative order consistent with FindBetterChains.
  auto hasImprovedChain = [&](SDValue ST) -> bool {
    return ST->getOperand(0) != NewChain;
  };
  bool AddNewChain = llvm::all_of(TFOps, hasImprovedChain);
  if (AddNewChain)
    TFOps.insert(TFOps.begin(), NewChain);

  SDValue TF = DAG.getTokenFactor(SDLoc(STChain), TFOps);
  CombineTo(St, TF);

  // Add TF and its operands to the worklist.
  AddToWorklist(TF.getNode());
  for (const SDValue &Op : TF->ops())
    AddToWorklist(Op.getNode());
  AddToWorklist(STChain);
  return true;
}

bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
  if (OptLevel == CodeGenOpt::None)
    return false;

  const BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);

  // We must have a base and an offset.
  if (!BasePtr.getBase().getNode())
    return false;

  // Do not handle stores to undef base pointers.
  if (BasePtr.getBase().isUndef())
    return false;

  // Directly improve a chain of disjoint stores starting at St.
  if (parallelizeChainedStores(St))
    return true;

  // Improve St's chain.
  SDValue BetterChain = FindBetterChain(St, St->getChain());
  if (St->getChain() != BetterChain) {
    replaceStoreChain(St, BetterChain);
    return true;
  }
  return false;
}

/// This is the entry point for the file: construct a DAGCombiner and run it
/// over the DAG.
void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis *AA,
                           CodeGenOpt::Level OptLevel) {
  DAGCombiner(*this, AA, OptLevel).Run(Level);
}