reference, declaration → definition
definition → references, declarations, derived classes, virtual overrides
reference to multiple definitions → definitions
unreferenced
10048
10049
10050
10051
10052
10053
10054
10055
10056
10057
10058
10059
10060
10061
10062
10063
10064
10065
10066
10067
10068
10069
10070
10071
10072
10073
10074
10075
10076
10077
10078
10079
10080
10081
10082
10083
10084
10085
10086
10087
10088
10089
10090
10091
10092
10093
10094
10095
10096
10097
10098
10099
10100
10101
10102
10103
10104
10105
10106
10107
10108
10109
10110
10111
10112
10113
10114
10115
10116
10117
10118
10119
10120
10121
10122
10123
10124
10125
10126
10127
10128
10129
10130
10131
10132
10133
10134
10135
10136
10137
10138
10139
10140
10141
10142
10143
10144
10145
10146
10147
10148
10149
10150
10151
10152
10153
10154
10155
10156
10157
10158
10159
10160
10161
10162
10163
10164
10165
10166
10167
10168
10169
10170
10171
10172
10173
10174
10175
10176
10177
10178
10179
10180
10181
10182
10183
10184
10185
10186
10187
10188
10189
10190
10191
10192
10193
10194
10195
10196
10197
10198
10199
10200
10201
10202
10203
10204
10205
10206
10207
10208
10209
10210
10211
10212
10213
10214
10215
10216
10217
10218
10219
10220
10221
10222
10223
10224
10225
10226
10227
10228
10229
10230
10231
10232
10233
10234
10235
10236
10237
10238
10239
10240
10241
10242
10243
10244
10245
10246
10247
10248
10249
10250
10251
10252
10253
10254
10255
10256
10257
10258
10259
10260
10261
10262
10263
10264
10265
10266
10267
10268
10269
10270
10271
10272
10273
10274
10275
10276
10277
10278
10279
10280
10281
10282
10283
10284
10285
10286
10287
10288
10289
10290
10291
10292
10293
10294
10295
10296
10297
10298
10299
10300
10301
10302
10303
10304
10305
10306
10307
10308
10309
10310
10311
10312
10313
10314
10315
10316
10317
10318
10319
10320
10321
10322
10323
10324
10325
10326
10327
10328
10329
10330
10331
10332
10333
10334
10335
10336
10337
10338
10339
10340
10341
10342
10343
10344
10345
10346
10347
10348
10349
10350
10351
10352
10353
10354
10355
10356
10357
10358
10359
10360
10361
10362
10363
10364
10365
10366
10367
10368
10369
10370
10371
10372
10373
10374
10375
10376
10377
10378
10379
10380
10381
10382
10383
10384
10385
10386
10387
10388
10389
10390
10391
10392
10393
10394
10395
10396
10397
10398
10399
10400
10401
10402
10403
10404
10405
10406
10407
10408
10409
10410
10411
10412
10413
10414
10415
10416
10417
10418
10419
10420
10421
10422
10423
10424
10425
10426
10427
10428
10429
10430
10431
10432
10433
10434
10435
10436
10437
10438
10439
10440
10441
10442
10443
10444
10445
10446
10447
10448
10449
10450
10451
10452
10453
10454
10455
10456
10457
10458
10459
10460
10461
10462
10463
10464
10465
10466
10467
10468
10469
10470
10471
10472
10473
10474
10475
10476
10477
10478
10479
10480
10481
10482
10483
10484
10485
10486
10487
10488
10489
10490
10491
10492
10493
10494
10495
10496
10497
10498
10499
10500
10501
10502
10503
10504
10505
10506
10507
10508
10509
10510
10511
10512
10513
10514
10515
10516
10517
10518
10519
10520
10521
10522
10523
10524
10525
10526
10527
10528
10529
10530
10531
10532
10533
10534
10535
10536
10537
10538
10539
10540
10541
10542
10543
10544
10545
10546
10547
10548
10549
10550
10551
10552
10553
10554
10555
10556
10557
10558
10559
10560
10561
10562
10563
10564
10565
10566
10567
10568
10569
10570
10571
10572
10573
10574
10575
10576
10577
10578
10579
10580
10581
10582
10583
10584
10585
10586
10587
10588
10589
10590
10591
10592
10593
10594
10595
10596
10597
10598
10599
10600
10601
10602
10603
10604
10605
10606
10607
10608
10609
10610
10611
10612
10613
10614
10615
10616
10617
10618
10619
10620
10621
10622
10623
10624
10625
10626
10627
10628
10629
10630
10631
10632
10633
10634
10635
10636
10637
10638
10639
10640
10641
10642
10643
10644
10645
10646
10647
10648
10649
10650
10651
10652
10653
10654
10655
10656
10657
10658
10659
10660
10661
10662
10663
10664
10665
10666
10667
10668
10669
10670
10671
10672
10673
10674
10675
10676
10677
10678
10679
10680
10681
10682
10683
10684
10685
10686
10687
10688
10689
10690
10691
10692
10693
10694
10695
10696
10697
10698
10699
10700
10701
10702
10703
10704
10705
10706
10707
10708
10709
10710
10711
10712
10713
10714
10715
10716
10717
10718
10719
10720
10721
10722
10723
10724
10725
10726
10727
10728
10729
10730
10731
10732
10733
10734
10735
10736
10737
10738
10739
10740
10741
10742
10743
10744
10745
10746
10747
10748
10749
10750
10751
10752
10753
10754
10755
10756
10757
10758
10759
10760
10761
10762
10763
10764
10765
10766
10767
10768
10769
10770
10771
10772
10773
10774
10775
10776
10777
10778
10779
10780
10781
10782
10783
10784
10785
10786
10787
10788
10789
10790
10791
10792
10793
10794
10795
10796
10797
10798
10799
10800
10801
10802
10803
10804
10805
10806
10807
10808
10809
10810
10811
10812
10813
10814
10815
10816
10817
10818
10819
10820
10821
10822
10823
10824
10825
10826
10827
10828
10829
10830
10831
10832
10833
10834
10835
10836
10837
10838
10839
10840
10841
10842
10843
10844
10845
10846
10847
10848
10849
10850
10851
10852
10853
10854
10855
10856
10857
10858
10859
10860
10861
10862
10863
10864
10865
10866
10867
10868
10869
10870
10871
10872
10873
10874
10875
10876
10877
10878
10879
10880
10881
10882
10883
10884
10885
10886
10887
10888
10889
10890
10891
10892
10893
10894
10895
10896
10897
10898
10899
10900
10901
10902
10903
10904
10905
10906
10907
10908
10909
10910
10911
10912
10913
10914
10915
10916
10917
10918
10919
10920
10921
10922
10923
10924
10925
10926
10927
10928
10929
10930
10931
10932
10933
10934
10935
10936
10937
10938
10939
10940
10941
10942
10943
10944
10945
10946
10947
10948
10949
10950
10951
10952
10953
10954
10955
10956
10957
10958
10959
10960
10961
10962
10963
10964
10965
10966
10967
10968
10969
10970
10971
10972
10973
10974
10975
10976
10977
10978
10979
10980
10981
10982
10983
10984
10985
10986
10987
10988
10989
10990
10991
10992
10993
10994
10995
10996
10997
10998
10999
11000
11001
11002
11003
11004
11005
11006
11007
11008
11009
11010
11011
11012
11013
11014
11015
11016
11017
11018
11019
11020
11021
11022
11023
11024
11025
11026
11027
11028
11029
11030
11031
11032
11033
11034
11035
11036
11037
11038
11039
11040
11041
11042
11043
11044
11045
11046
11047
11048
11049
11050
11051
11052
11053
11054
11055
11056
11057
11058
11059
11060
11061
11062
11063
11064
11065
11066
11067
11068
11069
11070
11071
11072
11073
11074
11075
11076
11077
11078
11079
11080
11081
11082
11083
11084
11085
11086
11087
11088
11089
11090
11091
11092
11093
11094
11095
11096
11097
11098
11099
11100
11101
11102
11103
11104
11105
11106
11107
11108
11109
11110
11111
11112
11113
11114
11115
11116
11117
11118
11119
11120
11121
11122
11123
11124
11125
11126
11127
11128
11129
11130
11131
11132
11133
11134
11135
11136
11137
11138
11139
11140
11141
11142
11143
11144
11145
11146
11147
11148
11149
11150
11151
11152
11153
11154
11155
11156
11157
11158
11159
11160
11161
11162
11163
11164
11165
11166
11167
11168
11169
11170
11171
11172
11173
11174
11175
11176
11177
11178
11179
11180
11181
11182
11183
11184
11185
11186
11187
11188
11189
11190
11191
11192
11193
11194
11195
11196
11197
11198
11199
11200
11201
11202
11203
11204
11205
11206
11207
11208
11209
11210
11211
11212
11213
11214
11215
11216
11217
11218
11219
11220
11221
11222
11223
11224
11225
11226
11227
11228
11229
11230
11231
11232
11233
11234
11235
11236
11237
11238
11239
11240
11241
11242
11243
11244
11245
11246
11247
11248
11249
11250
11251
11252
11253
11254
11255
11256
11257
11258
11259
11260
11261
11262
11263
11264
11265
11266
11267
11268
11269
11270
11271
11272
11273
11274
11275
11276
11277
11278
11279
11280
11281
11282
11283
11284
11285
11286
11287
11288
11289
11290
11291
11292
11293
11294
11295
11296
11297
11298
11299
11300
11301
11302
11303
11304
11305
11306
11307
11308
11309
11310
11311
11312
11313
11314
11315
11316
11317
11318
11319
11320
11321
11322
11323
11324
11325
11326
11327
11328
11329
11330
11331
11332
11333
11334
11335
11336
11337
11338
11339
11340
11341
11342
11343
11344
11345
11346
11347
11348
11349
11350
11351
11352
11353
11354
11355
11356
11357
11358
11359
11360
11361
11362
11363
11364
11365
11366
11367
11368
11369
11370
11371
11372
11373
11374
11375
11376
11377
11378
11379
11380
11381
11382
11383
11384
11385
11386
11387
11388
11389
11390
11391
11392
11393
11394
11395
11396
11397
11398
11399
11400
11401
11402
11403
11404
11405
11406
11407
11408
11409
11410
11411
11412
11413
11414
11415
11416
11417
11418
11419
11420
11421
11422
11423
11424
11425
11426
11427
11428
11429
11430
11431
11432
11433
11434
11435
11436
11437
11438
11439
11440
11441
11442
11443
11444
11445
11446
11447
11448
11449
11450
11451
11452
11453
11454
11455
11456
11457
11458
11459
11460
11461
11462
11463
11464
11465
11466
11467
11468
11469
11470
11471
11472
11473
11474
11475
11476
11477
11478
11479
11480
11481
11482
11483
11484
11485
11486
11487
11488
11489
11490
11491
11492
11493
11494
11495
11496
11497
11498
11499
11500
11501
11502
11503
11504
11505
11506
11507
11508
11509
11510
11511
11512
11513
11514
11515
11516
11517
11518
11519
11520
11521
11522
11523
11524
11525
11526
11527
11528
11529
11530
11531
11532
11533
11534
11535
11536
11537
11538
11539
11540
11541
11542
11543
11544
11545
11546
11547
11548
11549
11550
11551
11552
11553
11554
11555
11556
11557
11558
11559
11560
11561
11562
11563
11564
11565
11566
11567
11568
11569
11570
11571
11572
11573
11574
11575
11576
11577
11578
11579
11580
11581
11582
11583
11584
11585
11586
11587
11588
11589
11590
11591
11592
11593
11594
11595
11596
11597
11598
11599
11600
11601
11602
11603
11604
11605
11606
11607
11608
11609
11610
11611
11612
11613
11614
11615
11616
11617
11618
11619
11620
11621
11622
11623
11624
11625
11626
11627
11628
11629
11630
11631
11632
11633
11634
11635
11636
11637
11638
11639
11640
11641
11642
11643
11644
11645
11646
11647
11648
11649
11650
11651
11652
11653
11654
11655
11656
11657
11658
11659
11660
11661
11662
11663
11664
11665
11666
11667
11668
11669
11670
11671
11672
11673
11674
11675
11676
11677
11678
11679
11680
11681
11682
11683
11684
11685
11686
11687
11688
11689
11690
11691
11692
11693
11694
11695
11696
11697
11698
11699
11700
11701
11702
11703
11704
11705
11706
11707
11708
11709
11710
11711
11712
11713
11714
11715
11716
11717
11718
11719
11720
11721
11722
11723
11724
11725
11726
11727
11728
11729
11730
11731
11732
11733
11734
11735
11736
11737
11738
11739
11740
11741
11742
11743
11744
11745
11746
11747
11748
11749
11750
11751
11752
11753
11754
11755
11756
11757
11758
11759
11760
11761
11762
11763
11764
11765
11766
11767
11768
11769
11770
11771
11772
11773
11774
11775
11776
11777
11778
11779
11780
11781
11782
11783
11784
11785
11786
11787
11788
11789
11790
11791
11792
11793
11794
11795
11796
11797
11798
11799
11800
11801
11802
11803
11804
11805
11806
11807
11808
11809
11810
11811
11812
11813
11814
11815
11816
11817
11818
11819
11820
11821
11822
11823
11824
11825
11826
11827
11828
11829
11830
11831
11832
11833
11834
11835
11836
11837
11838
11839
11840
11841
11842
11843
11844
11845
11846
11847
11848
11849
11850
11851
11852
11853
11854
11855
11856
11857
11858
11859
11860
11861
11862
11863
11864
11865
11866
11867
11868
11869
11870
11871
11872
11873
11874
11875
11876
11877
11878
11879
11880
11881
11882
11883
11884
11885
11886
11887
11888
11889
11890
11891
11892
11893
11894
11895
11896
11897
11898
11899
11900
11901
11902
11903
11904
11905
11906
11907
11908
11909
11910
11911
11912
11913
11914
11915
11916
11917
11918
11919
11920
11921
11922
11923
11924
11925
11926
11927
11928
11929
11930
11931
11932
11933
11934
11935
11936
11937
11938
11939
11940
11941
11942
11943
11944
11945
11946
11947
11948
11949
11950
11951
11952
11953
11954
11955
11956
11957
11958
11959
11960
11961
11962
11963
11964
11965
11966
11967
11968
11969
11970
11971
11972
11973
11974
11975
11976
11977
11978
11979
11980
11981
11982
11983
11984
11985
11986
11987
11988
11989
11990
11991
11992
11993
11994
11995
11996
11997
11998
11999
12000
12001
12002
12003
12004
12005
12006
12007
12008
12009
12010
12011
12012
12013
12014
12015
12016
12017
12018
12019
12020
12021
12022
12023
12024
12025
12026
12027
12028
12029
12030
12031
12032
12033
12034
12035
12036
12037
12038
12039
12040
12041
12042
12043
12044
12045
12046
12047
12048
12049
12050
12051
12052
12053
12054
12055
12056
12057
12058
12059
12060
12061
12062
12063
12064
12065
12066
12067
12068
12069
12070
12071
12072
12073
12074
12075
12076
12077
12078
12079
12080
12081
12082
12083
12084
12085
12086
12087
12088
12089
12090
12091
12092
12093
12094
12095
12096
12097
12098
12099
12100
12101
12102
12103
12104
12105
12106
12107
12108
12109
12110
12111
12112
12113
12114
12115
12116
12117
12118
12119
12120
12121
12122
12123
12124
12125
12126
12127
12128
12129
12130
12131
12132
12133
12134
12135
12136
12137
12138
12139
12140
12141
12142
12143
12144
12145
12146
12147
12148
12149
12150
12151
12152
12153
12154
12155
12156
12157
12158
12159
12160
12161
12162
12163
12164
12165
12166
12167
12168
12169
12170
12171
12172
12173
12174
12175
12176
12177
12178
12179
12180
12181
12182
12183
12184
12185
12186
12187
12188
12189
12190
12191
12192
12193
12194
12195
12196
12197
12198
12199
12200
12201
12202
12203
12204
12205
12206
12207
12208
12209
12210
12211
12212
12213
12214
12215
12216
12217
12218
12219
12220
12221
12222
12223
12224
12225
12226
12227
12228
12229
12230
12231
12232
12233
12234
12235
12236
12237
12238
12239
12240
12241
12242
12243
12244
12245
12246
12247
12248
12249
12250
12251
12252
12253
12254
12255
12256
12257
12258
12259
12260
12261
12262
12263
12264
12265
12266
12267
12268
12269
12270
12271
12272
12273
12274
12275
12276
12277
12278
12279
12280
12281
12282
12283
12284
12285
12286
12287
12288
12289
12290
12291
12292
12293
12294
12295
12296
12297
12298
12299
12300
12301
12302
12303
12304
12305
12306
12307
12308
12309
12310
12311
12312
12313
12314
12315
12316
12317
12318
12319
12320
12321
12322
12323
12324
12325
12326
12327
12328
12329
12330
12331
12332
12333
12334
12335
12336
12337
12338
12339
12340
12341
12342
12343
12344
12345
12346
12347
12348
12349
12350
12351
12352
12353
12354
12355
12356
12357
12358
12359
12360
12361
12362
12363
12364
12365
12366
12367
12368
12369
12370
12371
12372
12373
12374
12375
12376
12377
12378
12379
12380
12381
12382
12383
12384
12385
12386
12387
12388
12389
12390
12391
12392
12393
12394
12395
12396
12397
12398
12399
12400
12401
12402
12403
12404
12405
12406
12407
12408
12409
12410
12411
12412
12413
12414
12415
12416
12417
12418
12419
12420
12421
12422
12423
12424
12425
12426
12427
12428
12429
12430
12431
12432
12433
12434
12435
12436
12437
12438
12439
12440
12441
12442
12443
12444
12445
12446
12447
12448
12449
12450
12451
12452
12453
12454
12455
12456
12457
12458
12459
12460
12461
12462
12463
12464
12465
12466
12467
12468
12469
12470
12471
12472
12473
12474
12475
12476
12477
12478
12479
12480
12481
12482
12483
12484
12485
12486
12487
12488
12489
12490
12491
12492
12493
12494
12495
12496
12497
12498
12499
12500
12501
12502
12503
12504
12505
12506
12507
12508
12509
12510
12511
12512
12513
12514
12515
12516
12517
12518
12519
12520
12521
12522
12523
12524
12525
12526
12527
12528
12529
12530
12531
12532
12533
12534
12535
12536
12537
12538
12539
12540
12541
12542
12543
12544
12545
12546
12547
12548
12549
12550
12551
12552
12553
12554
12555
12556
12557
12558
12559
12560
12561
12562
12563
12564
12565
12566
12567
12568
12569
12570
12571
12572
12573
12574
12575
12576
12577
12578
12579
12580
12581
12582
12583
12584
12585
12586
12587
12588
12589
12590
12591
12592
12593
12594
12595
12596
12597
12598
12599
12600
12601
12602
12603
12604
12605
12606
12607
12608
12609
12610
12611
12612
12613
12614
12615
12616
12617
12618
12619
12620
12621
12622
12623
12624
12625
12626
12627
12628
12629
12630
12631
12632
12633
12634
12635
12636
12637
12638
12639
12640
12641
12642
12643
12644
12645
12646
12647
12648
12649
12650
12651
12652
12653
12654
12655
12656
12657
12658
12659
12660
12661
12662
12663
12664
12665
12666
12667
12668
12669
12670
12671
12672
12673
12674
12675
12676
12677
12678
12679
12680
12681
12682
12683
12684
12685
12686
12687
12688
12689
12690
12691
12692
12693
12694
12695
12696
12697
12698
12699
12700
12701
12702
12703
12704
12705
12706
12707
12708
12709
12710
12711
12712
12713
12714
12715
12716
12717
12718
12719
12720
12721
12722
12723
12724
12725
12726
12727
12728
12729
12730
12731
12732
12733
12734
12735
12736
12737
12738
12739
12740
12741
12742
12743
12744
12745
12746
12747
12748
12749
12750
12751
12752
12753
12754
12755
12756
12757
12758
12759
12760
12761
12762
12763
12764
12765
12766
12767
12768
12769
12770
12771
12772
12773
12774
12775
12776
12777
12778
12779
12780
12781
12782
12783
12784
12785
12786
12787
12788
12789
12790
12791
12792
12793
12794
12795
12796
12797
12798
12799
12800
12801
12802
12803
12804
12805
12806
12807
12808
12809
12810
12811
12812
12813
12814
12815
12816
12817
12818
12819
12820
12821
12822
12823
12824
12825
12826
12827
12828
12829
12830
12831
12832
12833
12834
12835
12836
12837
12838
12839
12840
12841
12842
12843
12844
12845
12846
12847
12848
12849
12850
12851
12852
12853
12854
12855
12856
12857
12858
12859
12860
12861
12862
12863
12864
12865
12866
12867
12868
12869
12870
12871
12872
12873
12874
12875
12876
12877
12878
12879
12880
12881
12882
12883
12884
12885
12886
12887
12888
12889
12890
12891
12892
12893
12894
12895
12896
12897
12898
12899
12900
12901
12902
12903
12904
12905
12906
12907
12908
12909
12910
12911
12912
12913
12914
12915
12916
12917
12918
12919
12920
12921
12922
12923
12924
12925
12926
12927
12928
12929
12930
12931
12932
12933
12934
12935
12936
12937
12938
12939
12940
12941
12942
12943
12944
12945
12946
12947
12948
12949
12950
12951
12952
12953
12954
12955
12956
12957
12958
12959
12960
12961
12962
12963
12964
12965
12966
12967
12968
12969
12970
12971
12972
12973
12974
12975
12976
12977
12978
12979
12980
12981
12982
12983
12984
12985
12986
12987
12988
12989
12990
12991
12992
12993
12994
12995
12996
12997
12998
12999
13000
13001
13002
13003
13004
13005
13006
13007
13008
13009
13010
13011
13012
13013
13014
13015
13016
13017
13018
13019
13020
13021
13022
13023
13024
13025
13026
13027
13028
13029
13030
13031
13032
13033
13034
13035
13036
13037
13038
13039
13040
13041
13042
13043
13044
13045
13046
13047
13048
13049
13050
13051
13052
13053
13054
13055
13056
13057
13058
13059
13060
13061
13062
13063
13064
13065
13066
13067
13068
13069
13070
13071
13072
13073
13074
13075
13076
13077
13078
13079
13080
13081
13082
13083
13084
13085
13086
13087
13088
13089
13090
13091
13092
13093
13094
13095
13096
13097
13098
13099
13100
13101
13102
13103
13104
13105
13106
13107
13108
13109
13110
13111
13112
13113
13114
13115
13116
13117
13118
13119
13120
13121
13122
13123
13124
13125
13126
13127
13128
13129
13130
13131
13132
13133
13134
13135
13136
13137
13138
13139
13140
13141
13142
13143
13144
13145
13146
13147
13148
13149
13150
13151
13152
13153
13154
13155
13156
13157
13158
13159
13160
13161
13162
13163
13164
13165
13166
13167
13168
13169
13170
13171
13172
13173
13174
13175
13176
13177
13178
13179
13180
13181
13182
13183
13184
13185
13186
13187
13188
13189
13190
13191
13192
13193
13194
13195
13196
13197
13198
13199
13200
13201
13202
13203
13204
13205
13206
13207
13208
13209
13210
13211
13212
13213
13214
13215
13216
13217
13218
13219
13220
13221
13222
13223
13224
13225
13226
13227
13228
13229
13230
13231
13232
13233
13234
13235
13236
13237
13238
13239
13240
13241
13242
13243
13244
13245
13246
13247
13248
13249
13250
13251
13252
13253
13254
13255
13256
13257
13258
13259
13260
13261
13262
13263
13264
13265
13266
13267
13268
13269
13270
13271
13272
13273
13274
13275
13276
13277
13278
13279
13280
13281
13282
13283
13284
13285
13286
13287
13288
13289
13290
13291
13292
13293
13294
13295
13296
13297
13298
13299
13300
13301
13302
13303
13304
13305
13306
13307
13308
13309
13310
13311
13312
13313
13314
13315
13316
13317
13318
13319
13320
13321
13322
13323
13324
13325
13326
13327
13328
13329
13330
13331
13332
13333
13334
13335
13336
13337
13338
13339
13340
13341
13342
13343
13344
13345
13346
13347
13348
13349
13350
13351
13352
13353
13354
13355
13356
13357
13358
13359
13360
13361
13362
13363
13364
13365
13366
13367
13368
13369
13370
13371
13372
13373
13374
13375
13376
13377
13378
13379
13380
13381
13382
13383
13384
13385
13386
13387
13388
13389
13390
13391
13392
13393
13394
13395
13396
13397
13398
13399
13400
13401
13402
13403
13404
13405
13406
13407
13408
13409
13410
13411
13412
13413
13414
13415
13416
13417
13418
13419
13420
13421
13422
13423
13424
13425
13426
13427
13428
13429
13430
13431
13432
13433
13434
13435
13436
13437
13438
13439
13440
13441
13442
13443
13444
13445
13446
13447
13448
13449
13450
13451
13452
13453
13454
13455
13456
13457
13458
13459
13460
13461
13462
13463
13464
13465
13466
13467
13468
13469
13470
13471
13472
13473
13474
13475
13476
13477
13478
13479
13480
13481
13482
13483
13484
13485
13486
13487
13488
13489
13490
13491
13492
13493
13494
13495
13496
13497
13498
13499
13500
13501
13502
13503
13504
13505
13506
13507
13508
13509
13510
13511
13512
13513
13514
13515
13516
13517
13518
13519
13520
13521
13522
13523
13524
13525
13526
13527
13528
13529
13530
13531
13532
13533
13534
13535
13536
13537
13538
13539
13540
13541
13542
13543
13544
13545
13546
13547
13548
13549
13550
13551
13552
13553
13554
13555
13556
13557
13558
13559
13560
13561
13562
13563
13564
13565
13566
13567
13568
13569
13570
13571
13572
13573
13574
13575
13576
13577
13578
13579
13580
13581
13582
13583
13584
13585
13586
13587
13588
13589
13590
13591
13592
13593
13594
13595
13596
13597
13598
13599
13600
13601
13602
13603
13604
13605
13606
13607
13608
13609
13610
13611
13612
13613
13614
13615
13616
13617
13618
13619
13620
13621
13622
13623
13624
13625
13626
13627
13628
13629
13630
13631
13632
13633
13634
13635
13636
13637
13638
13639
13640
13641
13642
13643
13644
13645
13646
13647
13648
13649
13650
13651
13652
13653
13654
13655
13656
13657
13658
13659
13660
13661
13662
13663
13664
13665
13666
13667
13668
13669
13670
13671
13672
13673
13674
13675
13676
13677
13678
13679
13680
13681
13682
13683
13684
13685
13686
13687
13688
13689
13690
13691
13692
13693
13694
13695
13696
13697
13698
13699
13700
13701
13702
13703
13704
13705
13706
13707
13708
13709
13710
13711
13712
13713
13714
13715
13716
13717
13718
13719
13720
13721
13722
13723
13724
13725
13726
13727
13728
13729
13730
13731
13732
13733
13734
13735
13736
13737
13738
13739
13740
13741
13742
13743
13744
13745
13746
13747
13748
13749
13750
13751
13752
13753
13754
13755
13756
13757
13758
13759
13760
13761
13762
13763
13764
13765
13766
13767
13768
13769
13770
13771
13772
13773
13774
13775
13776
13777
13778
13779
13780
13781
13782
13783
13784
13785
13786
13787
13788
13789
13790
13791
13792
13793
13794
13795
13796
13797
13798
13799
13800
13801
13802
13803
13804
13805
13806
13807
13808
13809
13810
13811
13812
13813
13814
13815
13816
13817
13818
13819
13820
13821
13822
13823
13824
13825
13826
13827
13828
13829
13830
13831
13832
13833
13834
13835
13836
13837
13838
13839
13840
13841
13842
13843
13844
13845
13846
13847
13848
13849
13850
13851
13852
13853
13854
13855
13856
13857
13858
13859
13860
13861
13862
13863
13864
13865
13866
13867
13868
13869
13870
13871
13872
13873
13874
13875
13876
13877
13878
13879
13880
13881
13882
13883
13884
13885
13886
13887
13888
13889
13890
13891
13892
13893
13894
13895
13896
13897
13898
13899
13900
13901
13902
13903
13904
13905
13906
13907
13908
13909
13910
13911
13912
13913
13914
13915
13916
13917
13918
13919
13920
13921
13922
13923
13924
13925
13926
13927
13928
13929
13930
13931
13932
13933
13934
13935
13936
13937
13938
13939
13940
13941
13942
13943
13944
13945
13946
13947
13948
13949
13950
13951
13952
13953
13954
13955
13956
13957
13958
13959
13960
13961
13962
13963
13964
13965
13966
13967
13968
13969
13970
13971
13972
13973
13974
13975
13976
13977
13978
13979
13980
13981
13982
13983
13984
13985
13986
13987
13988
13989
13990
13991
13992
13993
13994
13995
13996
13997
13998
13999
14000
14001
14002
14003
14004
14005
14006
14007
14008
14009
14010
14011
14012
14013
14014
14015
14016
14017
14018
14019
14020
14021
14022
14023
14024
14025
14026
14027
14028
14029
14030
14031
14032
14033
14034
14035
14036
14037
14038
14039
14040
14041
14042
14043
14044
14045
14046
14047
14048
14049
14050
14051
14052
14053
14054
14055
14056
14057
14058
14059
14060
14061
14062
14063
14064
14065
14066
14067
14068
14069
14070
14071
14072
14073
14074
14075
14076
14077
14078
14079
14080
14081
14082
14083
14084
14085
14086
14087
14088
14089
14090
14091
14092
14093
14094
14095
14096
14097
14098
14099
14100
14101
14102
14103
14104
14105
14106
14107
14108
14109
14110
14111
14112
14113
14114
14115
14116
14117
14118
14119
14120
14121
14122
14123
14124
14125
14126
14127
14128
14129
14130
14131
14132
14133
14134
14135
14136
14137
14138
14139
14140
14141
14142
14143
14144
14145
14146
14147
14148
14149
14150
14151
14152
14153
14154
14155
14156
14157
14158
14159
14160
14161
14162
14163
14164
14165
14166
14167
14168
14169
14170
14171
14172
14173
14174
14175
14176
14177
14178
14179
14180
14181
14182
14183
14184
14185
14186
14187
14188
14189
14190
14191
14192
14193
14194
14195
14196
14197
14198
14199
14200
14201
14202
14203
14204
14205
14206
14207
14208
14209
14210
14211
14212
14213
14214
14215
14216
14217
14218
14219
14220
14221
14222
14223
14224
14225
14226
14227
14228
14229
14230
14231
14232
14233
14234
14235
14236
14237
14238
14239
14240
14241
14242
14243
14244
14245
14246
14247
14248
14249
14250
14251
14252
14253
14254
14255
14256
14257
14258
14259
14260
14261
14262
14263
14264
14265
14266
14267
14268
14269
14270
14271
14272
14273
14274
14275
14276
14277
14278
14279
14280
14281
14282
14283
14284
14285
14286
14287
14288
14289
14290
14291
14292
14293
14294
14295
14296
14297
14298
14299
14300
14301
14302
14303
14304
14305
14306
14307
14308
14309
14310
14311
14312
14313
14314
14315
14316
14317
14318
14319
14320
14321
14322
14323
14324
14325
14326
14327
14328
14329
14330
14331
14332
14333
14334
14335
14336
14337
14338
14339
14340
14341
14342
14343
14344
14345
14346
14347
14348
14349
14350
14351
14352
14353
14354
14355
14356
14357
14358
14359
14360
14361
14362
14363
14364
14365
14366
14367
14368
14369
14370
14371
14372
14373
14374
14375
14376
14377
14378
14379
14380
14381
14382
14383
14384
14385
14386
14387
14388
14389
14390
14391
14392
14393
14394
14395
14396
14397
14398
14399
14400
14401
14402
14403
14404
14405
14406
14407
14408
14409
14410
14411
14412
14413
14414
14415
14416
14417
14418
14419
14420
14421
14422
14423
14424
14425
14426
14427
14428
14429
14430
14431
14432
14433
14434
14435
14436
14437
14438
14439
14440
14441
14442
14443
14444
14445
14446
14447
14448
14449
14450
14451
14452
14453
14454
14455
14456
14457
14458
14459
14460
14461
14462
14463
14464
14465
14466
14467
14468
14469
14470
14471
14472
14473
14474
14475
14476
14477
14478
14479
14480
14481
14482
14483
14484
14485
14486
14487
14488
14489
14490
14491
14492
14493
14494
14495
14496
14497
14498
14499
14500
14501
14502
14503
14504
14505
14506
14507
14508
14509
14510
14511
14512
14513
14514
14515
14516
14517
14518
14519
14520
14521
14522
14523
14524
14525
14526
14527
14528
14529
14530
14531
14532
14533
14534
14535
14536
14537
14538
14539
14540
14541
14542
14543
14544
14545
14546
14547
14548
14549
14550
14551
14552
14553
14554
14555
14556
14557
14558
14559
14560
14561
14562
14563
14564
14565
14566
14567
14568
14569
14570
14571
14572
14573
14574
14575
14576
14577
14578
14579
14580
14581
14582
14583
14584
14585
14586
14587
14588
14589
14590
14591
14592
14593
14594
14595
14596
14597
14598
14599
14600
14601
14602
14603
14604
14605
14606
14607
14608
14609
14610
14611
14612
14613
14614
14615
14616
14617
14618
14619
14620
14621
14622
14623
14624
14625
14626
14627
14628
14629
14630
14631
14632
14633
14634
14635
14636
14637
14638
14639
14640
14641
14642
14643
14644
14645
14646
14647
14648
14649
14650
14651
14652
14653
14654
14655
14656
14657
14658
14659
14660
14661
14662
14663
14664
14665
14666
14667
14668
14669
14670
14671
14672
14673
14674
14675
14676
14677
14678
14679
14680
14681
14682
14683
14684
14685
14686
14687
14688
14689
14690
14691
14692
14693
14694
14695
14696
14697
14698
14699
14700
14701
14702
14703
14704
14705
14706
14707
14708
14709
14710
14711
14712
14713
14714
14715
14716
14717
14718
14719
14720
14721
14722
14723
14724
14725
14726
14727
14728
14729
14730
14731
14732
14733
14734
14735
14736
14737
14738
14739
14740
14741
14742
14743
14744
14745
14746
14747
14748
14749
14750
14751
14752
14753
14754
14755
14756
14757
14758
14759
14760
14761
14762
14763
14764
14765
14766
14767
14768
14769
14770
14771
14772
14773
14774
14775
14776
14777
14778
14779
14780
14781
14782
14783
14784
14785
14786
14787
14788
14789
14790
14791
14792
14793
14794
14795
14796
14797
14798
14799
14800
14801
14802
14803
14804
14805
14806
14807
14808
14809
14810
14811
14812
14813
14814
14815
14816
14817
14818
14819
14820
14821
14822
14823
14824
14825
14826
14827
14828
14829
14830
14831
14832
14833
14834
14835
14836
14837
14838
14839
14840
14841
14842
14843
14844
14845
14846
14847
14848
14849
14850
14851
14852
14853
14854
14855
14856
14857
14858
14859
14860
14861
14862
14863
14864
14865
14866
14867
14868
14869
14870
14871
14872
14873
14874
14875
14876
14877
14878
14879
14880
14881
14882
14883
14884
14885
14886
14887
14888
14889
14890
14891
14892
14893
14894
14895
14896
14897
14898
14899
14900
14901
14902
14903
14904
14905
14906
14907
14908
14909
14910
14911
14912
14913
14914
14915
14916
14917
14918
14919
14920
14921
14922
14923
14924
14925
14926
14927
14928
14929
14930
14931
14932
14933
14934
14935
14936
14937
14938
14939
14940
14941
14942
14943
14944
14945
14946
14947
14948
14949
14950
14951
14952
14953
14954
14955
14956
14957
14958
14959
14960
14961
14962
14963
14964
14965
14966
14967
14968
14969
14970
14971
14972
14973
14974
14975
14976
14977
14978
14979
14980
14981
14982
14983
14984
14985
14986
14987
14988
14989
14990
14991
14992
14993
14994
14995
14996
14997
14998
14999
15000
15001
15002
15003
15004
15005
15006
15007
15008
15009
15010
15011
15012
15013
15014
15015
15016
15017
15018
15019
15020
15021
15022
15023
15024
15025
15026
15027
15028
15029
15030
15031
15032
15033
15034
15035
15036
15037
15038
15039
15040
15041
15042
15043
15044
15045
15046
15047
15048
15049
15050
15051
15052
15053
15054
15055
15056
15057
15058
15059
15060
15061
15062
15063
15064
15065
15066
15067
15068
15069
15070
15071
15072
15073
15074
15075
15076
15077
15078
15079
15080
15081
15082
15083
15084
15085
15086
15087
15088
15089
15090
15091
15092
15093
15094
15095
15096
15097
15098
15099
15100
15101
15102
15103
15104
15105
15106
15107
15108
15109
15110
15111
15112
15113
15114
15115
15116
15117
15118
15119
15120
15121
15122
15123
15124
15125
15126
15127
15128
15129
15130
15131
15132
15133
15134
15135
15136
15137
15138
15139
15140
15141
15142
15143
15144
15145
15146
15147
15148
15149
15150
15151
15152
15153
15154
15155
15156
15157
15158
15159
15160
15161
15162
15163
15164
15165
15166
15167
15168
15169
15170
15171
15172
15173
15174
15175
15176
15177
15178
15179
15180
15181
15182
15183
15184
15185
15186
15187
15188
15189
15190
15191
15192
15193
15194
15195
15196
15197
15198
15199
15200
15201
15202
15203
15204
15205
15206
15207
15208
15209
15210
15211
15212
15213
15214
15215
15216
15217
15218
15219
15220
15221
15222
15223
15224
15225
15226
15227
15228
15229
15230
15231
15232
15233
15234
15235
15236
15237
15238
15239
15240
15241
15242
15243
15244
15245
15246
15247
15248
15249
15250
15251
15252
15253
15254
15255
15256
15257
15258
15259
15260
15261
15262
15263
15264
15265
15266
15267
15268
15269
15270
15271
15272
15273
15274
15275
15276
15277
15278
15279
15280
15281
15282
15283
15284
15285
15286
15287
15288
15289
15290
15291
15292
15293
15294
15295
15296
15297
15298
15299
15300
15301
15302
15303
15304
15305
15306
15307
15308
15309
15310
15311
15312
15313
15314
15315
15316
15317
15318
15319
15320
15321
15322
15323
15324
15325
15326
15327
15328
15329
15330
15331
15332
15333
15334
15335
15336
15337
15338
15339
15340
15341
15342
15343
15344
15345
15346
15347
15348
15349
15350
15351
15352
15353
15354
15355
15356
15357
15358
15359
15360
15361
15362
15363
15364
15365
15366
15367
15368
15369
15370
15371
15372
15373
15374
15375
15376
15377
15378
15379
15380
15381
15382
15383
15384
15385
15386
15387
15388
15389
15390
15391
15392
15393
15394
15395
15396
15397
15398
15399
15400
15401
15402
15403
15404
15405
15406
15407
15408
15409
15410
15411
15412
15413
15414
15415
15416
15417
15418
15419
15420
15421
15422
15423
15424
15425
15426
15427
15428
15429
15430
15431
15432
15433
15434
15435
15436
15437
15438
15439
15440
15441
15442
15443
15444
15445
15446
15447
15448
15449
15450
15451
15452
15453
15454
15455
15456
15457
15458
15459
15460
15461
15462
15463
15464
15465
15466
15467
15468
15469
15470
15471
15472
15473
15474
15475
15476
15477
15478
15479
15480
15481
15482
15483
15484
15485
15486
15487
15488
15489
15490
15491
15492
15493
15494
15495
15496
15497
15498
15499
15500
15501
15502
15503
15504
15505
15506
15507
15508
15509
15510
15511
15512
15513
15514
15515
15516
15517
15518
15519
15520
15521
15522
15523
15524
15525
15526
15527
15528
15529
15530
15531
15532
15533
15534
15535
15536
15537
15538
15539
15540
15541
15542
15543
15544
15545
15546
15547
15548
15549
15550
15551
15552
15553
15554
15555
15556
15557
15558
15559
15560
15561
15562
15563
15564
15565
15566
15567
15568
15569
15570
15571
15572
15573
15574
15575
15576
15577
15578
15579
15580
15581
15582
15583
15584
15585
15586
15587
15588
15589
15590
15591
15592
15593
15594
15595
15596
15597
15598
15599
15600
15601
15602
15603
15604
15605
15606
15607
15608
15609
15610
15611
15612
15613
15614
15615
15616
15617
15618
15619
15620
15621
15622
15623
15624
15625
15626
15627
15628
15629
15630
15631
15632
15633
15634
15635
15636
15637
15638
15639
15640
15641
15642
15643
15644
15645
15646
15647
15648
15649
15650
15651
15652
15653
15654
15655
15656
15657
15658
15659
15660
15661
15662
15663
15664
15665
15666
15667
15668
15669
15670
15671
15672
15673
15674
15675
15676
15677
15678
15679
15680
15681
15682
15683
15684
15685
15686
15687
15688
15689
15690
15691
15692
15693
15694
15695
15696
15697
15698
15699
15700
15701
15702
15703
15704
15705
15706
15707
15708
15709
15710
15711
15712
15713
15714
15715
15716
15717
15718
15719
15720
15721
15722
15723
15724
15725
15726
15727
15728
15729
15730
15731
15732
15733
15734
15735
15736
15737
15738
15739
15740
15741
15742
15743
15744
15745
15746
15747
15748
15749
15750
15751
15752
15753
15754
15755
15756
15757
15758
15759
15760
15761
15762
15763
15764
15765
15766
15767
15768
15769
15770
15771
15772
15773
15774
15775
15776
15777
15778
15779
15780
15781
15782
15783
15784
15785
15786
15787
15788
15789
15790
15791
15792
15793
15794
15795
15796
15797
15798
15799
15800
15801
15802
15803
15804
15805
15806
15807
15808
15809
15810
15811
15812
15813
15814
15815
15816
15817
15818
15819
15820
15821
15822
15823
15824
15825
15826
15827
15828
15829
15830
15831
15832
15833
15834
15835
15836
15837
15838
15839
15840
15841
15842
15843
15844
15845
15846
15847
15848
15849
15850
15851
15852
15853
15854
15855
15856
15857
15858
15859
15860
15861
15862
15863
15864
15865
15866
15867
15868
15869
15870
15871
15872
15873
15874
15875
15876
15877
15878
15879
15880
15881
15882
15883
15884
15885
15886
15887
15888
15889
15890
15891
15892
15893
15894
15895
15896
15897
15898
15899
15900
15901
15902
15903
15904
15905
15906
15907
15908
15909
15910
15911
15912
15913
15914
15915
15916
15917
15918
15919
15920
15921
15922
15923
15924
15925
15926
15927
15928
15929
15930
15931
15932
15933
15934
15935
15936
15937
15938
15939
15940
15941
15942
15943
15944
15945
15946
15947
15948
15949
15950
15951
15952
15953
15954
15955
15956
15957
15958
15959
15960
15961
15962
15963
15964
15965
15966
15967
15968
15969
15970
15971
15972
15973
15974
15975
15976
15977
15978
15979
15980
15981
15982
15983
15984
15985
15986
15987
15988
15989
15990
15991
15992
15993
15994
15995
15996
15997
15998
15999
16000
16001
16002
16003
16004
16005
16006
16007
16008
16009
16010
16011
16012
16013
16014
16015
16016
16017
16018
16019
16020
16021
16022
16023
16024
16025
16026
16027
16028
16029
16030
16031
16032
16033
16034
16035
16036
16037
16038
16039
16040
16041
16042
16043
16044
16045
16046
16047
16048
16049
16050
16051
16052
16053
16054
16055
16056
16057
16058
16059
16060
16061
16062
16063
16064
16065
16066
16067
16068
16069
16070
16071
16072
16073
16074
16075
16076
16077
16078
16079
16080
16081
16082
16083
16084
16085
16086
16087
16088
16089
16090
16091
16092
16093
16094
16095
16096
16097
16098
16099
16100
16101
16102
16103
16104
16105
16106
16107
16108
16109
16110
16111
16112
16113
16114
16115
16116
16117
16118
16119
16120
16121
16122
16123
16124
16125
16126
16127
16128
16129
16130
16131
16132
16133
16134
16135
16136
16137
16138
16139
16140
16141
16142
16143
16144
16145
16146
16147
16148
16149
16150
16151
16152
16153
16154
16155
16156
16157
16158
16159
16160
16161
16162
16163
16164
16165
16166
16167
16168
16169
16170
16171
16172
16173
16174
16175
16176
16177
16178
16179
16180
16181
16182
16183
16184
16185
16186
16187
16188
16189
16190
16191
16192
16193
16194
16195
16196
16197
16198
16199
16200
16201
16202
16203
16204
16205
16206
16207
16208
16209
16210
16211
16212
16213
16214
16215
16216
16217
16218
16219
16220
16221
16222
16223
16224
16225
16226
16227
16228
16229
16230
16231
16232
16233
16234
16235
16236
16237
16238
16239
16240
16241
16242
16243
16244
16245
16246
16247
16248
16249
16250
16251
16252
16253
16254
16255
16256
16257
16258
16259
16260
16261
16262
16263
16264
16265
16266
16267
16268
16269
16270
16271
16272
16273
16274
16275
16276
16277
16278
16279
16280
16281
16282
16283
16284
16285
16286
16287
16288
16289
16290
16291
16292
16293
16294
16295
16296
16297
16298
16299
16300
16301
16302
16303
16304
16305
16306
16307
16308
16309
16310
16311
16312
16313
16314
16315
16316
16317
16318
16319
16320
16321
16322
16323
16324
16325
16326
16327
16328
16329
16330
16331
16332
16333
16334
16335
16336
16337
16338
16339
16340
16341
16342
16343
16344
16345
16346
16347
16348
16349
16350
16351
16352
16353
16354
16355
16356
16357
16358
16359
16360
16361
16362
16363
16364
16365
16366
16367
16368
16369
16370
16371
16372
16373
16374
16375
16376
16377
16378
16379
16380
16381
16382
16383
16384
16385
16386
16387
16388
16389
16390
16391
16392
16393
16394
16395
16396
16397
16398
16399
16400
16401
16402
16403
16404
16405
16406
16407
16408
16409
16410
16411
16412
16413
16414
16415
16416
16417
16418
16419
16420
16421
16422
16423
16424
16425
16426
16427
16428
16429
16430
16431
16432
16433
16434
16435
16436
16437
16438
16439
16440
16441
16442
16443
16444
16445
16446
16447
16448
16449
16450
16451
16452
16453
16454
16455
16456
16457
16458
16459
16460
16461
16462
16463
16464
16465
16466
16467
16468
16469
16470
16471
16472
16473
16474
16475
16476
16477
16478
16479
16480
16481
16482
16483
16484
16485
16486
16487
16488
16489
16490
16491
16492
16493
16494
16495
16496
16497
16498
16499
16500
16501
16502
16503
16504
16505
16506
16507
16508
16509
16510
16511
16512
16513
16514
16515
16516
16517
16518
16519
16520
16521
16522
16523
16524
16525
16526
16527
16528
16529
16530
16531
16532
16533
16534
16535
16536
16537
16538
16539
16540
16541
16542
16543
16544
16545
16546
16547
16548
16549
16550
16551
16552
16553
16554
16555
16556
16557
16558
16559
16560
16561
16562
16563
16564
16565
16566
16567
16568
16569
16570
16571
16572
16573
16574
16575
16576
16577
16578
16579
16580
16581
16582
16583
16584
16585
16586
16587
16588
16589
16590
16591
16592
16593
16594
16595
16596
16597
16598
16599
16600
16601
16602
16603
16604
16605
16606
16607
16608
16609
16610
16611
16612
16613
16614
16615
16616
16617
16618
16619
16620
16621
16622
16623
16624
16625
16626
16627
16628
16629
16630
16631
16632
16633
16634
16635
16636
16637
16638
16639
16640
16641
16642
16643
16644
16645
16646
16647
16648
16649
16650
16651
16652
16653
16654
16655
16656
16657
16658
16659
16660
16661
16662
16663
16664
16665
16666
16667
16668
16669
16670
16671
16672
16673
16674
16675
16676
16677
16678
16679
16680
16681
16682
16683
16684
16685
16686
16687
16688
16689
16690
16691
16692
16693
16694
16695
16696
16697
16698
16699
16700
16701
16702
16703
16704
16705
16706
16707
16708
16709
16710
16711
16712
16713
16714
16715
16716
16717
16718
16719
16720
16721
16722
16723
16724
16725
16726
16727
16728
16729
16730
16731
16732
16733
16734
16735
16736
16737
16738
16739
16740
16741
16742
16743
16744
16745
16746
16747
16748
16749
16750
16751
16752
16753
16754
16755
16756
16757
16758
16759
16760
16761
16762
16763
16764
16765
16766
16767
16768
16769
16770
16771
16772
16773
16774
16775
16776
16777
16778
16779
16780
16781
16782
16783
16784
16785
16786
16787
16788
16789
16790
16791
16792
16793
16794
16795
16796
16797
16798
16799
16800
16801
16802
16803
16804
16805
16806
16807
16808
16809
16810
16811
16812
16813
16814
16815
16816
16817
16818
16819
16820
16821
16822
16823
16824
16825
16826
16827
16828
16829
16830
16831
16832
16833
16834
16835
16836
16837
16838
16839
16840
16841
16842
16843
16844
16845
16846
16847
16848
16849
16850
16851
16852
16853
16854
16855
16856
16857
16858
16859
16860
16861
16862
16863
16864
16865
16866
16867
16868
16869
16870
16871
16872
16873
16874
16875
16876
16877
16878
16879
16880
16881
16882
16883
16884
16885
16886
16887
16888
16889
16890
16891
16892
16893
16894
16895
16896
16897
16898
16899
16900
16901
16902
16903
16904
16905
16906
16907
16908
16909
16910
16911
16912
16913
16914
16915
16916
16917
16918
16919
16920
16921
16922
16923
16924
16925
16926
16927
16928
16929
16930
16931
16932
16933
16934
16935
16936
16937
16938
16939
16940
16941
16942
16943
16944
16945
16946
16947
16948
16949
16950
16951
16952
16953
16954
16955
16956
16957
16958
16959
16960
16961
16962
16963
16964
16965
16966
16967
16968
16969
16970
16971
16972
16973
16974
16975
16976
16977
16978
16979
16980
16981
16982
16983
16984
16985
16986
16987
16988
16989
16990
16991
16992
16993
16994
16995
16996
16997
16998
16999
17000
17001
17002
17003
17004
17005
17006
17007
17008
17009
17010
17011
17012
17013
17014
17015
17016
17017
17018
17019
17020
17021
17022
17023
17024
17025
17026
17027
17028
17029
17030
17031
17032
17033
17034
17035
17036
17037
17038
17039
17040
17041
17042
17043
17044
17045
17046
17047
17048
17049
17050
17051
17052
17053
17054
17055
17056
17057
17058
17059
17060
17061
17062
17063
17064
17065
17066
17067
17068
17069
17070
17071
17072
17073
17074
17075
17076
17077
17078
17079
17080
17081
17082
17083
17084
17085
17086
17087
17088
17089
17090
17091
17092
17093
17094
17095
17096
17097
17098
17099
17100
17101
17102
17103
17104
17105
17106
17107
17108
17109
17110
17111
17112
17113
17114
17115
17116
17117
17118
17119
17120
17121
17122
17123
17124
17125
17126
17127
17128
17129
17130
17131
17132
17133
17134
17135
17136
17137
17138
17139
17140
17141
17142
17143
17144
17145
17146
17147
17148
17149
17150
17151
17152
17153
17154
17155
17156
17157
17158
17159
17160
17161
17162
17163
17164
17165
17166
17167
17168
17169
17170
17171
17172
17173
17174
17175
17176
17177
17178
17179
17180
17181
17182
17183
17184
17185
17186
17187
17188
17189
17190
17191
17192
17193
17194
17195
17196
17197
17198
17199
17200
17201
17202
17203
17204
17205
17206
17207
17208
17209
17210
17211
17212
17213
17214
17215
17216
17217
17218
17219
17220
17221
17222
17223
17224
17225
17226
17227
17228
17229
17230
17231
17232
17233
17234
17235
17236
17237
17238
17239
17240
17241
17242
17243
17244
17245
17246
17247
17248
17249
17250
17251
17252
17253
17254
17255
17256
17257
17258
17259
17260
17261
17262
17263
17264
17265
17266
17267
17268
17269
17270
17271
17272
17273
17274
17275
17276
17277
17278
17279
17280
17281
17282
17283
17284
17285
17286
17287
17288
17289
17290
17291
17292
17293
17294
17295
17296
17297
17298
17299
17300
17301
17302
17303
17304
17305
17306
17307
17308
17309
17310
17311
17312
17313
17314
17315
17316
17317
17318
17319
17320
17321
17322
17323
17324
17325
17326
17327
17328
17329
17330
17331
17332
17333
17334
17335
17336
17337
17338
17339
17340
17341
17342
17343
17344
17345
17346
17347
17348
17349
17350
17351
17352
17353
17354
17355
17356
17357
17358
17359
17360
17361
17362
17363
17364
17365
17366
17367
17368
17369
17370
17371
17372
17373
17374
17375
17376
17377
17378
17379
17380
17381
17382
17383
17384
17385
17386
17387
17388
17389
17390
17391
17392
17393
17394
17395
17396
17397
17398
17399
17400
17401
17402
17403
17404
17405
17406
17407
17408
17409
17410
17411
17412
17413
17414
17415
17416
17417
17418
17419
17420
17421
17422
17423
17424
17425
17426
17427
17428
17429
17430
17431
17432
17433
17434
17435
17436
17437
17438
17439
17440
17441
17442
17443
17444
17445
17446
17447
17448
17449
17450
17451
17452
17453
17454
17455
17456
17457
17458
17459
17460
17461
17462
17463
17464
17465
17466
17467
17468
17469
17470
17471
17472
17473
17474
17475
17476
17477
17478
17479
17480
17481
17482
17483
17484
17485
17486
17487
17488
17489
17490
17491
17492
17493
17494
17495
17496
17497
17498
17499
17500
17501
17502
17503
17504
17505
17506
17507
17508
17509
17510
17511
17512
17513
17514
17515
17516
17517
17518
17519
17520
17521
17522
17523
17524
17525
17526
17527
17528
17529
17530
17531
17532
17533
17534
17535
17536
17537
17538
17539
17540
17541
17542
17543
17544
17545
17546
17547
17548
17549
17550
17551
17552
17553
17554
17555
17556
17557
17558
17559
17560
17561
17562
17563
17564
17565
17566
17567
17568
17569
17570
17571
17572
17573
17574
17575
17576
17577
17578
17579
17580
17581
17582
17583
17584
17585
17586
17587
17588
17589
17590
17591
17592
17593
17594
17595
17596
17597
17598
17599
17600
17601
17602
17603
17604
17605
17606
17607
17608
17609
17610
17611
17612
17613
17614
17615
17616
17617
17618
17619
17620
17621
17622
17623
17624
17625
17626
17627
17628
17629
17630
17631
17632
17633
17634
17635
17636
17637
17638
17639
17640
17641
17642
17643
17644
17645
17646
17647
17648
17649
17650
17651
17652
17653
17654
17655
17656
17657
17658
17659
17660
17661
17662
17663
17664
17665
17666
17667
17668
17669
17670
17671
17672
17673
17674
17675
17676
17677
17678
17679
17680
17681
17682
17683
17684
17685
17686
17687
17688
17689
17690
17691
17692
17693
17694
17695
17696
17697
17698
17699
17700
17701
17702
17703
17704
17705
17706
17707
17708
17709
17710
17711
17712
17713
17714
17715
17716
17717
17718
17719
17720
17721
17722
17723
17724
17725
17726
17727
17728
17729
17730
17731
17732
17733
17734
17735
17736
17737
17738
17739
17740
17741
17742
17743
17744
17745
17746
17747
17748
17749
17750
17751
17752
17753
17754
17755
17756
17757
17758
17759
17760
17761
17762
17763
17764
17765
17766
17767
17768
17769
17770
17771
17772
17773
17774
17775
17776
17777
17778
17779
17780
17781
17782
17783
17784
17785
17786
17787
17788
17789
17790
17791
17792
17793
17794
17795
17796
17797
17798
17799
17800
17801
17802
17803
17804
17805
17806
17807
17808
17809
17810
17811
17812
17813
17814
17815
17816
17817
17818
17819
17820
17821
17822
17823
17824
17825
17826
17827
17828
17829
17830
17831
17832
17833
17834
17835
17836
17837
17838
17839
17840
17841
17842
17843
17844
17845
17846
17847
17848
17849
17850
17851
17852
17853
17854
17855
17856
17857
17858
17859
17860
17861
17862
17863
17864
17865
17866
17867
17868
17869
17870
17871
17872
17873
17874
17875
17876
17877
17878
17879
17880
17881
17882
17883
17884
17885
17886
17887
17888
17889
17890
17891
17892
17893
17894
17895
17896
17897
17898
17899
17900
17901
17902
17903
17904
17905
17906
17907
17908
17909
17910
17911
17912
17913
17914
17915
17916
17917
17918
17919
17920
17921
17922
17923
17924
17925
17926
17927
17928
17929
17930
17931
17932
17933
17934
17935
17936
17937
17938
17939
17940
17941
17942
17943
17944
17945
17946
17947
17948
17949
17950
17951
17952
17953
17954
17955
17956
17957
17958
17959
17960
17961
17962
17963
17964
17965
17966
17967
17968
17969
17970
17971
17972
17973
17974
17975
17976
17977
17978
17979
17980
17981
17982
17983
17984
17985
17986
17987
17988
17989
17990
17991
17992
17993
17994
17995
17996
17997
17998
17999
18000
18001
18002
18003
18004
18005
18006
18007
18008
18009
18010
18011
18012
18013
18014
18015
18016
18017
18018
18019
18020
18021
18022
18023
18024
18025
18026
18027
18028
18029
18030
18031
18032
18033
18034
18035
18036
18037
18038
18039
18040
18041
18042
18043
18044
18045
18046
18047
18048
18049
18050
18051
18052
18053
18054
18055
18056
18057
18058
18059
18060
18061
18062
18063
18064
18065
18066
18067
18068
18069
18070
18071
18072
18073
18074
18075
18076
18077
18078
18079
18080
18081
18082
18083
18084
18085
18086
18087
18088
18089
18090
18091
18092
18093
18094
18095
18096
18097
18098
18099
18100
18101
18102
18103
18104
18105
18106
18107
18108
18109
18110
18111
18112
18113
18114
18115
18116
18117
18118
18119
18120
18121
18122
18123
18124
18125
18126
18127
18128
18129
18130
18131
18132
18133
18134
18135
18136
18137
18138
18139
18140
18141
18142
18143
18144
18145
18146
18147
18148
18149
18150
18151
18152
18153
18154
18155
18156
18157
18158
18159
18160
18161
18162
18163
18164
18165
18166
18167
18168
18169
18170
18171
18172
18173
18174
18175
18176
18177
18178
18179
18180
18181
18182
18183
18184
18185
18186
18187
18188
18189
18190
18191
18192
18193
18194
18195
18196
18197
18198
18199
18200
18201
18202
18203
18204
18205
18206
18207
18208
18209
18210
18211
18212
18213
18214
18215
18216
18217
18218
18219
18220
18221
18222
18223
18224
18225
18226
18227
18228
18229
18230
18231
18232
18233
18234
18235
18236
18237
18238
18239
18240
18241
18242
18243
18244
18245
18246
18247
18248
18249
18250
18251
18252
18253
18254
18255
18256
18257
18258
18259
18260
18261
18262
18263
18264
18265
18266
18267
18268
18269
18270
18271
18272
18273
18274
18275
18276
18277
18278
18279
18280
18281
18282
18283
18284
18285
18286
18287
18288
18289
18290
18291
18292
18293
18294
18295
18296
18297
18298
18299
18300
18301
18302
18303
18304
18305
18306
18307
18308
18309
18310
18311
18312
18313
18314
18315
18316
18317
18318
18319
18320
18321
18322
18323
18324
18325
18326
18327
18328
18329
18330
18331
18332
18333
18334
18335
18336
18337
18338
18339
18340
18341
18342
18343
29012
29013
29014
29015
29016
29017
29018
29019
29020
29021
29022
29023
29024
29025
29026
29027
29028
29029
29030
29031
29032
29033
29034
29035
29036
29037
29038
29039
29040
29041
29042
29043
29044
29045
29046
29047
29048
29049
29050
29051
29052
29053
29054
29055
29056
29057
29058
29059
29060
29061
29062
29063
29064
29065
29066
29067
29068
29069
29070
29071
29072
29073
29074
29075
29076
29077
29078
29079
29080
29081
29082
29083
29084
29085
29086
29087
29088
29089
29090
29091
29092
29093
29094
29095
29096
29097
29098
29099
29100
29101
29102
29103
29104
29105
29106
29107
29108
29109
29110
29111
29112
29113
29114
29115
29116
29117
29118
29119
29120
29121
29122
29123
29124
29125
29126
29127
29128
29129
29130
29131
29132
29133
29134
29135
29136
29137
29138
29139
29140
29141
29142
29143
29144
29145
29146
29147
29148
29149
29150
29151
29152
29153
29154
29155
29156
29157
29158
29159
29160
29161
29162
29163
29164
29165
29166
29167
29168
29169
29170
29171
29172
29173
29174
29175
29176
29177
29178
29179
29180
29181
29182
29183
29184
29185
29186
29187
29188
29189
29190
29191
29192
29193
29194
29195
29196
29197
29198
29199
29200
29201
29202
29203
29204
29205
29206
29207
29208
29209
29210
29211
29212
29213
29214
29215
29216
29217
29218
29219
29220
29221
29222
29223
29224
29225
29226
29227
29228
29229
29230
29231
29232
29233
29234
29235
29236
29237
29238
29239
29240
29241
29242
29243
29244
29245
29246
29247
29248
29249
29250
29251
29252
29253
29254
29255
29256
29257
29258
29259
29260
29261
29262
29263
29264
29265
29266
29267
29268
29269
29270
29271
29272
29273
29274
29275
29276
29277
29278
29279
29280
29281
29282
29283
29284
29285
29286
29287
29288
29289
29290
29291
29292
29293
29294
29295
29296
29297
29298
29299
29300
29301
29302
29303
29304
29305
29306
29307
29308
29309
29310
29311
29312
29313
29314
29315
29316
29317
29318
29319
29320
29321
29322
29323
29324
29325
29326
29327
29328
29329
29330
29331
29332
29333
29334
29335
29336
29337
29338
29339
29340
29341
29342
29343
29344
29345
29346
29347
29348
29349
29350
29351
29352
29353
29354
29355
29356
29357
29358
29359
29360
29361
29362
29363
29364
29365
29366
29367
29368
29369
29370
29371
29372
29373
29374
29375
29376
29377
29378
29379
29380
29381
29382
29383
29384
29385
29386
29387
29388
29389
29390
29391
29392
29393
29394
29395
29396
29397
29398
29399
29400
29401
29402
29403
29404
29405
29406
29407
29408
29409
29410
29411
29412
29413
29414
29415
29416
29417
29418
29419
29420
29421
29422
29423
29424
29425
29426
29427
29428
29429
29430
29431
29432
29433
29434
29435
29436
29437
29438
29439
29440
29441
29442
29443
29444
29445
29446
29447
29448
29449
29450
29451
29452
29453
29454
29455
29456
29457
29458
29459
29460
29461
29462
29463
29464
29465
29466
29467
29468
29469
29470
29471
29472
29473
29474
29475
29476
29477
29478
29479
29480
29481
29482
29483
29484
29485
29486
29487
29488
29489
29490
29491
29492
29493
29494
29495
29496
29497
29498
29499
29500
29501
29502
29503
29504
29505
29506
29507
29508
29509
29510
29511
29512
29513
29514
29515
29516
29517
29518
29519
29520
29521
29522
29523
29524
29525
29526
29527
29528
29529
29530
29531
29532
29533
29534
29535
29536
29537
29538
29539
29540
29541
29542
29543
29544
29545
29546
29547
29548
29549
29550
29551
29552
29553
29554
29555
29556
29557
29558
29559
29560
29561
29562
29563
29564
29565
29566
29567
29568
29569
29570
29571
29572
29573
29574
29575
29576
29577
29578
29579
29580
29581
29582
29583
29584
29585
29586
29587
29588
29589
29590
29591
29592
29593
29594
29595
29596
29597
29598
29599
29600
29601
29602
29603
29604
29605
29606
29607
29608
29609
29610
29611
29612
29613
29614
29615
29616
29617
29618
29619
29620
29621
29622
29623
29624
29625
29626
29627
29628
29629
29630
29631
29632
29633
29634
29635
29636
29637
29638
29639
29640
29641
29642
29643
29644
29645
29646
29647
29648
29649
29650
29651
29652
29653
29654
29655
29656
29657
29658
29659
29660
29661
29662
29663
29664
29665
29666
29667
29668
29669
29670
29671
29672
29673
29674
29675
29676
29677
29678
29679
29680
29681
29682
29683
29684
29685
29686
29687
29688
29689
29690
29691
29692
29693
29694
29695
29696
29697
29698
29699
29700
29701
29702
29703
29704
29705
29706
29707
29708
29709
29710
29711
29712
29713
29714
29715
29716
29717
29718
29719
29720
29721
29722
29723
29724
29725
29726
29727
29728
29729
29730
29731
29732
29733
29734
29735
29736
29737
29738
29739
29740
29741
29742
29743
29744
29745
29746
29747
29748
29749
29750
29751
29752
29753
29754
29755
29756
29757
29758
29759
29760
29761
29762
29763
29764
29765
29766
29767
29768
29769
29770
29771
29772
29773
29774
29775
29776
29777
29778
29779
29780
29781
29782
29783
29784
29785
29786
29787
29788
29789
29790
29791
29792
29793
29794
29795
29796
29797
29798
29799
29800
29801
29802
29803
29804
29805
29806
29807
29808
29809
29810
29811
29812
29813
29814
29815
29816
29817
29818
29819
29820
29821
29822
29823
29824
29825
29826
29827
29828
29829
29830
29831
29832
29833
29834
29835
29836
29837
29838
29839
29840
29841
29842
29843
29844
29845
29846
29847
29848
29849
29850
29851
29852
29853
29854
29855
29856
29857
29858
29859
29860
29861
29862
29863
29864
29865
29866
29867
29868
29869
29870
29871
29872
29873
29874
29875
29876
29877
29878
29879
29880
29881
29882
29883
29884
29885
29886
29887
29888
29889
29890
29891
29892
29893
29894
29895
29896
29897
29898
29899
29900
29901
29902
29903
29904
29905
29906
29907
29908
29909
29910
29911
29912
29913
29914
29915
29916
29917
29918
29919
29920
29921
29922
29923
29924
29925
29926
29927
29928
29929
29930
29931
29932
29933
29934
29935
29936
29937
29938
29939
29940
29941
29942
29943
29944
29945
29946
29947
29948
29949
29950
29951
29952
29953
29954
29955
29956
29957
29958
29959
29960
29961
29962
29963
29964
29965
29966
29967
29968
29969
29970
29971
29972
29973
29974
29975
29976
29977
29978
29979
29980
29981
29982
29983
29984
29985
29986
29987
29988
29989
29990
29991
29992
29993
29994
29995
29996
29997
29998
29999
30000
30001
30002
30003
30004
30005
30006
30007
30008
30009
30010
30011
30012
30013
30014
30015
30016
30017
30018
30019
30020
30021
30022
30023
30024
30025
30026
30027
30028
30029
30030
30031
30032
30033
30034
30035
30036
30037
30038
30039
30040
30041
30042
30043
30044
30045
30046
30047
30048
30049
30050
30051
30052
30053
30054
30055
30056
30057
30058
30059
30060
30061
30062
30063
30064
30065
30066
30067
30068
30069
30070
30071
30072
30073
30074
30075
30076
30077
30078
30079
30080
30081
30082
30083
30084
30085
30086
30087
30088
30089
30090
30091
30092
30093
30094
30095
30096
30097
30098
30099
30100
30101
30102
30103
30104
30105
30106
30107
30108
30109
30110
30111
30112
30113
30114
30115
30116
30117
30118
30119
30120
30121
30122
30123
30124
30125
30126
30127
30128
30129
30130
30131
30132
30133
30134
30135
30136
30137
30138
30139
30140
30141
30142
30143
30144
30145
30146
30147
30148
30149
30150
30151
30152
30153
30154
30155
30156
30157
30158
30159
30160
30161
30162
30163
30164
30165
30166
30167
30168
30169
30170
30171
30172
30173
30174
30175
30176
30177
30178
30179
30180
30181
30182
30183
30184
30185
30186
30187
30188
30189
30190
30191
30192
30193
30194
30195
30196
30197
30198
30199
30200
30201
30202
30203
30204
30205
30206
30207
30208
30209
30210
30211
30212
30213
30214
30215
30216
30217
30218
30219
30220
30221
30222
30223
30224
30225
30226
30227
30228
30229
30230
30231
30232
30233
30234
30235
30236
30237
30238
30239
30240
30241
30242
30243
30244
30245
30246
30247
30248
30249
30250
30251
30252
30253
30254
30255
30256
30257
30258
30259
30260
30261
30262
30263
30264
30265
30266
30267
30268
30269
30270
30271
30272
30273
30274
30275
30276
30277
30278
30279
30280
30281
30282
30283
30284
30285
30286
30287
30288
30289
30290
30291
30292
30293
30294
30295
30296
30297
30298
30299
30300
30301
30302
30303
30304
30305
30306
30307
30308
30309
30310
30311
30312
30313
30314
30315
30316
30317
30318
30319
30320
30321
30322
30323
30324
30325
30326
30327
30328
30329
30330
30331
30332
30333
30334
30335
30336
30337
30338
30339
30340
30341
30342
30343
30344
30345
30346
30347
30348
30349
30350
30351
30352
30353
30354
30355
30356
30357
30358
30359
30360
30361
30362
30363
30364
30365
30366
30367
30368
30369
30370
30371
30372
30373
30374
30375
30376
30377
30378
30379
30380
30381
30382
30383
30384
30385
30386
30387
30388
30389
30390
30391
30392
30393
30394
30395
30396
30397
30398
30399
30400
30401
30402
30403
30404
30405
30406
30407
30408
30409
30410
30411
30412
30413
30414
30415
30416
30417
30418
30419
30420
30421
30422
30423
30424
30425
30426
30427
30428
30429
30430
30431
30432
30433
30434
30435
30436
30437
30438
30439
30440
30441
30442
30443
30444
30445
30446
30447
30448
30449
30450
30451
30452
30453
30454
30455
30456
30457
30458
30459
30460
30461
30462
30463
30464
30465
30466
30467
30468
30469
30470
30471
30472
30473
30474
30475
30476
30477
30478
30479
30480
30481
30482
30483
30484
30485
30486
30487
30488
30489
30490
30491
30492
30493
30494
30495
30496
30497
30498
30499
30500
30501
30502
30503
30504
30505
30506
30507
30508
30509
30510
30511
30512
30513
30514
30515
30516
30517
30518
30519
30520
30521
30522
30523
30524
30525
30526
30527
30528
30529
30530
30531
30532
30533
30534
30535
30536
30537
30538
30539
30540
30541
30542
30543
30544
30545
30546
30547
30548
30549
30550
30551
30552
30553
30554
30555
30556
30557
30558
30559
30560
30561
30562
30563
30564
30565
30566
30567
30568
30569
30570
30571
30572
30573
30574
30575
30576
30577
30578
30579
30580
30581
30582
30583
30584
30585
30586
30587
30588
30589
30590
30591
30592
30593
30594
30595
30596
30597
30598
30599
30600
30601
30602
30603
30604
30605
30606
30607
30608
30609
30610
30611
30612
30613
30614
30615
30616
30617
30618
30619
30620
30621
30622
30623
30624
30625
30626
30627
30628
30629
30630
30631
30632
30633
30634
30635
30636
30637
30638
30639
30640
30641
30642
30643
30644
30645
30646
30647
30648
30649
30650
30651
30652
30653
30654
30655
30656
30657
30658
30659
30660
30661
30662
30663
30664
30665
30666
30667
30668
30669
30670
30671
30672
30673
30674
30675
30676
30677
30678
30679
30680
30681
30682
30683
30684
30685
30686
30687
30688
30689
30690
30691
30692
30693
30694
30695
30696
30697
30698
30699
30700
30701
30702
30703
30704
30705
30706
30707
30708
30709
30710
30711
30712
30713
30714
30715
30716
30717
30718
30719
30720
30721
30722
30723
30724
30725
30726
30727
30728
30729
30730
30731
30732
30733
30734
30735
30736
30737
30738
30739
30740
30741
30742
30743
30744
30745
30746
30747
30748
30749
30750
30751
30752
30753
30754
30755
30756
30757
30758
30759
30760
30761
30762
30763
30764
30765
30766
30767
30768
30769
30770
30771
30772
30773
30774
30775
30776
30777
30778
30779
30780
30781
30782
30783
30784
30785
30786
30787
30788
30789
30790
30791
30792
30793
30794
30795
30796
30797
30798
30799
30800
30801
30802
30803
30804
30805
30806
30807
30808
30809
30810
30811
30812
30813
30814
30815
30816
30817
30818
30819
30820
30821
30822
30823
30824
30825
30826
30827
30828
30829
30830
30831
30832
30833
30834
30835
30836
30837
30838
30839
30840
30841
30842
30843
30844
30845
30846
30847
30848
30849
30850
30851
30852
30853
30854
30855
30856
30857
30858
30859
30860
30861
30862
30863
30864
30865
30866
30867
30868
30869
30870
30871
30872
30873
30874
30875
30876
30877
30878
30879
30880
30881
30882
30883
30884
30885
30886
30887
30888
30889
30890
30891
30892
30893
30894
30895
30896
30897
30898
30899
30900
30901
30902
30903
30904
30905
30906
30907
30908
30909
30910
30911
30912
30913
30914
30915
30916
30917
30918
30919
30920
30921
30922
30923
30924
30925
30926
30927
30928
30929
30930
30931
30932
30933
30934
30935
30936
30937
30938
30939
30940
30941
30942
30943
30944
30945
30946
30947
30948
30949
30950
30951
30952
30953
30954
30955
30956
30957
30958
30959
30960
30961
30962
30963
30964
30965
30966
30967
30968
30969
30970
30971
30972
30973
30974
30975
30976
30977
30978
30979
30980
30981
30982
30983
30984
30985
30986
30987
30988
30989
30990
30991
30992
30993
30994
30995
30996
30997
30998
30999
31000
31001
31002
31003
31004
31005
31006
31007
31008
31009
31010
31011
31012
31013
31014
31015
31016
31017
31018
31019
31020
31021
31022
31023
31024
31025
31026
31027
31028
31029
31030
31031
31032
31033
31034
31035
31036
31037
31038
31039
31040
31041
31042
31043
31044
31045
31046
31047
31048
31049
31050
31051
31052
31053
31054
31055
31056
31057
31058
31059
31060
31061
31062
31063
31064
31065
31066
31067
31068
31069
31070
31071
31072
31073
31074
31075
31076
31077
31078
31079
31080
31081
31082
31083
31084
31085
31086
31087
31088
31089
31090
31091
31092
31093
31094
31095
31096
31097
31098
31099
31100
31101
31102
31103
31104
31105
31106
31107
31108
31109
31110
31111
31112
31113
31114
31115
31116
31117
31118
31119
31120
31121
31122
31123
31124
31125
31126
31127
31128
31129
31130
31131
31132
31133
31134
31135
31136
31137
31138
31139
31140
31141
31142
31143
31144
31145
31146
31147
31148
31149
31150
31151
31152
31153
31154
31155
31156
31157
31158
31159
31160
31161
31162
31163
31164
31165
31166
31167
31168
31169
31170
31171
31172
31173
31174
31175
31176
31177
31178
31179
31180
31181
31182
31183
31184
31185
31186
31187
31188
31189
31190
31191
31192
31193
31194
31195
31196
31197
31198
31199
31200
31201
31202
31203
31204
31205
31206
31207
31208
31209
31210
31211
31212
31213
31214
31215
31216
31217
31218
31219
31220
31221
31222
31223
31224
31225
31226
31227
31228
31229
31230
31231
31232
31233
31234
31235
31236
31237
31238
31239
31240
31241
31242
31243
31244
31245
31246
31247
31248
31249
31250
31251
31252
31253
31254
31255
31256
31257
31258
31259
31260
31261
31262
31263
31264
31265
31266
31267
31268
31269
31270
31271
31272
31273
31274
31275
31276
31277
31278
31279
31280
31281
31282
31283
31284
31285
31286
31287
31288
31289
31290
31291
31292
31293
31294
31295
31296
31297
31298
31299
31300
31301
31302
31303
31304
31305
31306
31307
31308
31309
31310
31311
31312
31313
31314
31315
31316
31317
31318
31319
31320
31321
31322
31323
31324
31325
31326
31327
31328
31329
31330
31331
31332
31333
31334
31335
31336
31337
31338
31339
31340
31341
31342
31343
31344
31345
31346
31347
31348
31349
31350
31351
31352
31353
31354
31355
31356
31357
31358
31359
31360
31361
31362
31363
31364
31365
31366
31367
31368
31369
31370
31371
31372
31373
31374
31375
31376
31377
31378
31379
31380
31381
31382
31383
31384
31385
31386
31387
31388
31389
31390
31391
31392
31393
31394
31395
31396
31397
31398
31399
31400
31401
31402
31403
31404
31405
31406
31407
31408
31409
31410
31411
31412
31413
31414
31415
31416
31417
31418
31419
31420
31421
31422
31423
31424
31425
31426
31427
31428
31429
31430
31431
31432
31433
31434
31435
31436
31437
31438
31439
31440
31441
31442
31443
31444
31445
31446
31447
31448
31449
31450
31451
31452
31453
31454
31455
31456
31457
31458
31459
31460
31461
31462
31463
31464
31465
31466
31467
31468
31469
31470
31471
31472
31473
31474
31475
31476
31477
31478
31479
31480
31481
31482
31483
31484
31485
31486
31487
31488
31489
31490
31491
31492
31493
31494
31495
31496
31497
31498
31499
31500
31501
31502
31503
31504
31505
31506
31507
31508
31509
31510
31511
31512
31513
31514
31515
31516
31517
31518
31519
31520
31521
31522
31523
31524
31525
31526
31527
31528
31529
31530
31531
31532
31533
31534
31535
31536
31537
31538
31539
31540
31541
31542
31543
31544
31545
31546
31547
31548
31549
31550
31551
31552
31553
31554
31555
31556
31557
31558
31559
31560
31561
31562
31563
31564
31565
31566
31567
31568
31569
31570
31571
31572
31573
31574
31575
31576
31577
31578
31579
31580
31581
31582
31583
31584
31585
31586
31587
31588
31589
31590
31591
31592
31593
31594
31595
31596
31597
31598
31599
31600
31601
31602
31603
31604
31605
31606
31607
31608
31609
31610
31611
31612
31613
31614
31615
31616
31617
31618
31619
31620
31621
31622
31623
31624
31625
31626
31627
31628
31629
31630
31631
31632
31633
31634
31635
31636
31637
31638
31639
31640
31641
31642
31643
31644
31645
31646
31647
31648
31649
31650
31651
31652
31653
31654
31655
31656
31657
31658
31659
31660
31661
31662
31663
31664
31665
31666
31667
31668
31669
31670
31671
31672
31673
31674
31675
31676
31677
31678
31679
31680
31681
31682
31683
31684
31685
31686
31687
31688
31689
31690
31691
31692
31693
31694
31695
31696
31697
31698
31699
31700
31701
31702
31703
31704
31705
31706
31707
31708
31709
31710
31711
31712
31713
31714
31715
31716
31717
31718
31719
31720
31721
31722
31723
31724
31725
31726
31727
31728
31729
31730
31731
31732
31733
31734
31735
31736
31737
31738
31739
31740
31741
31742
31743
31744
31745
31746
31747
31748
31749
31750
31751
31752
31753
31754
31755
31756
31757
31758
31759
31760
31761
31762
31763
31764
31765
31766
31767
31768
31769
31770
31771
31772
31773
31774
31775
31776
31777
31778
31779
31780
31781
31782
31783
31784
31785
31786
31787
31788
31789
31790
31791
31792
31793
31794
31795
31796
31797
31798
31799
31800
31801
31802
31803
31804
31805
31806
31807
31808
31809
31810
31811
31812
31813
31814
31815
31816
31817
31818
31819
31820
31821
31822
31823
31824
31825
31826
31827
31828
31829
31830
31831
31832
31833
31834
31835
31836
31837
31838
31839
31840
31841
31842
31843
31844
31845
31846
31847
31848
31849
31850
31851
31852
31853
31854
31855
31856
31857
31858
31859
31860
31861
31862
31863
31864
31865
31866
31867
31868
31869
31870
31871
31872
31873
31874
31875
31876
31877
31878
31879
31880
31881
31882
31883
31884
31885
31886
31887
31888
31889
31890
31891
31892
31893
31894
31895
31896
31897
31898
31899
31900
31901
31902
31903
31904
31905
31906
31907
31908
31909
31910
31911
31912
31913
31914
31915
31916
31917
31918
31919
31920
31921
31922
31923
31924
31925
31926
31927
31928
31929
31930
31931
31932
31933
31934
31935
31936
31937
31938
31939
31940
31941
31942
31943
31944
31945
31946
31947
31948
31949
31950
31951
31952
31953
31954
31955
31956
31957
31958
31959
31960
31961
31962
31963
31964
31965
31966
31967
31968
31969
31970
31971
31972
31973
31974
31975
31976
31977
31978
31979
31980
31981
31982
31983
31984
31985
31986
31987
31988
31989
31990
31991
31992
31993
31994
31995
31996
31997
31998
31999
32000
32001
32002
32003
32004
32005
32006
32007
32008
32009
32010
32011
32012
32013
32014
32015
32016
32017
32018
32019
32020
32021
32022
32023
32024
32025
32026
32027
32028
32029
32030
32031
32032
32033
32034
32035
32036
32037
32038
32039
32040
32041
32042
32043
32044
32045
32046
32047
32048
32049
32050
32051
32052
32053
32054
32055
32056
32057
32058
32059
32060
32061
32062
32063
32064
32065
32066
32067
32068
32069
32070
32071
32072
32073
32074
32075
32076
32077
32078
32079
32080
32081
32082
32083
32084
32085
32086
32087
32088
32089
32090
32091
32092
32093
32094
32095
32096
32097
32098
32099
32100
32101
32102
32103
32104
32105
32106
32107
32108
32109
32110
32111
32112
32113
32114
32115
32116
32117
32118
32119
32120
32121
32122
32123
32124
32125
32126
32127
32128
32129
32130
32131
32132
32133
32134
32135
32136
32137
32138
32139
32140
32141
32142
32143
32144
32145
32146
32147
32148
32149
32150
32151
32152
32153
32154
32155
32156
32157
32158
32159
32160
32161
32162
32163
32164
32165
32166
32167
32168
32169
32170
32171
32172
32173
32174
32175
32176
32177
32178
32179
32180
32181
32182
32183
32184
32185
32186
32187
32188
32189
32190
32191
32192
32193
32194
32195
32196
32197
32198
32199
32200
32201
32202
32203
32204
32205
32206
32207
32208
32209
32210
32211
32212
32213
32214
32215
32216
32217
32218
32219
32220
32221
32222
32223
32224
32225
32226
32227
32228
32229
32230
32231
32232
32233
32234
32235
32236
32237
32238
32239
32240
32241
32242
32243
32244
32245
32246
32247
32248
32249
32250
32251
32252
32253
32254
32255
32256
32257
32258
32259
32260
32261
32262
32263
32264
32265
32266
32267
32268
32269
32270
32271
32272
32273
32274
32275
32276
32277
32278
32279
32280
32281
32282
32283
32284
32285
32286
32287
32288
32289
32290
32291
32292
32293
32294
32295
32296
32297
32298
32299
32300
32301
32302
32303
32304
32305
32306
32307
32308
32309
32310
32311
32312
32313
32314
32315
32316
32317
32318
32319
32320
32321
32322
32323
32324
32325
32326
32327
32328
32329
32330
32331
32332
32333
32334
32335
32336
32337
32338
32339
32340
32341
32342
32343
32344
32345
32346
32347
32348
32349
32350
32351
32352
32353
32354
32355
32356
32357
32358
32359
32360
32361
32362
32363
32364
32365
32366
32367
32368
32369
32370
32371
32372
32373
32374
32375
32376
32377
32378
32379
32380
32381
32382
32383
32384
32385
32386
32387
32388
32389
32390
32391
32392
32393
32394
32395
32396
32397
32398
32399
32400
32401
32402
32403
32404
32405
32406
32407
32408
32409
32410
32411
32412
32413
32414
32415
32416
32417
32418
32419
32420
32421
32422
32423
32424
32425
32426
32427
32428
32429
32430
32431
32432
32433
32434
32435
32436
32437
32438
32439
32440
32441
32442
32443
32444
32445
32446
32447
32448
32449
32450
32451
32452
32453
32454
32455
32456
32457
32458
32459
32460
32461
32462
32463
32464
32465
32466
32467
32468
32469
32470
32471
32472
32473
32474
32475
32476
32477
32478
32479
32480
32481
32482
32483
32484
32485
32486
32487
32488
32489
32490
32491
32492
32493
32494
32495
32496
32497
32498
32499
32500
32501
32502
32503
32504
32505
32506
32507
32508
32509
32510
32511
32512
32513
32514
32515
32516
32517
32518
32519
32520
32521
32522
32523
32524
32525
32526
32527
32528
32529
32530
32531
32532
32533
32534
32535
32536
32537
32538
32539
32540
32541
32542
32543
32544
32545
32546
32547
32548
32549
32550
32551
32552
32553
32554
32555
32556
32557
32558
32559
32560
32561
32562
32563
32564
32565
32566
32567
32568
32569
32570
32571
32572
32573
32574
32575
32576
32577
32578
32579
32580
32581
32582
32583
32584
32585
32586
32587
32588
32589
32590
32591
32592
32593
32594
32595
32596
32597
32598
32599
32600
32601
32602
32603
32604
32605
32606
32607
32608
32609
32610
32611
32612
32613
32614
32615
32616
32617
32618
32619
32620
32621
32622
32623
32624
32625
32626
32627
32628
32629
32630
32631
32632
32633
32634
32635
32636
32637
32638
32639
32640
32641
32642
32643
32644
32645
32646
32647
32648
32649
32650
32651
32652
32653
32654
32655
32656
32657
32658
32659
32660
32661
32662
32663
32664
32665
32666
32667
32668
32669
32670
32671
32672
32673
32674
32675
32676
32677
32678
32679
32680
32681
32682
32683
32684
32685
32686
32687
32688
32689
32690
32691
32692
32693
32694
32695
32696
32697
32698
32699
32700
32701
32702
32703
32704
32705
32706
32707
32708
32709
32710
32711
32712
32713
32714
32715
32716
32717
32718
32719
32720
32721
32722
32723
32724
32725
32726
32727
32728
32729
32730
32731
32732
32733
32734
32735
32736
32737
32738
32739
32740
32741
32742
32743
32744
32745
32746
32747
32748
32749
32750
32751
32752
32753
32754
32755
32756
32757
32758
32759
32760
32761
32762
32763
32764
32765
32766
32767
32768
32769
32770
32771
32772
32773
32774
32775
32776
32777
32778
32779
32780
32781
32782
32783
32784
32785
32786
32787
32788
32789
32790
32791
32792
32793
32794
32795
32796
32797
32798
32799
32800
32801
32802
32803
32804
32805
32806
32807
32808
32809
32810
32811
32812
32813
32814
32815
32816
32817
32818
32819
32820
32821
32822
32823
32824
32825
32826
32827
32828
32829
32830
32831
32832
32833
32834
32835
32836
32837
32838
32839
32840
32841
32842
32843
32844
32845
32846
32847
32848
32849
32850
32851
32852
32853
32854
32855
32856
32857
32858
32859
32860
32861
32862
32863
32864
32865
32866
32867
32868
32869
32870
32871
32872
32873
32874
32875
32876
32877
32878
32879
32880
32881
32882
32883
32884
32885
32886
32887
32888
32889
32890
32891
32892
32893
32894
32895
32896
32897
32898
32899
32900
32901
32902
32903
32904
32905
32906
32907
32908
32909
32910
32911
32912
32913
32914
32915
32916
32917
32918
32919
32920
32921
32922
32923
32924
32925
32926
32927
32928
32929
32930
32931
32932
32933
32934
32935
32936
32937
32938
32939
32940
32941
32942
32943
32944
32945
32946
32947
32948
32949
32950
32951
32952
32953
32954
32955
32956
32957
32958
32959
32960
32961
32962
32963
32964
32965
32966
32967
32968
32969
32970
32971
32972
32973
32974
32975
32976
32977
32978
32979
32980
32981
32982
32983
32984
32985
32986
32987
32988
32989
32990
32991
32992
32993
32994
32995
32996
32997
32998
32999
33000
33001
33002
33003
33004
33005
33006
33007
33008
33009
33010
33011
33012
33013
33014
33015
33016
33017
33018
33019
33020
33021
33022
33023
33024
33025
33026
33027
33028
33029
33030
33031
33032
33033
33034
33035
33036
33037
33038
33039
33040
33041
33042
33043
33044
33045
33046
33047
33048
33049
33050
33051
33052
33053
33054
33055
33056
33057
33058
33059
33060
33061
33062
33063
33064
33065
33066
33067
33068
33069
33070
33071
33072
33073
33074
33075
33076
33077
33078
33079
33080
33081
33082
33083
33084
33085
33086
33087
33088
33089
33090
33091
33092
33093
33094
33095
33096
33097
33098
33099
33100
33101
33102
33103
33104
33105
33106
33107
33108
33109
33110
33111
33112
33113
33114
33115
33116
33117
33118
33119
33120
33121
33122
33123
33124
33125
33126
33127
33128
33129
33130
33131
33132
33133
33134
33135
33136
33137
33138
33139
33140
33141
33142
33143
33144
33145
33146
33147
33148
33149
33150
33151
33152
33153
33154
33155
33156
33157
33158
33159
33160
33161
33162
33163
33164
33165
33166
33167
33168
33169
33170
33171
33172
33173
33174
33175
33176
33177
33178
33179
33180
33181
33182
33183
33184
33185
33186
33187
33188
33189
33190
33191
33192
33193
33194
33195
33196
33197
33198
33199
33200
33201
33202
33203
33204
33205
33206
33207
33208
33209
33210
33211
33212
33213
33214
33215
33216
33217
33218
33219
33220
33221
33222
33223
33224
33225
33226
33227
33228
33229
33230
33231
33232
33233
33234
33235
33236
33237
33238
33239
33240
33241
33242
33243
33244
33245
33246
33247
33248
33249
33250
33251
33252
33253
33254
33255
33256
33257
33258
33259
33260
33261
33262
33263
33264
33265
33266
33267
33268
33269
33270
33271
33272
33273
33274
33275
33276
33277
33278
33279
33280
33281
33282
33283
33284
33285
33286
33287
33288
33289
33290
33291
33292
33293
33294
33295
33296
33297
33298
33299
33300
33301
33302
33303
33304
33305
33306
33307
33308
33309
33310
33311
33312
33313
33314
33315
33316
33317
33318
33319
33320
33321
33322
33323
33324
33325
33326
33327
33328
33329
33330
33331
33332
33333
33334
33335
33336
33337
33338
33339
33340
33341
33342
33343
33344
33345
33346
33347
33348
33349
33350
33351
33352
33353
33354
33355
33356
33357
33358
33359
33360
33361
33362
33363
33364
33365
33366
33367
33368
33369
33370
33371
33372
33373
33374
33375
33376
33377
33378
33379
33380
33381
33382
33383
33384
33385
33386
33387
33388
33389
33390
33391
33392
33393
33394
33395
33396
33397
33398
33399
33400
33401
33402
33403
33404
33405
33406
33407
33408
33409
33410
33411
33412
33413
33414
33415
33416
33417
33418
33419
33420
33421
33422
33423
33424
33425
33426
33427
33428
33429
33430
33431
33432
33433
33434
33435
33436
33437
33438
33439
33440
33441
33442
33443
33444
33445
33446
33447
33448
33449
33450
33451
33452
33453
33454
33455
33456
33457
33458
33459
33460
33461
33462
33463
33464
33465
33466
33467
33468
33469
33470
33471
33472
33473
33474
33475
33476
33477
33478
33479
33480
33481
33482
33483
33484
33485
33486
33487
33488
33489
33490
33491
33492
33493
33494
33495
33496
33497
33498
33499
33500
33501
33502
33503
33504
33505
33506
33507
33508
33509
33510
33511
33512
33513
33514
33515
33516
33517
33518
33519
33520
33521
33522
33523
33524
33525
33526
33527
33528
33529
33530
33531
33532
33533
33534
33535
33536
33537
33538
33539
33540
33541
33542
33543
33544
33545
33546
33547
33548
33549
33550
33551
33552
33553
33554
33555
33556
33557
33558
33559
33560
33561
33562
33563
33564
33565
33566
33567
33568
33569
33570
33571
33572
33573
33574
33575
33576
33577
33578
33579
33580
33581
33582
33583
33584
33585
33586
33587
33588
33589
33590
33591
33592
33593
33594
33595
33596
33597
33598
33599
33600
33601
33602
33603
33604
33605
33606
33607
33608
33609
33610
33611
33612
33613
33614
33615
33616
33617
33618
33619
33620
33621
33622
33623
33624
33625
33626
33627
33628
33629
33630
33631
33632
33633
33634
33635
33636
33637
33638
33639
33640
33641
33642
33643
33644
33645
33646
33647
33648
33649
33650
33651
33652
33653
33654
33655
33656
33657
33658
33659
33660
33661
33662
33663
33664
33665
33666
33667
33668
33669
33670
33671
33672
33673
33674
33675
33676
33677
33678
33679
33680
33681
33682
33683
33684
33685
33686
33687
33688
33689
33690
33691
33692
33693
33694
33695
33696
33697
33698
33699
33700
33701
33702
33703
33704
33705
33706
33707
33708
33709
33710
33711
33712
33713
33714
33715
33716
33717
33718
33719
33720
33721
33722
33723
33724
33725
33726
33727
33728
33729
33730
33731
33732
33733
33734
33735
33736
33737
33738
33739
33740
33741
33742
33743
33744
33745
33746
33747
33748
33749
33750
33751
33752
33753
33754
33755
33756
33757
33758
33759
33760
33761
33762
33763
33764
33765
33766
33767
33768
33769
33770
33771
33772
33773
33774
33775
33776
33777
33778
33779
33780
33781
33782
33783
33784
33785
33786
33787
33788
33789
33790
33791
33792
33793
33794
33795
33796
33797
33798
33799
33800
33801
33802
33803
33804
33805
33806
33807
33808
33809
33810
33811
33812
33813
33814
33815
33816
33817
33818
33819
33820
33821
33822
33823
33824
33825
33826
33827
33828
33829
33830
33831
33832
33833
33834
33835
33836
33837
33838
33839
33840
33841
33842
33843
33844
33845
33846
33847
33848
33849
33850
33851
33852
33853
33854
33855
33856
33857
33858
33859
33860
33861
33862
33863
33864
33865
33866
33867
33868
33869
33870
33871
33872
33873
33874
33875
33876
33877
33878
33879
33880
33881
33882
33883
33884
33885
33886
33887
33888
33889
33890
33891
33892
33893
33894
33895
33896
33897
33898
33899
33900
33901
33902
33903
33904
33905
33906
33907
33908
33909
33910
33911
33912
33913
33914
33915
33916
33917
33918
33919
33920
33921
33922
33923
33924
33925
33926
33927
33928
33929
33930
33931
33932
33933
33934
33935
33936
33937
33938
33939
33940
33941
33942
33943
33944
33945
33946
33947
33948
33949
33950
33951
33952
33953
33954
33955
33956
33957
33958
33959
33960
33961
33962
33963
33964
33965
33966
33967
33968
33969
33970
33971
33972
33973
33974
33975
33976
33977
33978
33979
33980
33981
33982
33983
33984
33985
33986
33987
33988
33989
33990
33991
33992
33993
33994
33995
33996
33997
33998
33999
34000
34001
34002
34003
34004
34005
34006
34007
34008
34009
34010
34011
34012
34013
34014
34015
34016
34017
34018
34019
34020
34021
34022
34023
34024
34025
34026
34027
34028
34029
34030
34031
34032
34033
34034
34035
34036
34037
34038
34039
34040
34041
34042
34043
34044
34045
34046
34047
34048
34049
34050
34051
34052
34053
34054
34055
34056
34057
34058
34059
34060
34061
34062
34063
34064
34065
34066
34067
34068
34069
34070
34071
34072
34073
34074
34075
34076
34077
34078
34079
34080
34081
34082
34083
34084
34085
34086
34087
34088
34089
34090
34091
34092
34093
34094
34095
34096
34097
34098
34099
34100
34101
34102
34103
34104
34105
34106
34107
34108
34109
34110
34111
34112
34113
34114
34115
34116
34117
34118
34119
34120
34121
34122
34123
34124
34125
34126
34127
34128
34129
34130
34131
34132
34133
34134
34135
34136
34137
34138
34139
34140
34141
34142
34143
34144
34145
34146
34147
34148
34149
34150
34151
34152
34153
34154
34155
34156
34157
34158
34159
34160
34161
34162
34163
34164
34165
34166
34167
34168
34169
34170
34171
34172
34173
34174
34175
34176
34177
34178
34179
34180
34181
34182
34183
34184
34185
34186
34187
34188
34189
34190
34191
34192
34193
34194
34195
34196
34197
34198
34199
34200
34201
34202
34203
34204
34205
34206
34207
34208
34209
34210
34211
34212
34213
34214
34215
34216
34217
34218
34219
34220
34221
34222
34223
34224
34225
34226
34227
34228
34229
34230
34231
34232
34233
34234
34235
34236
34237
34238
34239
34240
34241
34242
34243
34244
34245
34246
34247
34248
34249
34250
34251
34252
34253
34254
34255
34256
34257
34258
34259
34260
34261
34262
34263
34264
34265
34266
34267
34268
34269
34270
34271
34272
34273
34274
34275
34276
34277
34278
34279
34280
34281
34282
34283
34284
34285
34286
34287
34288
34289
34290
34291
34292
34293
34294
34295
34296
34297
34298
34299
34300
34301
34302
34303
34304
34305
34306
34307
34308
34309
34310
34311
34312
34313
34314
34315
34316
34317
34318
34319
34320
34321
34322
34323
34324
34325
34326
34327
34328
34329
34330
34331
34332
34333
34334
34335
34336
34337
34338
34339
34340
34341
34342
34343
34344
34345
34346
34347
34348
34349
34350
34351
34352
34353
34354
34355
34356
34357
34358
34359
34360
34361
34362
34363
34364
34365
34366
34367
34368
34369
34370
34371
34372
34373
34374
34375
34376
34377
34378
34379
34380
34381
34382
34383
34384
34385
34386
34387
34388
34389
34390
34391
34392
34393
34394
34395
34396
34397
34398
34399
34400
34401
34402
34403
34404
34405
34406
34407
34408
34409
34410
34411
34412
34413
34414
34415
34416
34417
34418
34419
34420
34421
34422
34423
34424
34425
34426
34427
34428
34429
34430
34431
34432
34433
34434
34435
34436
34437
34438
34439
34440
34441
34442
34443
34444
34445
34446
34447
34448
34449
34450
34451
34452
34453
34454
34455
34456
34457
34458
34459
34460
34461
34462
34463
34464
34465
34466
34467
34468
34469
34470
34471
34472
34473
34474
34475
34476
34477
34478
34479
34480
34481
34482
34483
34484
34485
34486
34487
34488
34489
34490
34491
34492
34493
34494
34495
34496
34497
34498
34499
34500
34501
34502
34503
34504
34505
34506
34507
34508
34509
34510
34511
34512
34513
34514
34515
34516
34517
34518
34519
34520
34521
34522
34523
34524
34525
34526
34527
34528
34529
34530
34531
34532
34533
34534
34535
34536
34537
34538
34539
34540
34541
34542
34543
34544
34545
34546
34547
34548
34549
34550
34551
34552
34553
34554
34555
34556
34557
34558
34559
34560
34561
34562
34563
34564
34565
34566
34567
34568
34569
34570
34571
34572
34573
34574
34575
34576
34577
34578
34579
34580
34581
34582
34583
34584
34585
34586
34587
34588
34589
34590
34591
34592
34593
34594
34595
34596
34597
34598
34599
34600
34601
34602
34603
34604
34605
34606
34607
34608
34609
34610
34611
34612
34613
34614
34615
34616
34617
34618
34619
34620
34621
34622
34623
34624
34625
34626
34627
34628
34629
34630
34631
34632
34633
34634
34635
34636
34637
34638
34639
34640
34641
34642
34643
34644
34645
34646
34647
34648
34649
34650
34651
34652
34653
34654
34655
34656
34657
34658
34659
34660
34661
34662
34663
34664
34665
34666
34667
34668
34669
34670
34671
34672
34673
34674
34675
34676
34677
34678
34679
34680
34681
34682
34683
34684
34685
34686
34687
34688
34689
34690
34691
34692
34693
34694
34695
34696
34697
34698
34699
34700
34701
34702
34703
34704
34705
34706
34707
34708
34709
34710
34711
34712
34713
34714
34715
34716
34717
34718
34719
34720
34721
34722
34723
34724
34725
34726
34727
34728
34729
34730
34731
34732
34733
34734
34735
34736
34737
34738
34739
34740
34741
34742
34743
34744
34745
34746
34747
34748
34749
34750
34751
34752
34753
34754
34755
34756
34757
34758
34759
34760
34761
34762
34763
34764
34765
34766
34767
34768
34769
34770
34771
34772
34773
34774
34775
34776
34777
34778
34779
34780
34781
34782
34783
34784
34785
34786
34787
34788
34789
34790
34791
34792
34793
34794
34795
34796
34797
34798
34799
34800
34801
34802
34803
34804
34805
34806
34807
34808
34809
34810
34811
34812
34813
34814
34815
34816
34817
34818
34819
34820
34821
34822
34823
34824
34825
34826
34827
34828
34829
34830
34831
34832
34833
34834
34835
34836
34837
34838
34839
34840
34841
34842
34843
34844
34845
34846
34847
34848
34849
34850
34851
34852
34853
34854
34855
34856
34857
34858
34859
34860
34861
34862
34863
34864
34865
34866
34867
34868
34869
34870
34871
34872
34873
34874
34875
34876
34877
34878
34879
34880
34881
34882
34883
34884
34885
34886
34887
34888
34889
34890
34891
34892
34893
34894
34895
34896
34897
34898
34899
34900
34901
34902
34903
34904
34905
34906
34907
34908
34909
34910
34911
34912
34913
34914
34915
34916
34917
34918
34919
34920
34921
34922
34923
34924
34925
34926
34927
34928
34929
34930
34931
34932
34933
34934
34935
34936
34937
34938
34939
34940
34941
34942
34943
34944
34945
34946
34947
34948
34949
34950
34951
34952
34953
34954
34955
34956
34957
34958
34959
34960
34961
34962
34963
34964
34965
34966
34967
34968
34969
34970
34971
34972
34973
34974
34975
34976
34977
34978
34979
34980
34981
34982
34983
34984
34985
34986
34987
34988
34989
34990
34991
34992
34993
34994
34995
34996
34997
34998
34999
35000
35001
35002
35003
35004
35005
35006
35007
35008
35009
35010
35011
35012
35013
35014
35015
35016
35017
35018
35019
35020
35021
35022
35023
35024
35025
35026
35027
35028
35029
35030
35031
35032
35033
35034
35035
35036
35037
35038
35039
35040
35041
35042
35043
35044
35045
35046
35047
35048
35049
35050
35051
35052
35053
35054
35055
35056
35057
35058
35059
35060
35061
35062
35063
35064
35065
35066
35067
35068
35069
35070
35071
35072
35073
35074
35075
35076
35077
35078
35079
35080
35081
35082
35083
35084
35085
35086
35087
35088
35089
35090
35091
35092
35093
35094
35095
35096
35097
35098
35099
35100
35101
35102
35103
35104
35105
35106
35107
35108
35109
35110
35111
35112
35113
35114
35115
35116
35117
35118
35119
35120
35121
35122
35123
35124
35125
35126
35127
35128
35129
35130
35131
35132
35133
35134
35135
35136
35137
35138
35139
35140
35141
35142
35143
35144
35145
35146
35147
35148
35149
35150
35151
35152
35153
35154
35155
35156
35157
35158
35159
35160
35161
35162
35163
35164
35165
35166
35167
35168
35169
35170
35171
35172
35173
35174
35175
35176
35177
35178
35179
35180
35181
35182
35183
35184
35185
35186
35187
35188
35189
35190
35191
35192
35193
35194
35195
35196
35197
35198
35199
35200
35201
35202
35203
35204
35205
35206
35207
35208
35209
35210
35211
35212
35213
35214
35215
35216
35217
35218
35219
35220
35221
35222
35223
35224
35225
35226
35227
35228
35229
35230
35231
35232
35233
35234
35235
35236
35237
35238
35239
35240
35241
35242
35243
35244
35245
35246
35247
35248
35249
35250
35251
35252
35253
35254
35255
35256
35257
35258
35259
35260
35261
35262
35263
35264
35265
35266
35267
35268
35269
35270
35271
35272
35273
35274
35275
35276
35277
35278
35279
35280
35281
35282
35283
35284
35285
35286
35287
35288
35289
35290
35291
35292
35293
35294
35295
35296
35297
35298
35299
35300
35301
35302
35303
35304
35305
35306
35307
35308
35309
35310
35311
35312
35313
35314
35315
35316
35317
35318
35319
35320
35321
35322
35323
35324
35325
35326
35327
35328
35329
35330
35331
35332
35333
35334
35335
35336
35337
35338
35339
35340
35341
35342
35343
35344
35345
35346
35347
35348
35349
35350
35351
35352
35353
35354
35355
35356
35357
35358
35359
35360
35361
35362
35363
35364
35365
35366
35367
35368
35369
35370
35371
35372
35373
35374
35375
35376
35377
35378
35379
35380
35381
35382
35383
35384
35385
35386
35387
35388
35389
35390
35391
35392
35393
35394
35395
35396
35397
35398
35399
35400
35401
35402
35403
35404
35405
35406
35407
35408
35409
35410
35411
35412
35413
35414
35415
35416
35417
35418
35419
35420
35421
35422
35423
35424
35425
35426
35427
35428
35429
35430
35431
35432
35433
35434
35435
35436
35437
35438
35439
35440
35441
35442
35443
35444
35445
35446
35447
35448
35449
35450
35451
35452
35453
35454
35455
35456
35457
35458
35459
35460
35461
35462
35463
35464
35465
35466
35467
35468
35469
35470
35471
35472
35473
35474
35475
35476
35477
35478
35479
35480
35481
35482
35483
35484
35485
35486
35487
35488
35489
35490
35491
35492
35493
35494
35495
35496
35497
35498
35499
35500
35501
35502
35503
35504
35505
35506
35507
35508
35509
35510
35511
35512
35513
35514
35515
35516
35517
35518
35519
35520
35521
35522
35523
35524
35525
35526
35527
35528
35529
35530
35531
35532
35533
35534
35535
35536
35537
35538
35539
35540
35541
35542
35543
35544
35545
35546
35547
35548
35549
35550
35551
35552
35553
35554
35555
35556
35557
35558
35559
35560
35561
35562
35563
35564
35565
35566
35567
35568
35569
35570
35571
35572
35573
35574
35575
35576
35577
35578
35579
35580
35581
35582
35583
35584
35585
35586
35587
35588
35589
35590
35591
35592
35593
35594
35595
35596
35597
35598
35599
35600
35601
35602
35603
35604
35605
35606
35607
35608
35609
35610
35611
35612
35613
35614
35615
35616
35617
35618
35619
35620
35621
35622
35623
35624
35625
35626
35627
35628
35629
35630
35631
35632
35633
35634
35635
35636
35637
35638
35639
35640
35641
35642
35643
35644
35645
35646
35647
35648
35649
35650
35651
35652
35653
35654
35655
35656
35657
35658
35659
35660
35661
35662
35663
35664
35665
35666
35667
35668
35669
35670
35671
35672
35673
35674
35675
35676
35677
35678
35679
35680
35681
35682
35683
35684
35685
35686
35687
35688
35689
35690
35691
35692
35693
35694
35695
35696
35697
35698
35699
35700
35701
35702
35703
35704
35705
35706
35707
35708
35709
35710
35711
35712
35713
35714
35715
35716
35717
35718
35719
35720
35721
35722
35723
35724
35725
35726
35727
35728
35729
35730
35731
35732
35733
35734
35735
35736
35737
35738
35739
35740
35741
35742
35743
35744
35745
35746
35747
35748
35749
35750
35751
35752
35753
35754
35755
35756
35757
35758
35759
35760
35761
35762
35763
35764
35765
35766
35767
35768
35769
35770
35771
35772
35773
35774
35775
35776
35777
35778
35779
35780
35781
35782
35783
35784
35785
35786
35787
35788
35789
35790
35791
35792
35793
35794
35795
35796
35797
35798
35799
35800
35801
35802
35803
35804
35805
35806
35807
35808
35809
35810
35811
35812
35813
35814
35815
35816
35817
35818
35819
35820
35821
35822
35823
35824
35825
35826
35827
35828
35829
35830
35831
35832
35833
35834
35835
35836
35837
35838
35839
35840
35841
35842
35843
35844
35845
35846
35847
35848
35849
35850
35851
35852
35853
35854
35855
35856
35857
35858
35859
35860
35861
35862
35863
35864
35865
35866
35867
35868
35869
35870
35871
35872
35873
35874
35875
35876
35877
35878
35879
35880
35881
35882
35883
35884
35885
35886
35887
35888
35889
35890
35891
35892
35893
35894
35895
35896
35897
35898
35899
35900
35901
35902
35903
35904
35905
35906
35907
35908
35909
35910
35911
35912
35913
35914
35915
35916
35917
35918
35919
35920
35921
35922
35923
35924
35925
35926
35927
35928
35929
35930
35931
35932
35933
35934
35935
35936
35937
35938
35939
35940
35941
35942
35943
35944
35945
35946
35947
35948
35949
35950
35951
35952
35953
35954
35955
35956
35957
35958
35959
35960
35961
35962
35963
35964
35965
35966
35967
35968
35969
35970
35971
35972
35973
35974
35975
35976
35977
35978
35979
35980
35981
35982
35983
35984
35985
35986
35987
35988
35989
35990
35991
35992
35993
35994
35995
35996
35997
35998
35999
36000
36001
36002
36003
36004
36005
36006
36007
36008
36009
36010
36011
36012
36013
36014
36015
36016
36017
36018
36019
36020
36021
36022
36023
36024
36025
36026
36027
36028
36029
36030
36031
36032
36033
36034
36035
36036
36037
36038
36039
36040
36041
36042
36043
36044
36045
36046
36047
36048
36049
36050
36051
36052
36053
36054
36055
36056
36057
36058
36059
36060
36061
36062
36063
36064
36065
36066
36067
36068
36069
36070
36071
36072
36073
36074
36075
36076
36077
36078
36079
36080
36081
36082
36083
36084
36085
36086
36087
36088
36089
36090
36091
36092
36093
36094
36095
36096
36097
36098
36099
36100
36101
36102
36103
36104
36105
36106
36107
36108
36109
36110
36111
36112
36113
36114
36115
36116
36117
36118
36119
36120
36121
36122
36123
36124
36125
36126
36127
36128
36129
36130
36131
36132
36133
36134
36135
36136
36137
36138
36139
36140
36141
36142
36143
36144
36145
36146
36147
36148
36149
36150
36151
36152
36153
36154
36155
36156
36157
36158
36159
36160
36161
36162
36163
36164
36165
36166
36167
36168
36169
36170
36171
36172
36173
36174
36175
36176
36177
36178
36179
36180
36181
36182
36183
36184
36185
36186
36187
36188
36189
36190
36191
36192
36193
36194
36195
36196
36197
36198
36199
36200
36201
36202
36203
36204
36205
36206
36207
36208
36209
36210
36211
36212
36213
36214
36215
36216
36217
36218
36219
36220
36221
36222
36223
36224
36225
36226
36227
36228
36229
36230
36231
36232
36233
36234
36235
36236
36237
36238
36239
36240
36241
36242
36243
36244
36245
36246
36247
36248
36249
36250
36251
36252
36253
36254
36255
36256
36257
36258
36259
36260
36261
36262
36263
36264
36265
36266
36267
36268
36269
36270
36271
36272
36273
36274
36275
36276
36277
36278
36279
36280
36281
36282
36283
36284
36285
36286
36287
36288
36289
36290
36291
36292
36293
36294
36295
36296
36297
36298
36299
36300
36301
36302
36303
36304
36305
36306
36307
36308
36309
36310
36311
36312
36313
36314
36315
36316
36317
36318
36319
36320
36321
36322
36323
36324
36325
36326
36327
36328
36329
36330
36331
36332
36333
36334
36335
36336
36337
36338
36339
36340
36341
36342
36343
36344
36345
36346
36347
36348
36349
36350
36351
36352
36353
36354
36355
36356
36357
36358
36359
36360
36361
36362
36363
36364
36365
36366
36367
36368
36369
36370
36371
36372
36373
36374
36375
36376
36377
36378
36379
36380
36381
36382
36383
36384
36385
36386
36387
36388
36389
36390
36391
36392
36393
36394
36395
36396
36397
36398
36399
36400
36401
36402
36403
36404
36405
36406
36407
36408
36409
36410
36411
36412
36413
36414
36415
36416
36417
36418
36419
36420
36421
36422
36423
36424
36425
36426
36427
36428
36429
36430
36431
36432
36433
36434
36435
36436
36437
36438
36439
36440
36441
36442
36443
36444
36445
36446
36447
36448
36449
36450
36451
36452
36453
36454
36455
36456
36457
36458
36459
36460
36461
36462
36463
36464
36465
36466
36467
36468
36469
36470
36471
36472
36473
36474
36475
36476
36477
36478
36479
36480
36481
36482
36483
36484
36485
36486
36487
36488
36489
36490
36491
36492
36493
36494
36495
36496
36497
36498
36499
36500
36501
36502
36503
36504
36505
36506
36507
36508
36509
36510
36511
36512
36513
36514
36515
36516
36517
36518
36519
36520
36521
36522
36523
36524
36525
36526
36527
36528
36529
36530
36531
36532
36533
36534
36535
36536
36537
36538
36539
36540
36541
36542
36543
36544
36545
36546
36547
36548
36549
36550
36551
36552
36553
36554
36555
36556
36557
36558
36559
36560
36561
36562
36563
36564
36565
36566
36567
36568
36569
36570
36571
36572
36573
36574
36575
36576
36577
36578
36579
36580
36581
36582
36583
36584
36585
36586
36587
36588
36589
36590
36591
36592
36593
36594
36595
36596
36597
36598
36599
36600
36601
36602
36603
36604
36605
36606
36607
36608
36609
36610
36611
36612
36613
36614
36615
36616
36617
36618
36619
36620
36621
36622
36623
36624
36625
36626
36627
36628
36629
36630
36631
36632
36633
36634
36635
36636
36637
36638
36639
36640
36641
36642
36643
36644
36645
36646
36647
36648
36649
36650
36651
36652
36653
36654
36655
36656
36657
36658
36659
36660
36661
36662
36663
36664
36665
36666
36667
36668
36669
36670
36671
36672
36673
36674
36675
36676
36677
36678
36679
36680
36681
36682
36683
36684
36685
36686
36687
36688
36689
36690
36691
36692
36693
36694
36695
36696
36697
36698
36699
36700
36701
36702
36703
36704
36705
36706
36707
36708
36709
36710
36711
36712
36713
36714
36715
36716
36717
36718
36719
36720
36721
36722
36723
36724
36725
36726
36727
36728
36729
36730
36731
36732
36733
36734
36735
36736
36737
36738
36739
36740
36741
36742
36743
36744
36745
36746
36747
36748
36749
36750
36751
36752
36753
36754
36755
36756
36757
36758
36759
36760
36761
36762
36763
36764
36765
36766
36767
36768
36769
36770
36771
36772
36773
36774
36775
36776
36777
36778
36779
36780
36781
36782
36783
36784
36785
36786
36787
36788
36789
36790
36791
36792
36793
36794
36795
36796
36797
36798
36799
36800
36801
36802
36803
36804
36805
36806
36807
36808
36809
36810
36811
36812
36813
36814
36815
36816
36817
36818
36819
36820
36821
36822
36823
36824
36825
36826
36827
36828
36829
36830
36831
36832
36833
36834
36835
36836
36837
36838
36839
36840
36841
36842
36843
36844
36845
36846
36847
36848
36849
36850
36851
36852
36853
36854
36855
36856
36857
36858
36859
36860
36861
36862
36863
36864
36865
36866
36867
36868
36869
36870
36871
36872
36873
36874
36875
36876
36877
36878
36879
36880
36881
36882
36883
36884
36885
36886
36887
36888
36889
36890
36891
36892
36893
36894
36895
36896
36897
36898
36899
36900
36901
36902
36903
36904
36905
36906
36907
36908
36909
36910
36911
36912
36913
36914
36915
36916
36917
36918
36919
36920
36921
36922
36923
36924
36925
36926
36927
36928
36929
36930
36931
36932
36933
36934
36935
36936
36937
36938
36939
36940
36941
36942
36943
36944
36945
36946
36947
36948
36949
36950
36951
36952
36953
36954
36955
36956
36957
36958
36959
36960
36961
36962
36963
36964
36965
36966
36967
36968
36969
36970
36971
36972
36973
36974
36975
36976
36977
36978
36979
36980
36981
36982
36983
36984
36985
36986
36987
36988
36989
36990
36991
36992
36993
36994
36995
36996
36997
36998
36999
37000
37001
37002
37003
37004
37005
37006
37007
37008
37009
37010
37011
37012
37013
37014
37015
37016
37017
37018
37019
37020
37021
37022
37023
37024
37025
37026
37027
37028
37029
37030
37031
37032
37033
37034
37035
37036
37037
37038
37039
37040
37041
37042
37043
37044
37045
37046
37047
37048
37049
37050
37051
37052
37053
37054
37055
37056
37057
37058
37059
37060
37061
37062
37063
37064
37065
37066
37067
37068
37069
37070
37071
37072
37073
37074
37075
37076
37077
37078
37079
37080
37081
37082
37083
37084
37085
37086
37087
37088
37089
37090
37091
37092
37093
37094
37095
37096
37097
37098
37099
37100
37101
37102
37103
37104
37105
37106
37107
37108
37109
37110
37111
37112
37113
37114
37115
37116
37117
37118
37119
37120
37121
37122
37123
37124
37125
37126
37127
37128
37129
37130
37131
37132
37133
37134
37135
37136
37137
37138
37139
37140
37141
37142
37143
37144
37145
37146
37147
37148
37149
37150
37151
37152
37153
37154
37155
37156
37157
37158
37159
37160
37161
37162
37163
37164
37165
37166
37167
37168
37169
37170
37171
37172
37173
37174
37175
37176
37177
37178
37179
37180
37181
37182
37183
37184
37185
37186
37187
37188
37189
37190
37191
37192
37193
37194
37195
37196
37197
37198
37199
37200
37201
37202
37203
37204
37205
37206
37207
37208
37209
37210
37211
37212
37213
37214
37215
37216
37217
37218
37219
37220
37221
37222
37223
37224
37225
37226
37227
37228
37229
37230
37231
37232
37233
37234
37235
37236
37237
37238
37239
37240
37241
37242
37243
37244
37245
37246
37247
37248
37249
37250
37251
37252
37253
37254
37255
37256
37257
37258
37259
37260
37261
37262
37263
37264
37265
37266
37267
37268
37269
37270
37271
37272
37273
37274
37275
37276
37277
37278
37279
37280
37281
37282
37283
37284
37285
37286
37287
37288
37289
37290
37291
37292
37293
37294
37295
37296
37297
37298
37299
37300
37301
37302
37303
37304
37305
37306
37307
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cctype>
#include <numeric>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<int> ExperimentalPrefLoopAlignment(
    "x86-experimental-pref-loop-alignment", cl::init(4),
    cl::desc(
        "Sets the preferable loop alignment for experiments (as log2 bytes)"
        "(the last x86-experimental-pref-loop-alignment bits"
        " of the loop header PC will be 0)."),
    cl::Hidden);

// Added in 10.0.
static cl::opt<bool> EnableOldKNLABI(
    "x86-enable-old-knl-abi", cl::init(false),
    cl::desc("Enables passing v32i16 and v64i8 in 2 YMM registers instead of "
             "one ZMM register on AVX512F, but not AVX512BW targets."),
    cl::Hidden);

static cl::opt<bool> MulConstantOptimization(
    "mul-constant-optimization", cl::init(true),
    cl::desc("Replace 'mul x, Const' with more effective instructions like "
             "SHIFT, LEA, etc."),
    cl::Hidden);

static cl::opt<bool> ExperimentalUnorderedISEL(
    "x86-experimental-unordered-atomic-isel", cl::init(false),
    cl::desc("Use LoadSDNode and StoreSDNode instead of "
             "AtomicSDNode for unordered atomic loads and "
             "stores respectively."),
    cl::Hidden);

/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  X86ScalarSSEf64 = Subtarget.hasSSE2();
  X86ScalarSSEf32 = Subtarget.hasSSE1();
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget.isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget.is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides and use cheaper ones.
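  // addBypassSlowDiv(SlowBits, FastBits) records that a SlowBits-wide divide may
  // be replaced at run time by a FastBits-wide one when both operands fit; the
  // check and the narrow divide are inserted by CodeGenPrepare (BypassSlowDivision).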
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget.hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
      addBypassSlowDiv(64, 32);
  }

  if (Subtarget.isTargetWindowsMSVC() ||
      Subtarget.isTargetWindowsItanium()) {
    // Set up Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
  }

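  // setUseUnderscoreSetJmp/LongJmp select whether llvm.setjmp and llvm.longjmp
  // are lowered to the underscore-prefixed C runtime entry points
  // (_setjmp/_longjmp) instead of setjmp/longjmp.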
  if (Subtarget.isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget.isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but plain longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size to
  // 32 bits so the AtomicExpandPass will expand it so we don't need cmpxchg8b.
  // FIXME: Should we be limiting the atomic size on other configs? Default is
  // 1024.
  if (!Subtarget.hasCmpxchg8b())
    setMaxAtomicSizeInBitsSupported(32);
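  // (Atomic operations wider than the supported size are expanded by
  // AtomicExpandPass into __atomic_* library calls rather than inline
  // lock-prefixed sequences.)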

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget.is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Integer absolute.
  if (Subtarget.hasCMov()) {
    setOperationAction(ISD::ABS            , MVT::i16  , Custom);
    setOperationAction(ISD::ABS            , MVT::i32  , Custom);
  }
  setOperationAction(ISD::ABS              , MVT::i64  , Custom);

  // Funnel shifts.
  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
    setOperationAction(ShiftOp             , MVT::i16  , Custom);
    setOperationAction(ShiftOp             , MVT::i32  , Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ShiftOp           , MVT::i64  , Custom);
  }

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
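  // (E.g. a uitofp i16 -> f32 is widened to a sitofp on the zero-extended i32
  // value, which is safe because the zero-extended value is non-negative.)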
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (!Subtarget.useSoftFloat()) {
    // For i64: with SSE2 we have a custom algorithm; for other targets this
    // becomes a 64-bit FILD followed by a conditional FADD.
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Custom);
    // For i32: with SSE2 we have a custom algorithm; for other targets this
    // becomes a 64-bit FILD or a VCVTUSI2SS/SD.
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Expand);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);

  if (!Subtarget.useSoftFloat()) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
      setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Expand);
  }

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (!Subtarget.useSoftFloat()) {
    // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT     , MVT::i64  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i64  , Custom);

    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Expand);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i64  , Expand);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);
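  // (E.g. an fptoui to i16 becomes an fptosi to i32 followed by a truncate;
  // the full unsigned i16 range fits in a signed i32.)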

  if (!Subtarget.useSoftFloat()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
    setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
    }
  } else if (!Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST      , MVT::i64  , Custom);

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
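  // (E.g. an i32 "x / y" and "x % y" pair legalizes to one ISD::SDIVREM node,
  // which isel matches to a single IDIV: quotient in EAX, remainder in EDX.)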
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
  }

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
                   MVT::i8,  MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::BR_CC,     VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);

  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
  setOperationAction(ISD::FREM             , MVT::f128 , Expand);
  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationPromotedToType(ISD::CTTZ           , MVT::i8   , MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
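  // (For plain CTTZ the promoted i32 value gets bit 8 set first, so a zero
  // i8 input still produces a count of 8.)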
  if (!Subtarget.hasBMI()) {
    setOperationAction(ISD::CTTZ           , MVT::i16  , Custom);
    setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , Legal);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Legal);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
    }
  }

  if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationPromotedToType(ISD::CTLZ           , MVT::i8   , MVT::i32);
    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
  } else {
    setOperationAction(ISD::CTLZ           , MVT::i8   , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i16  , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i32  , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16  , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32  , Custom);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTLZ         , MVT::i64  , Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }
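  // (With Expand these become the default soft-float helpers, typically
  // __gnu_h2f_ieee and __gnu_f2h_ieee.)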

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);

  if (Subtarget.hasPOPCNT()) {
    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
  } else {
    setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
    else
      setOperationAction(ISD::CTPOP        , MVT::i64  , Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);

  if (!Subtarget.hasMOVBE())
    setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
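  // (An i1 select is promoted to an i8 select, the next legal integer type.)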
  setOperationAction(ISD::SELECT          , MVT::i1   , Promote);
  // X86 wants to expand cmov itself.
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
  }
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC,  VT, Custom);
  }

  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);

  setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

  // Darwin ABI issue.
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::ConstantPool    , VT, Custom);
    setOperationAction(ISD::JumpTable       , VT, Custom);
    setOperationAction(ISD::GlobalAddress   , VT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
    setOperationAction(ISD::ExternalSymbol  , VT, Custom);
    setOperationAction(ISD::BlockAddress    , VT, Custom);
  }

  // Double-width shifts: i32 parts handle 64-bit shifts on 32-bit x86, and
  // i64 parts handle 128-bit shifts on x86-64.
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SHL_PARTS, VT, Custom);
    setOperationAction(ISD::SRA_PARTS, VT, Custom);
    setOperationAction(ISD::SRL_PARTS, VT, Custom);
  }

  if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
    setOperationAction(ISD::PREFETCH      , MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE  , MVT::Other, Custom);

  // Expand certain atomics
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget.is64Bit())
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);

  if (Subtarget.hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  bool Is64Bit = Subtarget.is64Bit();
  setOperationAction(ISD::VAARG,  MVT::Other, Is64Bit ? Custom : Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);

  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);

  if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                     : &X86::FR64RegClass);

    // Disable f32->f64 extload as we can only generate this in one instruction
    // under optsize. So it's easier to pattern match (fpext (load)) for that
    // case instead of needing to emit 2 instructions for extload in the
    // non-optsize case.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      // Use ANDPD to simulate FABS.
      setOperationAction(ISD::FABS, VT, Custom);

      // Use XORP to simulate FNEG.
      setOperationAction(ISD::FNEG, VT, Custom);

      // Use ANDPD and ORPD to simulate FCOPYSIGN.
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);

      // These might be better off as horizontal vector ops.
      setOperationAction(ISD::FADD, VT, Custom);
      setOperationAction(ISD::FSUB, VT, Custom);

      // We don't support sin/cos/fmod
      setOperationAction(ISD::FSIN   , VT, Expand);
      setOperationAction(ISD::FCOS   , VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }

    // Lower this to MOVMSK plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

  } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32 &&
             (UseX87 || Is64Bit)) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    if (UseX87)
      addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    if (UseX87)
      setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    if (UseX87)
      setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN   , MVT::f32, Expand);
    setOperationAction(ISD::FCOS   , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    if (UseX87) {
      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (UseX87) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      setOperationAction(ISD::UNDEF,     VT, Expand);
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);

      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN   , VT, Expand);
      setOperationAction(ISD::FCOS   , VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }
  }

  // Expand FP32 immediates into loads from the stack, except for the special
  // cases below.
  if (isTypeLegal(MVT::f32)) {
    if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
      addLegalFPImmediate(APFloat(+0.0f)); // FLD0
      addLegalFPImmediate(APFloat(+1.0f)); // FLD1
      addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0f)); // xorps
  }
  // Expand FP64 immediates into loads from the stack, except for the special
  // cases below.
  if (isTypeLegal(MVT::f64)) {
    if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
      addLegalFPImmediate(APFloat(+0.0)); // FLD0
      addLegalFPImmediate(APFloat(+1.0)); // FLD1
      addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0)); // xorpd
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // f80 always uses X87.
  if (UseX87) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }

    // Always expand sin/cos functions even though x87 has an instruction.
    setOperationAction(ISD::FSIN   , MVT::f80, Expand);
    setOperationAction(ISD::FCOS   , MVT::f80, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f80, Expand);

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL,  MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT,  MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
    setOperationAction(ISD::LROUND, MVT::f80, Expand);
    setOperationAction(ISD::LLROUND, MVT::f80, Expand);
    setOperationAction(ISD::LRINT, MVT::f80, Expand);
    setOperationAction(ISD::LLRINT, MVT::f80, Expand);
  }

  // f128 uses xmm registers, but most operations require libcalls.
  if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                   : &X86::VR128RegClass);

    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps

    setOperationAction(ISD::FADD, MVT::f128, Custom);
    setOperationAction(ISD::FSUB, MVT::f128, Custom);
    setOperationAction(ISD::FDIV, MVT::f128, Custom);
    setOperationAction(ISD::FMUL, MVT::f128, Custom);
    setOperationAction(ISD::FMA,  MVT::f128, Expand);

    setOperationAction(ISD::FABS, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);

    setOperationAction(ISD::FSIN,    MVT::f128, Expand);
    setOperationAction(ISD::FCOS,    MVT::f128, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
    setOperationAction(ISD::FSQRT,   MVT::f128, Expand);

    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
    // We need to custom handle any FP_ROUND with an f128 input, but
    // LegalizeDAG uses the result type to know when to run a custom handler.
    // So we have to list all legal floating point result types here.
    if (isTypeLegal(MVT::f32)) {
      setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
    }
    if (isTypeLegal(MVT::f64)) {
      setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
    }
    if (isTypeLegal(MVT::f80)) {
      setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
    }

    setOperationAction(ISD::SETCC, MVT::f128, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
    setTruncStoreAction(MVT::f128, MVT::f32, Expand);
    setTruncStoreAction(MVT::f128, MVT::f64, Expand);
    setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f128 , Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // Some FP actions are always expanded for vector types.
  for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
                   MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
    setOperationAction(ISD::FSIN,      VT, Expand);
    setOperationAction(ISD::FSINCOS,   VT, Expand);
    setOperationAction(ISD::FCOS,      VT, Expand);
    setOperationAction(ISD::FREM,      VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FPOW,      VT, Expand);
    setOperationAction(ISD::FLOG,      VT, Expand);
    setOperationAction(ISD::FLOG2,     VT, Expand);
    setOperationAction(ISD::FLOG10,    VT, Expand);
    setOperationAction(ISD::FEXP,      VT, Expand);
    setOperationAction(ISD::FEXP2,     VT, Expand);
  }

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
    setOperationAction(ISD::FMA,  VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types; we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
      // split/scalarized right now.
      if (VT.getVectorElementType() == MVT::f16)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
    setOperationAction(ISD::FABS,               MVT::v4f32, Custom);
    setOperationAction(ISD::FCOPYSIGN,          MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Custom);

    setOperationAction(ISD::LOAD,               MVT::v2f32, Custom);
    setOperationAction(ISD::STORE,              MVT::v2f32, Custom);

    setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v4f32, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
                     MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
      setOperationAction(ISD::UREM, VT, Custom);
    }

    setOperationAction(ISD::MUL,                MVT::v2i8,  Custom);
    setOperationAction(ISD::MUL,                MVT::v4i8,  Custom);
    setOperationAction(ISD::MUL,                MVT::v8i8,  Custom);

    setOperationAction(ISD::MUL,                MVT::v16i8, Custom);
    setOperationAction(ISD::MUL,                MVT::v4i32, Custom);
    setOperationAction(ISD::MUL,                MVT::v2i64, Custom);
    setOperationAction(ISD::MULHU,              MVT::v4i32, Custom);
    setOperationAction(ISD::MULHS,              MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU,              MVT::v16i8, Custom);
    setOperationAction(ISD::MULHS,              MVT::v16i8, Custom);
    setOperationAction(ISD::MULHU,              MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS,              MVT::v8i16, Legal);
    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
    setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
    setOperationAction(ISD::FABS,               MVT::v2f64, Custom);
    setOperationAction(ISD::FCOPYSIGN,          MVT::v2f64, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
    }

    setOperationAction(ISD::UADDSAT,            MVT::v16i8, Legal);
    setOperationAction(ISD::SADDSAT,            MVT::v16i8, Legal);
    setOperationAction(ISD::USUBSAT,            MVT::v16i8, Legal);
    setOperationAction(ISD::SSUBSAT,            MVT::v16i8, Legal);
    setOperationAction(ISD::UADDSAT,            MVT::v8i16, Legal);
    setOperationAction(ISD::SADDSAT,            MVT::v8i16, Legal);
    setOperationAction(ISD::USUBSAT,            MVT::v8i16, Legal);
    setOperationAction(ISD::SSUBSAT,            MVT::v8i16, Legal);
    setOperationAction(ISD::UADDSAT,            MVT::v4i32, Custom);
    setOperationAction(ISD::USUBSAT,            MVT::v4i32, Custom);
    setOperationAction(ISD::UADDSAT,            MVT::v2i64, Custom);
    setOperationAction(ISD::USUBSAT,            MVT::v2i64, Custom);

    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SETCC,              VT, Custom);
      setOperationAction(ISD::CTPOP,              VT, Custom);
      setOperationAction(ISD::ABS,                VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
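      // (E.g. a SETLT compare is commuted to SETGT during lowering so it can
      // map onto PCMPGT; SSE/AVX have no integer less-than compare.)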
    }

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
      setOperationAction(ISD::VSELECT,            VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
      setOperationAction(ISD::VSELECT,            VT, Custom);

      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
        continue;

      setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4i32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v8i16, Custom);
    setOperationAction(ISD::SELECT,             MVT::v16i8, Custom);

    setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT,         MVT::v2i32, Custom);

    // Custom legalize these to avoid over promotion or custom promotion.
    setOperationAction(ISD::FP_TO_SINT,         MVT::v2i8,  Custom);
    setOperationAction(ISD::FP_TO_SINT,         MVT::v4i8,  Custom);
    setOperationAction(ISD::FP_TO_SINT,         MVT::v8i8,  Custom);
    setOperationAction(ISD::FP_TO_SINT,         MVT::v2i16, Custom);
    setOperationAction(ISD::FP_TO_SINT,         MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v2i8,  Custom);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v4i8,  Custom);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v8i8,  Custom);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v2i16, Custom);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v4i16, Custom);

    // By marking FP_TO_SINT v8i16 as Custom, we trick type legalization into
    // promoting v8i8 FP_TO_UINT into FP_TO_SINT. When the v8i16 FP_TO_SINT is
    // split again based on the input type, an AssertSExt i16 is emitted
    // instead of an AssertZExt. This allows packssdw followed by packuswb to
    // be used to truncate to v8i8, which is necessary since packusdw isn't
    // available until SSE4.1.
    setOperationAction(ISD::FP_TO_SINT,         MVT::v8i16, Custom);

    setOperationAction(ISD::SINT_TO_FP,         MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP,         MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP,         MVT::v2i32, Custom);

    // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
    setOperationAction(ISD::UINT_TO_FP,         MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND,          MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND,           MVT::v2f32, Custom);

    // We want to legalize this to an f64 load rather than an i64 load on
    // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
    // store.
    setOperationAction(ISD::LOAD,               MVT::v2i32, Custom);
    setOperationAction(ISD::LOAD,               MVT::v4i16, Custom);
    setOperationAction(ISD::LOAD,               MVT::v8i8,  Custom);
    setOperationAction(ISD::STORE,              MVT::v2i32, Custom);
    setOperationAction(ISD::STORE,              MVT::v4i16, Custom);
    setOperationAction(ISD::STORE,              MVT::v8i8,  Custom);

    setOperationAction(ISD::BITCAST,            MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST,            MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST,            MVT::v8i8,  Custom);
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);

    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);

    setOperationAction(ISD::TRUNCATE,    MVT::v2i8,  Custom);
    setOperationAction(ISD::TRUNCATE,    MVT::v2i16, Custom);
    setOperationAction(ISD::TRUNCATE,    MVT::v2i32, Custom);
    setOperationAction(ISD::TRUNCATE,    MVT::v4i8,  Custom);
    setOperationAction(ISD::TRUNCATE,    MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE,    MVT::v8i8,  Custom);

    // In the customized shift lowering, the legal v4i32/v2i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SRL,              VT, Custom);
      setOperationAction(ISD::SHL,              VT, Custom);
      setOperationAction(ISD::SRA,              VT, Custom);
    }

    setOperationAction(ISD::ROTL,               MVT::v4i32, Custom);
    setOperationAction(ISD::ROTL,               MVT::v8i16, Custom);

    // With AVX512, expanding (and promoting the shifts) is better.
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::ROTL,             MVT::v16i8, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
    setOperationAction(ISD::ABS,                MVT::v16i8, Legal);
    setOperationAction(ISD::ABS,                MVT::v8i16, Legal);
    setOperationAction(ISD::ABS,                MVT::v4i32, Legal);
    setOperationAction(ISD::BITREVERSE,         MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ,               MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ,               MVT::v8i16, Custom);
    setOperationAction(ISD::CTLZ,               MVT::v4i32, Custom);
    setOperationAction(ISD::CTLZ,               MVT::v2i64, Custom);

    // These might be better off as horizontal vector ops.
    setOperationAction(ISD::ADD,                MVT::i16, Custom);
    setOperationAction(ISD::ADD,                MVT::i32, Custom);
    setOperationAction(ISD::SUB,                MVT::i16, Custom);
    setOperationAction(ISD::SUB,                MVT::i32, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
    for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
      setOperationAction(ISD::FFLOOR,           RoundedTy,  Legal);
      setOperationAction(ISD::FCEIL,            RoundedTy,  Legal);
      setOperationAction(ISD::FTRUNC,           RoundedTy,  Legal);
      setOperationAction(ISD::FRINT,            RoundedTy,  Legal);
      setOperationAction(ISD::FNEARBYINT,       RoundedTy,  Legal);
    }

    setOperationAction(ISD::SMAX,               MVT::v16i8, Legal);
    setOperationAction(ISD::SMAX,               MVT::v4i32, Legal);
    setOperationAction(ISD::UMAX,               MVT::v8i16, Legal);
    setOperationAction(ISD::UMAX,               MVT::v4i32, Legal);
    setOperationAction(ISD::SMIN,               MVT::v16i8, Legal);
    setOperationAction(ISD::SMIN,               MVT::v4i32, Legal);
    setOperationAction(ISD::UMIN,               MVT::v8i16, Legal);
    setOperationAction(ISD::UMIN,               MVT::v4i32, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL,                MVT::v4i32, Legal);

    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
    setOperationAction(ISD::VSELECT,            MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
      setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8,  Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8,  Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8,  Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
    }

    // i8 vectors are custom because the source register and source memory
    // operand types are not the same width.
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
    for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::ROTL, VT, Custom);

    // XOP can efficiently perform BITREVERSE with VPPERM.
    for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
      setOperationAction(ISD::BITREVERSE, VT, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::BITREVERSE, VT, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
    bool HasInt256 = Subtarget.hasInt256();

    addRegisterClass(MVT::v32i8,  Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                     : &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                     : &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                     : &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                     : &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                     : &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                     : &X86::VR256RegClass);

    for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
      setOperationAction(ISD::FFLOOR,     VT, Legal);
      setOperationAction(ISD::FCEIL,      VT, Legal);
      setOperationAction(ISD::FTRUNC,     VT, Legal);
      setOperationAction(ISD::FRINT,      VT, Legal);
      setOperationAction(ISD::FNEARBYINT, VT, Legal);
      setOperationAction(ISD::FNEG,       VT, Custom);
      setOperationAction(ISD::FABS,       VT, Custom);
      setOperationAction(ISD::FCOPYSIGN,  VT, Custom);
    }

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
    setOperationAction(ISD::FP_TO_SINT,         MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP,         MVT::v8i32, Legal);

    setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v8f32, Custom);

    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);

    // In the customized shift lowering, the legal v8i32/v4i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
    }

    // These types need custom splitting if their input is a 128-bit vector.
    setOperationAction(ISD::SIGN_EXTEND,       MVT::v8i64,  Custom);
    setOperationAction(ISD::SIGN_EXTEND,       MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND,       MVT::v8i64,  Custom);
    setOperationAction(ISD::ZERO_EXTEND,       MVT::v16i32, Custom);

    setOperationAction(ISD::ROTL,              MVT::v8i32,  Custom);
    setOperationAction(ISD::ROTL,              MVT::v16i16, Custom);

    // With BWI, expanding (and promoting the shifts) is better.
    if (!Subtarget.hasBWI())
      setOperationAction(ISD::ROTL,            MVT::v32i8,  Custom);

    setOperationAction(ISD::SELECT,            MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT,            MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT,            MVT::v8i32, Custom);
    setOperationAction(ISD::SELECT,            MVT::v16i16, Custom);
    setOperationAction(ISD::SELECT,            MVT::v32i8, Custom);
    setOperationAction(ISD::SELECT,            MVT::v8f32, Custom);

    for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SIGN_EXTEND,     VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND,     VT, Custom);
      setOperationAction(ISD::ANY_EXTEND,      VT, Custom);
    }

    setOperationAction(ISD::TRUNCATE,          MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE,          MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE,          MVT::v4i32, Custom);
    setOperationAction(ISD::BITREVERSE,        MVT::v32i8, Custom);

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SETCC,           VT, Custom);
      setOperationAction(ISD::CTPOP,           VT, Custom);
      setOperationAction(ISD::CTLZ,            VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }

    if (Subtarget.hasAnyFMA()) {
      for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
                       MVT::v2f64, MVT::v4f64 })
        setOperationAction(ISD::FMA, VT, Legal);
    }

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
    }

    setOperationAction(ISD::MUL,       MVT::v4i64,  Custom);
    setOperationAction(ISD::MUL,       MVT::v8i32,  HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MUL,       MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MUL,       MVT::v32i8,  Custom);

    setOperationAction(ISD::MULHU,     MVT::v8i32,  Custom);
    setOperationAction(ISD::MULHS,     MVT::v8i32,  Custom);
    setOperationAction(ISD::MULHU,     MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MULHS,     MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MULHU,     MVT::v32i8,  Custom);
    setOperationAction(ISD::MULHS,     MVT::v32i8,  Custom);

    setOperationAction(ISD::ABS,       MVT::v4i64,  Custom);
    setOperationAction(ISD::SMAX,      MVT::v4i64,  Custom);
    setOperationAction(ISD::UMAX,      MVT::v4i64,  Custom);
    setOperationAction(ISD::SMIN,      MVT::v4i64,  Custom);
    setOperationAction(ISD::UMIN,      MVT::v4i64,  Custom);

    setOperationAction(ISD::UADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
    setOperationAction(ISD::USUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SSUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
    setOperationAction(ISD::UADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::USUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SSUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
      setOperationAction(ISD::ABS,  VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
    }

    for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
    }

    if (HasInt256) {
      // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
      // when we have a 256-bit-wide blend with immediate.
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);

      // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
      for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
        setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
        setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i8,  Legal);
        setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i8,  Legal);
        setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i16, Legal);
        setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i16, Legal);
        setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i32, Legal);
      }
    }

    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
                     MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
      setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
      setOperationAction(ISD::MSTORE, VT, Legal);
    }

    // Extract subvector is special because the value type
    // (result) is 128-bit but the source is 256-bit wide.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v4f32, MVT::v2f64 }) {
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
    }

    // Custom lower several nodes for 256-bit types.
    for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
                    MVT::v8f32, MVT::v4f64 }) {
      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
      setOperationAction(ISD::VSELECT,            VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
      setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
      setOperationAction(ISD::STORE,              VT, Custom);
    }

    if (HasInt256) {
      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

      // Custom legalize 2x32 to get a little better code.
      setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
      setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);

      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
                       MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
        setOperationAction(ISD::MGATHER,  VT, Custom);
    }
  }

  // This block controls legalization of the mask vector sizes that are
  // available with AVX512. 512-bit vectors are in a separate block controlled
  // by useAVX512Regs.
  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
    addRegisterClass(MVT::v1i1,   &X86::VK1RegClass);
    addRegisterClass(MVT::v2i1,   &X86::VK2RegClass);
    addRegisterClass(MVT::v4i1,   &X86::VK4RegClass);
    addRegisterClass(MVT::v8i1,   &X86::VK8RegClass);
    addRegisterClass(MVT::v16i1,  &X86::VK16RegClass);

    setOperationAction(ISD::SELECT,             MVT::v1i1, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v1i1, Custom);

    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1,  MVT::v8i32);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1,  MVT::v8i32);
    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1,  MVT::v4i32);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1,  MVT::v4i32);
    setOperationAction(ISD::FP_TO_SINT,         MVT::v2i1,  Custom);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v2i1,  Custom);

    // There is no byte sized k-register load or store without AVX512DQ.
    if (!Subtarget.hasDQI()) {
      setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
      setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
      setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
      setOperationAction(ISD::LOAD, MVT::v8i1, Custom);

      setOperationAction(ISD::STORE, MVT::v1i1, Custom);
      setOperationAction(ISD::STORE, MVT::v2i1, Custom);
      setOperationAction(ISD::STORE, MVT::v4i1, Custom);
      setOperationAction(ISD::STORE, MVT::v8i1, Custom);
    }

    // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
      setOperationAction(ISD::ANY_EXTEND,  VT, Custom);
    }

    for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
      setOperationAction(ISD::ADD,              VT, Custom);
      setOperationAction(ISD::SUB,              VT, Custom);
      setOperationAction(ISD::MUL,              VT, Custom);
      setOperationAction(ISD::SETCC,            VT, Custom);
      setOperationAction(ISD::SELECT,           VT, Custom);
      setOperationAction(ISD::TRUNCATE,         VT, Custom);
      setOperationAction(ISD::UADDSAT,          VT, Custom);
      setOperationAction(ISD::SADDSAT,          VT, Custom);
      setOperationAction(ISD::USUBSAT,          VT, Custom);
      setOperationAction(ISD::SSUBSAT,          VT, Custom);

      setOperationAction(ISD::BUILD_VECTOR,     VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS,   VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,   VT,  Custom);
      setOperationAction(ISD::VSELECT,          VT,  Expand);
    }

    for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
  }

  // This block controls legalization for 512-bit operations with 32/64 bit
  // elements. 512-bits can be disabled based on prefer-vector-width and
  // required-vector-width function attributes.
  if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
    addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
    addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
    addRegisterClass(MVT::v8i64,  &X86::VR512RegClass);
    addRegisterClass(MVT::v8f64,  &X86::VR512RegClass);

    for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
      setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8,  Legal);
      setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
      setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i8,   Legal);
      setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i16,  Legal);
      setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i32,  Legal);
    }

    for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
      setOperationAction(ISD::FNEG,  VT, Custom);
      setOperationAction(ISD::FABS,  VT, Custom);
      setOperationAction(ISD::FMA,   VT, Legal);
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT,         MVT::v16i32, Legal);
    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i16, MVT::v16i32);
    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i8, MVT::v16i32);
    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i1, MVT::v16i32);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v16i32, Legal);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i1, MVT::v16i32);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i8, MVT::v16i32);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i16, MVT::v16i32);
    setOperationAction(ISD::SINT_TO_FP,         MVT::v16i32, Legal);
    setOperationAction(ISD::UINT_TO_FP,         MVT::v16i32, Legal);

    setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v16f32, Custom);

    setTruncStoreAction(MVT::v8i64,   MVT::v8i8,   Legal);
    setTruncStoreAction(MVT::v8i64,   MVT::v8i16,  Legal);
    setTruncStoreAction(MVT::v8i64,   MVT::v8i32,  Legal);
    setTruncStoreAction(MVT::v16i32,  MVT::v16i8,  Legal);
    setTruncStoreAction(MVT::v16i32,  MVT::v16i16, Legal);

    // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
    // to 512-bit rather than use the AVX2 instructions so that we can use
    // k-masks.
    if (!Subtarget.hasVLX()) {
      for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
           MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
        setOperationAction(ISD::MLOAD,  VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
      }
    }

    setOperationAction(ISD::TRUNCATE,           MVT::v8i32, Custom);
    setOperationAction(ISD::TRUNCATE,           MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND,        MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND,        MVT::v8i64, Custom);
    setOperationAction(ISD::ANY_EXTEND,         MVT::v16i32, Custom);
    setOperationAction(ISD::ANY_EXTEND,         MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND,        MVT::v16i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND,        MVT::v8i64, Custom);

    // Need to custom widen this if we don't have AVX512BW.
    setOperationAction(ISD::ANY_EXTEND,         MVT::v8i8, Custom);
    setOperationAction(ISD::ZERO_EXTEND,        MVT::v8i8, Custom);
    setOperationAction(ISD::SIGN_EXTEND,        MVT::v8i8, Custom);

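    // FFLOOR/FCEIL/FTRUNC/FRINT/FNEARBYINT map onto the AVX512 VRNDSCALEPS/PD
    // instructions for 512-bit FP vectors, so they are Legal here.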
    for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
      setOperationAction(ISD::FFLOOR,           VT, Legal);
      setOperationAction(ISD::FCEIL,            VT, Legal);
      setOperationAction(ISD::FTRUNC,           VT, Legal);
      setOperationAction(ISD::FRINT,            VT, Legal);
      setOperationAction(ISD::FNEARBYINT,       VT, Legal);

      setOperationAction(ISD::SELECT,           VT, Custom);
    }

    // Without BWI we need to use custom lowering to handle MVT::v64i8 input.
    for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v64i8}) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
    }

    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v8f64,  Custom);
    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v8i64,  Custom);
    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v16f32,  Custom);
    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v16i32,  Custom);

    setOperationAction(ISD::MUL,                MVT::v8i64, Custom);
    setOperationAction(ISD::MUL,                MVT::v16i32, Legal);

    setOperationAction(ISD::MULHU,              MVT::v16i32,  Custom);
    setOperationAction(ISD::MULHS,              MVT::v16i32,  Custom);

    for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
      setOperationAction(ISD::SMAX,             VT, Legal);
      setOperationAction(ISD::UMAX,             VT, Legal);
      setOperationAction(ISD::SMIN,             VT, Legal);
      setOperationAction(ISD::UMIN,             VT, Legal);
      setOperationAction(ISD::ABS,              VT, Legal);
      setOperationAction(ISD::SRL,              VT, Custom);
      setOperationAction(ISD::SHL,              VT, Custom);
      setOperationAction(ISD::SRA,              VT, Custom);
      setOperationAction(ISD::CTPOP,            VT, Custom);
      setOperationAction(ISD::ROTL,             VT, Custom);
      setOperationAction(ISD::ROTR,             VT, Custom);
      setOperationAction(ISD::SETCC,            VT, Custom);
      setOperationAction(ISD::SELECT,           VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }

    if (Subtarget.hasDQI()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);

      setOperationAction(ISD::MUL,        MVT::v8i64, Legal);
    }

    if (Subtarget.hasCDI()) {
      // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
      for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
        setOperationAction(ISD::CTLZ,            VT, Legal);
      }
    } // Subtarget.hasCDI()

    if (Subtarget.hasVPOPCNTDQ()) {
      for (auto VT : { MVT::v16i32, MVT::v8i64 })
        setOperationAction(ISD::CTPOP, VT, Legal);
    }

    // Extract subvector is special because the value type
    // (result) is 256-bit but the source is 512-bit wide.
    // 128-bit extracts were already made Legal under AVX1.
    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
                     MVT::v8f32, MVT::v4f64 })
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);

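    // Shuffles, element insert/extract, masked loads/stores and gather/scatter
    // for the 512-bit types with 32/64-bit elements.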
    for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
      setOperationAction(ISD::VECTOR_SHUFFLE,      VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT,   VT, Custom);
      setOperationAction(ISD::BUILD_VECTOR,        VT, Custom);
      setOperationAction(ISD::VSELECT,             VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT,  VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR,    VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR,    VT, Legal);
      setOperationAction(ISD::MLOAD,               VT, Legal);
      setOperationAction(ISD::MSTORE,              VT, Legal);
      setOperationAction(ISD::MGATHER,             VT, Custom);
      setOperationAction(ISD::MSCATTER,            VT, Custom);
    }
    if (!Subtarget.hasBWI()) {
      // Need to custom split v32i16/v64i8 bitcasts.
      setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
      setOperationAction(ISD::BITCAST, MVT::v64i8,  Custom);

      // Better to split these into two 256-bit ops.
      setOperationAction(ISD::BITREVERSE, MVT::v8i64, Custom);
      setOperationAction(ISD::BITREVERSE, MVT::v16i32, Custom);
    }

    if (Subtarget.hasVBMI2()) {
      for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
        setOperationAction(ISD::FSHL, VT, Custom);
        setOperationAction(ISD::FSHR, VT, Custom);
      }
    }
  } // has AVX-512

  // This block controls legalization for operations that don't have
  // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
  // narrower widths.
  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
    // These operations are handled on non-VLX by artificially widening in
    // isel patterns.
    // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?

    setOperationAction(ISD::FP_TO_UINT,         MVT::v8i32, Legal);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v2i32, Custom);
    setOperationAction(ISD::UINT_TO_FP,         MVT::v8i32, Legal);
    setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Legal);

    for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
      setOperationAction(ISD::SMAX, VT, Legal);
      setOperationAction(ISD::UMAX, VT, Legal);
      setOperationAction(ISD::SMIN, VT, Legal);
      setOperationAction(ISD::UMIN, VT, Legal);
      setOperationAction(ISD::ABS,  VT, Legal);
    }

    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
      setOperationAction(ISD::ROTL,     VT, Custom);
      setOperationAction(ISD::ROTR,     VT, Custom);
    }

    // Custom legalize 2x32 to get a little better code.
    setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
    setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);

    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
                     MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
      setOperationAction(ISD::MSCATTER, VT, Custom);

    if (Subtarget.hasDQI()) {
      for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
        setOperationAction(ISD::SINT_TO_FP,     VT, Legal);
        setOperationAction(ISD::UINT_TO_FP,     VT, Legal);
        setOperationAction(ISD::FP_TO_SINT,     VT, Legal);
        setOperationAction(ISD::FP_TO_UINT,     VT, Legal);

        setOperationAction(ISD::MUL,            VT, Legal);
      }
    }

    if (Subtarget.hasCDI()) {
      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
        setOperationAction(ISD::CTLZ,            VT, Legal);
      }
    } // Subtarget.hasCDI()

    if (Subtarget.hasVPOPCNTDQ()) {
      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
        setOperationAction(ISD::CTPOP, VT, Legal);
    }
  }

  // This block controls legalization of v32i1/v64i1, which are available with
  // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
  // useBWIRegs.
  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
    addRegisterClass(MVT::v32i1,  &X86::VK32RegClass);
    addRegisterClass(MVT::v64i1,  &X86::VK64RegClass);

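    // As with the narrower vXi1 mask types, most operations on v32i1/v64i1 are
    // custom lowered.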
    for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
      setOperationAction(ISD::ADD,                VT, Custom);
      setOperationAction(ISD::SUB,                VT, Custom);
      setOperationAction(ISD::MUL,                VT, Custom);
      setOperationAction(ISD::VSELECT,            VT, Expand);
      setOperationAction(ISD::UADDSAT,            VT, Custom);
      setOperationAction(ISD::SADDSAT,            VT, Custom);
      setOperationAction(ISD::USUBSAT,            VT, Custom);
      setOperationAction(ISD::SSUBSAT,            VT, Custom);

      setOperationAction(ISD::TRUNCATE,           VT, Custom);
      setOperationAction(ISD::SETCC,              VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
      setOperationAction(ISD::SELECT,             VT, Custom);
      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
    }

    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v32i1, Custom);
    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v64i1, Custom);
    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v32i1, Custom);
    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v64i1, Custom);
    for (auto VT : { MVT::v16i1, MVT::v32i1 })
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

    // Extends from v32i1 masks to 256-bit vectors.
    setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i8, Custom);
    setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i8, Custom);
    setOperationAction(ISD::ANY_EXTEND,         MVT::v32i8, Custom);
  }

  // This block controls legalization for v32i16 and v64i8. 512-bit support can be
  // disabled based on prefer-vector-width and required-vector-width function
  // attributes.
  if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
    addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
    addRegisterClass(MVT::v64i8,  &X86::VR512RegClass);

    // Extends from v64i1 masks to 512-bit vectors.
    setOperationAction(ISD::SIGN_EXTEND,        MVT::v64i8, Custom);
    setOperationAction(ISD::ZERO_EXTEND,        MVT::v64i8, Custom);
    setOperationAction(ISD::ANY_EXTEND,         MVT::v64i8, Custom);

    setOperationAction(ISD::MUL,                MVT::v32i16, Legal);
    setOperationAction(ISD::MUL,                MVT::v64i8, Custom);
    setOperationAction(ISD::MULHS,              MVT::v32i16, Legal);
    setOperationAction(ISD::MULHU,              MVT::v32i16, Legal);
    setOperationAction(ISD::MULHS,              MVT::v64i8, Custom);
    setOperationAction(ISD::MULHU,              MVT::v64i8, Custom);
    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v32i16, Custom);
    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v64i8, Custom);
    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v32i16, Legal);
    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v64i8, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v32i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v64i8, Custom);
    setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i16, Custom);
    setOperationAction(ISD::ANY_EXTEND,         MVT::v32i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v32i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v64i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v32i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v64i8, Custom);
    setOperationAction(ISD::TRUNCATE,           MVT::v32i8, Custom);
    setOperationAction(ISD::BITREVERSE,         MVT::v64i8, Custom);

    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);

    setTruncStoreAction(MVT::v32i16,  MVT::v32i8, Legal);

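    // Operations shared by v64i8 and v32i16. Min/max, abs and saturating
    // add/sub have direct AVX512BW forms; shifts, CTPOP and CTLZ are custom
    // lowered.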
    for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VSELECT,      VT, Custom);
      setOperationAction(ISD::ABS,          VT, Legal);
      setOperationAction(ISD::SRL,          VT, Custom);
      setOperationAction(ISD::SHL,          VT, Custom);
      setOperationAction(ISD::SRA,          VT, Custom);
      setOperationAction(ISD::MLOAD,        VT, Legal);
      setOperationAction(ISD::MSTORE,       VT, Legal);
      setOperationAction(ISD::CTPOP,        VT, Custom);
      setOperationAction(ISD::CTLZ,         VT, Custom);
      setOperationAction(ISD::SMAX,         VT, Legal);
      setOperationAction(ISD::UMAX,         VT, Legal);
      setOperationAction(ISD::SMIN,         VT, Legal);
      setOperationAction(ISD::UMIN,         VT, Legal);
      setOperationAction(ISD::SETCC,        VT, Custom);
      setOperationAction(ISD::UADDSAT,      VT, Legal);
      setOperationAction(ISD::SADDSAT,      VT, Legal);
      setOperationAction(ISD::USUBSAT,      VT, Legal);
      setOperationAction(ISD::SSUBSAT,      VT, Legal);
      setOperationAction(ISD::SELECT,       VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }

    for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
      setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
    }

    if (Subtarget.hasBITALG()) {
      for (auto VT : { MVT::v64i8, MVT::v32i16 })
        setOperationAction(ISD::CTPOP, VT, Legal);
    }

    if (Subtarget.hasVBMI2()) {
      setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
      setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
    }
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
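    // Masked loads/stores of i8/i16 element vectors need BWI. Without VLX the
    // 128/256-bit forms are widened to 512 bits by custom lowering so the
    // k-masked instructions can still be used.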
    for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
      setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
      setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
    }

    // These operations are handled on non-VLX by artificially widening in
    // isel patterns.
    // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?

    if (Subtarget.hasBITALG()) {
      for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
        setOperationAction(ISD::CTPOP, VT, Legal);
    }
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
    setTruncStoreAction(MVT::v4i64, MVT::v4i8,  Legal);
    setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
    setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
    setTruncStoreAction(MVT::v8i32, MVT::v8i8,  Legal);
    setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);

    setTruncStoreAction(MVT::v2i64, MVT::v2i8,  Legal);
    setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
    setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
    setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
    setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);

    if (Subtarget.hasDQI()) {
      // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
      // v2f32 UINT_TO_FP is already custom under SSE2.
      setOperationAction(ISD::SINT_TO_FP,    MVT::v2f32, Custom);
      assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
             "Unexpected operation action!");
      // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
      setOperationAction(ISD::FP_TO_SINT,    MVT::v2f32, Custom);
      setOperationAction(ISD::FP_TO_UINT,    MVT::v2f32, Custom);
    }

    if (Subtarget.hasBWI()) {
      setTruncStoreAction(MVT::v16i16,  MVT::v16i8, Legal);
      setTruncStoreAction(MVT::v8i16,   MVT::v8i8,  Legal);
    }

    if (Subtarget.hasVBMI2()) {
      // TODO: Make these legal even without VLX?
      for (auto VT : { MVT::v8i16,  MVT::v4i32, MVT::v2i64,
                       MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
        setOperationAction(ISD::FSHL, VT, Custom);
        setOperationAction(ISD::FSHR, VT, Custom);
      }
    }

    setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  if (!Subtarget.is64Bit()) {
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
  }

  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
  // handle type legalization for these operations here.
  //
  // FIXME: We really should do custom legalization for addition and
  // subtraction on x86-32 once PR3203 is fixed.  We really can't do much better
  // than generic legalization for 64-bit multiplication-with-overflow, though.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    // Add/Sub/Mul with overflow operations are custom lowered.
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SMULO, VT, Custom);
    setOperationAction(ISD::UMULO, VT, Custom);

    // Support carry in as value rather than glue.
    setOperationAction(ISD::ADDCARRY, VT, Custom);
    setOperationAction(ISD::SUBCARRY, VT, Custom);
    setOperationAction(ISD::SETCCCARRY, VT, Custom);
  }

  if (!Subtarget.is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
    setLibcallName(RTLIB::MUL_I128, nullptr);
  }

  // Combine sin / cos into _sincos_stret if it is available.
  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
  }

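  // On Win64, 128-bit integer division and remainder are lowered to library
  // calls whose i128 arguments are passed indirectly, so they need custom
  // lowering.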
  if (Subtarget.isTargetWin64()) {
    setOperationAction(ISD::SDIV, MVT::i128, Custom);
    setOperationAction(ISD::UDIV, MVT::i128, Custom);
    setOperationAction(ISD::SREM, MVT::i128, Custom);
    setOperationAction(ISD::UREM, MVT::i128, Custom);
    setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
  }

  // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
  // is. We should promote the value to 64-bits to solve this.
  // This is what the CRT headers do - `fmodf` is an inline header
  // function casting to f64 and calling `fmod`.
  if (Subtarget.is32Bit() &&
      (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
    for (ISD::NodeType Op :
         {ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
          ISD::FLOG10, ISD::FPOW, ISD::FSIN})
      if (isOperationExpand(Op, MVT::f32))
        setOperationAction(Op, MVT::f32, Promote);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::CONCAT_VECTORS);
  setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
  setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::VSELECT);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FNEG);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::MLOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::MSTORE);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
  setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
  setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::MSCATTER);
  setTargetDAGCombine(ISD::MGATHER);

  computeRegisterProperties(Subtarget.getRegisterInfo());

  MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  MaxStoresPerMemsetOptSize = 8;
  MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = 4;
  MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = 4;

  // TODO: These control memcmp expansion in CGP and could be raised higher, but
  // that needs to be benchmarked and balanced with the potential use of vector
  // load/store types (PR33329, PR33914).
  MaxLoadsPerMemcmp = 2;
  MaxLoadsPerMemcmpOptSize = 2;

  // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
  setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));

  // An out-of-order CPU can speculatively execute past a predictable branch,
  // but a conditional move could be stalled by an expensive earlier operation.
  PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
  EnableExtLdPromotion = true;
  setPrefFunctionAlignment(Align(16));

  verifyIntrinsicTables();
}

// This has so far only been implemented for 64-bit MachO.
bool X86TargetLowering::useLoadStackGuardNode() const {
  return Subtarget.isTargetMachO() && Subtarget.is64Bit();
}

bool X86TargetLowering::useStackGuardXorFP() const {
  // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
  return Subtarget.getTargetTriple().isOSMSVCRT();
}

SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                               const SDLoc &DL) const {
  EVT PtrTy = getPointerTy(DAG.getDataLayout());
  unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
  MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
  return SDValue(Node, 0);
}

TargetLoweringBase::LegalizeTypeAction
X86TargetLowering::getPreferredVectorAction(MVT VT) const {
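  // Without BWI the widest legal mask type is v16i1, so split v32i1 into two
  // halves rather than widening it.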
  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
    return TypeSplitVector;

  if (VT.getVectorNumElements() != 1 &&
      VT.getVectorElementType() != MVT::i1)
    return TypeWidenVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                     CallingConv::ID CC,
                                                     EVT VT) const {
  // v32i1 vectors should be promoted to v32i8 to match avx2.
  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
    return MVT::v32i8;
  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
      Subtarget.hasAVX512() &&
      (!isPowerOf2_32(VT.getVectorNumElements()) ||
       (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
       (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
    return MVT::i8;
  // FIXME: Should we just make these types legal and custom split operations?
  if ((VT == MVT::v32i16 || VT == MVT::v64i8) &&
      Subtarget.hasAVX512() && !Subtarget.hasBWI() && !EnableOldKNLABI)
    return MVT::v16i32;
  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                          CallingConv::ID CC,
                                                          EVT VT) const {
  // v32i1 vectors should be promoted to v32i8 to match avx2.
  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
    return 1;
  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
      Subtarget.hasAVX512() &&
      (!isPowerOf2_32(VT.getVectorNumElements()) ||
       (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
       (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
    return VT.getVectorNumElements();
  // FIXME: Should we just make these types legal and custom split operations?
  if ((VT == MVT::v32i16 || VT == MVT::v64i8) &&
      Subtarget.hasAVX512() && !Subtarget.hasBWI() && !EnableOldKNLABI)
    return 1;
  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
      Subtarget.hasAVX512() &&
      (!isPowerOf2_32(VT.getVectorNumElements()) ||
       (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
       (VT.getVectorNumElements() > 64 && Subtarget.hasBWI()))) {
    RegisterVT = MVT::i8;
    IntermediateVT = MVT::i1;
    NumIntermediates = VT.getVectorNumElements();
    return NumIntermediates;
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(Context, CC, VT, IntermediateVT,
                                              NumIntermediates, RegisterVT);
}

EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
                                          LLVMContext& Context,
                                          EVT VT) const {
  if (!VT.isVector())
    return MVT::i8;

  if (Subtarget.hasAVX512()) {
    const unsigned NumElts = VT.getVectorNumElements();

    // Figure out what this type will be legalized to.
    EVT LegalVT = VT;
    while (getTypeAction(Context, LegalVT) != TypeLegal)
      LegalVT = getTypeToTransformTo(Context, LegalVT);

    // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
    if (LegalVT.getSimpleVT().is512BitVector())
      return EVT::getVectorVT(Context, MVT::i1, NumElts);

    if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
      // If we legalized to less than a 512-bit vector, then we will use a vXi1
      // compare for vXi32/vXi64 for sure. If we have BWI we will also support
      // vXi16/vXi8.
      MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
      if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
        return EVT::getVectorVT(Context, MVT::i1, NumElts);
    }
  }

  return VT.changeVectorElementTypeToInteger();
}

/// Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
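  // A 128-bit vector anywhere in the aggregate forces 16-byte alignment;
  // arrays and structs are searched recursively for such an element.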
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
}

/// Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  if (Subtarget.is64Bit()) {
    // Max of 8 and alignment of type.
    unsigned TyAlign = DL.getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget.hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, it is safe to assume that the destination
/// alignment can satisfy any constraint. Similarly, if SrcAlign is zero there
/// is no need to check it against an alignment requirement,
/// probably because the source does not need to be loaded. If 'IsMemset' is
/// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
/// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
/// source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
/// For vector ops we check that the overall size isn't larger than our
/// preferred vector width.
EVT X86TargetLowering::getOptimalMemOpType(
    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
    bool ZeroMemset, bool MemcpyStrSrc,
    const AttributeList &FuncAttributes) const {
  if (!FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
    if (Size >= 16 && (!Subtarget.isUnalignedMem16Slow() ||
                       ((DstAlign == 0 || DstAlign >= 16) &&
                        (SrcAlign == 0 || SrcAlign >= 16)))) {
      // FIXME: Check if unaligned 64-byte accesses are slow.
      if (Size >= 64 && Subtarget.hasAVX512() &&
          (Subtarget.getPreferVectorWidth() >= 512)) {
        return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
      }
      // FIXME: Check if unaligned 32-byte accesses are slow.
      if (Size >= 32 && Subtarget.hasAVX() &&
          (Subtarget.getPreferVectorWidth() >= 256)) {
        // Although this isn't a well-supported type for AVX1, we'll let
        // legalization and shuffle lowering produce the optimal codegen. If we
        // choose an optimal type with a vector element larger than a byte,
        // getMemsetStores() may create an intermediate splat (using an integer
        // multiply) before we splat as a vector.
        return MVT::v32i8;
      }
      if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
        return MVT::v16i8;
      // TODO: Can SSE1 handle a byte vector?
      // If we have SSE1 registers we should be able to use them.
      if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
          (Subtarget.getPreferVectorWidth() >= 128))
        return MVT::v4f32;
    } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
               !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
      // Do not use f64 to lower memcpy if source is string constant. It's
      // better to use i32 to avoid the loads.
      // Also, do not use f64 to lower memset unless this is a memset of zeros.
      // The gymnastics of splatting a byte value into an XMM register and then
      // only using 8-byte stores (because this is a CPU with slow unaligned
      // 16-byte accesses) makes that a loser.
      return MVT::f64;
    }
  }
  // This is a compromise. If we reach here, unaligned accesses may be slow on
  // this target. However, creating smaller, aligned accesses could be even
  // slower and would certainly be a lot more code.
  if (Subtarget.is64Bit() && Size >= 8)
    return MVT::i64;
  return MVT::i32;
}

bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
  if (VT == MVT::f32)
    return X86ScalarSSEf32;
  else if (VT == MVT::f64)
    return X86ScalarSSEf64;
  return true;
}

bool X86TargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (Fast) {
    switch (VT.getSizeInBits()) {
    default:
      // 8-byte and under are always assumed to be fast.
      *Fast = true;
      break;
    case 128:
      *Fast = !Subtarget.isUnalignedMem16Slow();
      break;
    case 256:
      *Fast = !Subtarget.isUnalignedMem32Slow();
      break;
    // TODO: What about AVX-512 (512-bit) accesses?
    }
  }
  // NonTemporal vector memory ops must be aligned.
  if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
    // NT loads can only be vector aligned, so if it's less aligned than the
    // minimum vector size (which we can split the vector down to), we might as
    // well use a regular unaligned vector load.
    // We don't have any NT loads pre-SSE41.
    if (!!(Flags & MachineMemOperand::MOLoad))
      return (Align < 16 || !Subtarget.hasSSE41());
    return false;
  }
  // Misaligned accesses of any size are always allowed.
  return true;
}

/// Return the entry encoding for a jump table in the
/// current function.  The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
  // symbol.
  if (isPositionIndependent() && Subtarget.isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;

  // Otherwise, use the normal jump table encoding heuristics.
  return TargetLowering::getJumpTableEncoding();
}

bool X86TargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                              ArgListTy &Args) const {

  // Only relabel X86-32 for C / Stdcall CCs.
  if (Subtarget.is64Bit())
    return;
  if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
    return;
  unsigned ParamRegs = 0;
  if (auto *M = MF->getFunction().getParent())
    ParamRegs = M->getNumberRegisterParameters();

  // Mark the first N integer arguments as being passed in registers.
  for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
    Type *T = Args[Idx].Ty;
    if (T->isIntOrPtrTy())
      if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
        unsigned numRegs = 1;
        if (MF->getDataLayout().getTypeAllocSize(T) > 4)
          numRegs = 2;
        if (ParamRegs < numRegs)
          return;
        ParamRegs -= numRegs;
        Args[Idx].IsInReg = true;
      }
  }
}

const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                             const MachineBasicBlock *MBB,
                                             unsigned uid,MCContext &Ctx) const{
  assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
  // entries.
  return MCSymbolRefExpr::create(MBB->getSymbol(),
                                 MCSymbolRefExpr::VK_GOTOFF, Ctx);
}

/// Returns relocation base for the given PIC jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget.is64Bit())
    // This doesn't have SDLoc associated with it, but is not really the
    // same as a Register.
    return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  return Table;
}

/// This returns the relocation base for the given PIC jumptable,
/// the same as getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                             MCContext &Ctx) const {
  // X86-64 uses RIP relative addressing based on the jump table label.
  if (Subtarget.isPICStyleRIPRel())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  // Otherwise, the reference is relative to the PIC base.
  return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
}

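/// Pick the register class used as the representative for register pressure
/// heuristics: scalar integers map to GR32/GR64, MMX to VR64, and all SSE/AVX
/// floating-point and vector types to VR128X.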
std::pair<const TargetRegisterClass *, uint8_t>
X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
    break;
  case MVT::x86mmx:
    RRC = &X86::VR64RegClass;
    break;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
  case MVT::v8f32: case MVT::v4f64:
  case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
  case MVT::v16f32: case MVT::v8f64:
    RRC = &X86::VR128XRegClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}

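// Address space 256 corresponds to the %gs segment register and 257 to %fs.
// The TLS-relative slots used for the stack guard and the SafeStack pointer
// are %fs-based on x86-64 (%gs under the Kernel code model) and %gs-based on
// i386.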
unsigned X86TargetLowering::getAddressSpace() const {
  if (Subtarget.is64Bit())
    return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
  return 256;
}

static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
  return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
         (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
}

static Constant* SegmentOffset(IRBuilder<> &IRB,
                               unsigned Offset, unsigned AddressSpace) {
  return ConstantExpr::getIntToPtr(
      ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
      Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
}

Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
  // glibc, bionic, and Fuchsia have a special slot for the stack guard in
  // tcbhead_t; use it instead of the usual global variable (see
  // sysdeps/{i386,x86_64}/nptl/tls.h)
  if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
    if (Subtarget.isTargetFuchsia()) {
      // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
      return SegmentOffset(IRB, 0x10, getAddressSpace());
    } else {
      // %fs:0x28, unless we're using a Kernel code model, in which case
      // it's %gs:0x28. %gs:0x14 on i386.
      unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
      return SegmentOffset(IRB, Offset, getAddressSpace());
    }
  }

  return TargetLowering::getIRStackGuard(IRB);
}

void X86TargetLowering::insertSSPDeclarations(Module &M) const {
  // MSVC CRT provides functionality for stack protection.
  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
    // MSVC CRT has a global variable holding security cookie.
    M.getOrInsertGlobal("__security_cookie",
                        Type::getInt8PtrTy(M.getContext()));

    // MSVC CRT has a function to validate security cookie.
    FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
        "__security_check_cookie", Type::getVoidTy(M.getContext()),
        Type::getInt8PtrTy(M.getContext()));
    if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
      F->setCallingConv(CallingConv::X86_FastCall);
      F->addAttribute(1, Attribute::AttrKind::InReg);
    }
    return;
  }
  // glibc, bionic, and Fuchsia have a special slot for the stack guard.
  if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
    return;
  TargetLowering::insertSSPDeclarations(M);
}

Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
  // MSVC CRT has a global variable holding security cookie.
  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
    return M.getGlobalVariable("__security_cookie");
  }
  return TargetLowering::getSDagStackGuard(M);
}

Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
  // MSVC CRT has a function to validate security cookie.
  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
    return M.getFunction("__security_check_cookie");
  }
  return TargetLowering::getSSPStackGuardCheck(M);
}

Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
  if (Subtarget.getTargetTriple().isOSContiki())
    return getDefaultSafeStackPointerLocation(IRB, false);

  // Android provides a fixed TLS slot for the SafeStack pointer. See the
  // definition of TLS_SLOT_SAFESTACK in
  // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
  if (Subtarget.isTargetAndroid()) {
    // %fs:0x48, unless we're using a Kernel code model, in which case it's
    // %gs:0x48. %gs:0x24 on i386.
    unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
    return SegmentOffset(IRB, Offset, getAddressSpace());
  }

  // Fuchsia is similar.
  if (Subtarget.isTargetFuchsia()) {
    // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
    return SegmentOffset(IRB, 0x18, getAddressSpace());
  }

  return TargetLowering::getSafeStackPointerLocation(IRB);
}

bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  assert(SrcAS != DestAS && "Expected different address spaces!");

  return SrcAS < 256 && DestAS < 256;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool X86TargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}

const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
  return ScratchRegs;
}

/// Lowers a mask value (v*i1) to the value type of its register location.
/// \returns the DAG node after lowering to the register type.
static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
                               const SDLoc &Dl, SelectionDAG &DAG) {
  EVT ValVT = ValArg.getValueType();

  if (ValVT == MVT::v1i1)
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
                       DAG.getIntPtrConstant(0, Dl));

  if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
      (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
    // Two stage lowering might be required
    // bitcast:   v8i1 -> i8 / v16i1 -> i16
    // anyextend: i8   -> i32 / i16   -> i32
    EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
    SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
    if (ValLoc == MVT::i32)
      ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
    return ValToCopy;
  }

  if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
      (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
    // One stage lowering is required
    // bitcast:   v32i1 -> i32 / v64i1 -> i64
    return DAG.getBitcast(ValLoc, ValArg);
  }

  return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
}

/// Breaks a v64i1 value into two registers and adds the new nodes to the DAG.
static void Passv64i1ArgInRegs(
    const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
    SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, CCValAssign &VA,
    CCValAssign &NextVA, const X86Subtarget &Subtarget) {
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
         "The value should reside in two registers");

  // Before splitting the value we cast it to i64
  Arg = DAG.getBitcast(MVT::i64, Arg);

  // Splitting the value into two i32 types
  SDValue Lo, Hi;
  Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
                   DAG.getConstant(0, Dl, MVT::i32));
  Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
                   DAG.getConstant(1, Dl, MVT::i32));

  // Attach the two i32 values to the corresponding registers.
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
}

SDValue
X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  // In some cases we need to disable registers from the default CSR list.
  // For example, when they are used for argument passing.
  bool ShouldDisableCalleeSavedRegister =
      CallConv == CallingConv::X86_RegCall ||
      MF.getFunction().hasFnAttribute("no_caller_saved_registers");

  if (CallConv == CallingConv::X86_INTR && !Outs.empty())
    report_fatal_error("X86 interrupts may not return any value");

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_X86);

  SDValue Flag;
  SmallVector<SDValue, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
                   MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
       ++I, ++OutsIndex) {
    CCValAssign &VA = RVLocs[I];
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Add the register to the CalleeSaveDisableRegs list.
    if (ShouldDisableCalleeSavedRegister)
      MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());

    SDValue ValToCopy = OutVals[OutsIndex];
    EVT ValVT = ValToCopy.getValueType();

    // Promote values to the appropriate types.
    if (VA.getLocInfo() == CCValAssign::SExt)
      ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::ZExt)
      ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::AExt) {
      if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
        ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
      else
        ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
    }
    else if (VA.getLocInfo() == CCValAssign::BCvt)
      ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);

    assert(VA.getLocInfo() != CCValAssign::FPExt &&
           "Unexpected FP-extend for return value.");

    // If this is x86-64, and we disabled SSE, we can't return FP values,
    // or SSE or MMX vectors.
    if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
         VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
        (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
      VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
    } else if (ValVT == MVT::f64 &&
               (Subtarget.is64Bit() && !Subtarget.hasSSE2())) {
      // Likewise we can't return F64 values with SSE1 only.  gcc does so, but
      // llvm-gcc has never done it right and no one has noticed, so this
      // should be OK for now.
      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
      VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
    }

    // Returns in ST0/ST1 are handled specially: these are pushed as operands to
    // the RET instruction and handled by the FP Stackifier.
    if (VA.getLocReg() == X86::FP0 ||
        VA.getLocReg() == X86::FP1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(VA.getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
      RetOps.push_back(ValToCopy);
      // Don't emit a copytoreg.
      continue;
    }

    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
    // which is returned in RAX / RDX.
    if (Subtarget.is64Bit()) {
      if (ValVT == MVT::x86mmx) {
        if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
          ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                                  ValToCopy);
          // If we don't have SSE2 available, convert to v4f32 so the generated
          // register is legal.
          if (!Subtarget.hasSSE2())
            ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
        }
      }
    }

    SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");

      Passv64i1ArgInRegs(dl, DAG, ValToCopy, RegsToPass, VA, RVLocs[++I],
                         Subtarget);

      assert(2 == RegsToPass.size() &&
             "Expecting two registers after Pass64BitArgInRegs");

      // Add the second register to the CalleeSaveDisableRegs list.
      if (ShouldDisableCalleeSavedRegister)
        MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
    } else {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
    }

    // Add nodes to the DAG and add the values into the RetOps list
    for (auto &Reg : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
    }
  }

  // Swift calling convention does not require that we copy the sret argument
  // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.

  // All x86 ABIs require that for returning structs by value we copy
  // the sret argument into %rax/%eax (depending on ABI) for the return.
  // We saved the argument into a virtual register in the entry block,
  // so now we copy the value out and into %rax/%eax.
  //
  // Checking Function.hasStructRetAttr() here is insufficient because the IR
  // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
  // false, then an sret argument may be implicitly inserted in the SelDAG. In
  // either case FuncInfo->setSRetReturnReg() will have been called.
  if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
    // When we have both sret and another return value, we should use the
    // original Chain stored in RetOps[0], instead of the current Chain updated
    // in the above loop. If we only have sret, RetOps[0] equals to Chain.

    // For the case of sret and another return value, we have
    //   Chain_0 at the function entry
    //   Chain_1 = getCopyToReg(Chain_0) in the above loop
    // If we use Chain_1 in getCopyFromReg, we will have
    //   Val = getCopyFromReg(Chain_1)
    //   Chain_2 = getCopyToReg(Chain_1, Val) from below

    // getCopyToReg(Chain_0) will be glued together with
    // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
    // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
    //   Data dependency from Unit B to Unit A due to usage of Val in
    //     getCopyToReg(Chain_1, Val)
    //   Chain dependency from Unit A to Unit B

    // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
    SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
                                     getPointerTy(MF.getDataLayout()));

    unsigned RetValReg
        = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
          X86::RAX : X86::EAX;
    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
    Flag = Chain.getValue(1);

    // RAX/EAX now acts like a return value.
    RetOps.push_back(
        DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));

    // Add the returned register to the CalleeSaveDisableRegs list.
    if (ShouldDisableCalleeSavedRegister)
      MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
  }

  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (X86::GR64RegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  X86ISD::NodeType opcode = X86ISD::RET_FLAG;
  if (CallConv == CallingConv::X86_INTR)
    opcode = X86ISD::IRET;
  return DAG.getNode(opcode, dl, MVT::Other, RetOps);
}

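/// Return true if the result of node N is used only (possibly through a
/// CopyToReg or FP_EXTEND) by RET_FLAG nodes, and report via \p Chain the
/// chain a tail call could use.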
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() != ISD::FP_EXTEND)
    return false;

  bool HasRet = false;
  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != X86ISD::RET_FLAG)
      return false;
    // If we are returning more than one value, we can definitely
    // not make a tail call; see PR19530.
    if (UI->getNumOperands() > 4)
      return false;
    if (UI->getNumOperands() == 4 &&
        UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}

EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                           ISD::NodeType ExtendKind) const {
  MVT ReturnMVT = MVT::i32;

  bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
  if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
    // The ABI does not require i1, i8 or i16 to be extended.
    //
    // On Darwin, there is code in the wild relying on Clang's old behaviour of
    // always extending i8/i16 return values, so keep doing that for now.
    // (PR26665).
    ReturnMVT = MVT::i8;
  }

  EVT MinVT = getRegisterType(Context, ReturnMVT);
  return VT.bitsLT(MinVT) ? MinVT : VT;
}

/// Reads two 32 bit registers and creates a 64 bit mask value.
/// \param VA The current 32 bit value that needs to be assigned.
/// \param NextVA The next 32 bit value that needs to be assigned.
/// \param Root The parent DAG node.
/// \param [in,out] InFlag Represents the SDValue in the parent DAG node used
///                        for glue purposes. In case the DAG already uses a
///                        physical register instead of a virtual one, we
///                        should glue our new SDValue to the InFlag SDValue.
/// \return a new 64 bit wide SDValue.
static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
                                SDValue &Root, SelectionDAG &DAG,
                                const SDLoc &Dl, const X86Subtarget &Subtarget,
                                SDValue *InFlag = nullptr) {
  assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  assert(VA.getValVT() == MVT::v64i1 &&
         "Expecting first location of 64 bit width type");
  assert(NextVA.getValVT() == VA.getValVT() &&
         "The locations should have the same type");
  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
         "The values should reside in two registers");

  SDValue Lo, Hi;
  SDValue ArgValueLo, ArgValueHi;

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterClass *RC = &X86::GR32RegClass;

  // Read a 32 bit value from the registers.
  if (nullptr == InFlag) {
    // When no physical register is present,
    // create an intermediate virtual register.
    unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
    ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
  } else {
    // When a physical register is available read the value from it and glue
    // the reads together.
    ArgValueLo =
      DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
    *InFlag = ArgValueLo.getValue(2);
    ArgValueHi =
      DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
    *InFlag = ArgValueHi.getValue(2);
  }

  // Convert each i32 value into a v32i1 mask.
  Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
  Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);

  // Concatenate the two values together.
  return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
}

/// The function will lower a register of various sizes (8/16/32/64)
/// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1).
/// \returns a DAG node containing the operand after lowering it to a mask type.
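/// For example, a v16i1 value located in an i32 register is first truncated
/// to i16 and then bitcast to v16i1, while a v64i1 value already held in an
/// i64 location is bitcast directly.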
static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
                               const EVT &ValLoc, const SDLoc &Dl,
                               SelectionDAG &DAG) {
  SDValue ValReturned = ValArg;

  if (ValVT == MVT::v1i1)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);

  if (ValVT == MVT::v64i1) {
    // On a 32 bit machine, this case is handled by getv64i1Argument.
    assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
    // On a 64 bit machine there is no need to truncate the value; only a
    // bitcast is needed.
  } else {
    MVT maskLen;
    switch (ValVT.getSimpleVT().SimpleTy) {
    case MVT::v8i1:
      maskLen = MVT::i8;
      break;
    case MVT::v16i1:
      maskLen = MVT::i16;
      break;
    case MVT::v32i1:
      maskLen = MVT::i32;
      break;
    default:
      llvm_unreachable("Expecting a vector of i1 types");
    }

    ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
  }
  return DAG.getBitcast(ValVT, ValReturned);
}

/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue X86TargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    uint32_t *RegMask) const {

  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool Is64Bit = Subtarget.is64Bit();
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
       ++I, ++InsIndex) {
    CCValAssign &VA = RVLocs[I];
    EVT CopyVT = VA.getLocVT();

    // In some calling conventions we need to remove the used registers
    // from the register mask.
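    // A set bit in the register mask means the register is preserved across
    // the call, so clearing the bits for the result register and its
    // sub-registers records that they are clobbered by the returned value.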
    if (RegMask) {
      for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
    }

    // If this is x86-64, and we disabled SSE, we can't return FP values
    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
        ((Is64Bit || Ins[InsIndex].Flags.isInReg()) && !Subtarget.hasSSE1())) {
      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
      VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
    } else if (CopyVT == MVT::f64 &&
               (Is64Bit && !Subtarget.hasSSE2())) {
      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
      VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
    }

    // If we prefer to use the value in xmm registers, copy it out as f80 and
    // use a truncate to move it from fp stack reg to xmm reg.
    bool RoundAfterCopy = false;
    if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
        isScalarFPTypeInSSEReg(VA.getValVT())) {
      if (!Subtarget.hasX87())
        report_fatal_error("X87 register return with X87 disabled");
      CopyVT = MVT::f80;
      RoundAfterCopy = (CopyVT != VA.getLocVT());
    }

    SDValue Val;
    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");
      Val =
          getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
    } else {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
                  .getValue(1);
      Val = Chain.getValue(0);
      InFlag = Chain.getValue(2);
    }

    if (RoundAfterCopy)
      Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
                        // This truncation won't change the value.
                        DAG.getIntPtrConstant(1, dl));

    if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
      if (VA.getValVT().isVector() &&
          ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
           (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
        // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
        Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
      } else
        Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
    }

    InVals.push_back(Val);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention is used by many Windows API routines. It
//  differs from the C calling convention only a little: the callee cleans up
//  the stack rather than the caller, and symbols are decorated in a particular
//  way. It doesn't support any vector arguments.
//  For info on the fast calling convention, see the Fast Calling Convention
//  (tail call) implementation in LowerX86_32FastCCCallTo.

/// Determines whether a call uses struct return semantics.
enum StructReturnType {
  NotStructReturn,
  RegStructReturn,
  StackStructReturn
};
static StructReturnType
callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
  if (Outs.empty())
    return NotStructReturn;

  const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
  if (!Flags.isSRet())
    return NotStructReturn;
  if (Flags.isInReg() || IsMCU)
    return RegStructReturn;
  return StackStructReturn;
}

/// Determines whether a function uses struct return semantics.
static StructReturnType
argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
  if (Ins.empty())
    return NotStructReturn;

  const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
  if (!Flags.isSRet())
    return NotStructReturn;
  if (Flags.isInReg() || IsMCU)
    return RegStructReturn;
  return StackStructReturn;
}

/// Make a copy of an aggregate at address specified by "Src" to address
/// "Dst" with size and alignment information specified by the specific
/// parameter attribute. The copy will be passed as a byval function parameter.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);

  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile*/false, /*AlwaysInline=*/true,
                       /*isTailCall*/false,
                       MachinePointerInfo(), MachinePointerInfo());
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
          CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
          CC == CallingConv::HHVM || CC == CallingConv::Tail);
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  // C calling conventions:
  case CallingConv::C:
  case CallingConv::Win64:
  case CallingConv::X86_64_SysV:
  // Callee pop conventions:
  case CallingConv::X86_ThisCall:
  case CallingConv::X86_StdCall:
  case CallingConv::X86_VectorCall:
  case CallingConv::X86_FastCall:
  // Swift:
  case CallingConv::Swift:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

/// Return true if the function is being made into a tailcall target by
/// changing its ABI.
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
  return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) || CC == CallingConv::Tail;
}

bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  auto Attr =
      CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
  if (!CI->isTailCall() || Attr.getValueAsString() == "true")
    return false;

  ImmutableCallSite CS(CI);
  CallingConv::ID CalleeCC = CS.getCallingConv();
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  return true;
}

SDValue
X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    const SDLoc &dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    MachineFrameInfo &MFI, unsigned i) const {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags = Ins[i].Flags;
  bool AlwaysUseMutable = shouldGuaranteeTCO(
      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
  EVT ValVT;
  MVT PtrVT = getPointerTy(DAG.getDataLayout());

  // If the value is passed by pointer, we have the address passed instead of
  // the value itself. No need to extend if the mask value and location share
  // the same absolute size.
  bool ExtendedInMem =
      VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
      VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
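  // ExtendedInMem is set for i1-based mask values that were widened to a
  // larger location type in memory; they are loaded at the location type and
  // truncated (or scalarized) back to the value type at the end of this
  // function.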

  if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
    ValVT = VA.getLocVT();
  else
    ValVT = VA.getValVT();

  // FIXME: For now, all byval parameter objects are marked mutable. This can be
  // changed with more analysis.
  // In case of tail call optimization, mark all arguments mutable, since they
  // could be overwritten by the lowering of arguments in a tail call.
  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.

    // FIXME: For now, all byval parameter objects are marked as aliasing. This
    // can be improved with deeper analysis.
    int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
                                   /*isAliased=*/true);
    return DAG.getFrameIndex(FI, PtrVT);
  }

  // This is an argument in memory. We might be able to perform copy elision.
  // If the argument is passed directly in memory without any extension, then we
  // can perform copy elision. Large vector types, for example, may be passed
  // indirectly by pointer.
  if (Flags.isCopyElisionCandidate() &&
      VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem) {
    EVT ArgVT = Ins[i].ArgVT;
    SDValue PartAddr;
    if (Ins[i].PartOffset == 0) {
      // If this is a one-part value or the first part of a multi-part value,
      // create a stack object for the entire argument value type and return a
      // load from our portion of it. This assumes that if the first part of an
      // argument is in memory, the rest will also be in memory.
      int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
                                     /*IsImmutable=*/false);
      PartAddr = DAG.getFrameIndex(FI, PtrVT);
      return DAG.getLoad(
          ValVT, dl, Chain, PartAddr,
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
    } else {
      // This is not the first piece of an argument in memory. See if there is
      // already a fixed stack object including this offset. If so, assume it
      // was created by the PartOffset == 0 branch above and create a load from
      // the appropriate offset into it.
      int64_t PartBegin = VA.getLocMemOffset();
      int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
      int FI = MFI.getObjectIndexBegin();
      for (; MFI.isFixedObjectIndex(FI); ++FI) {
        int64_t ObjBegin = MFI.getObjectOffset(FI);
        int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
        if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
          break;
      }
      if (MFI.isFixedObjectIndex(FI)) {
        SDValue Addr =
            DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
                        DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
        return DAG.getLoad(
            ValVT, dl, Chain, Addr,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
                                              Ins[i].PartOffset));
      }
    }
  }

  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
                                 VA.getLocMemOffset(), isImmutable);

  // Set SExt or ZExt flag.
  if (VA.getLocInfo() == CCValAssign::ZExt) {
    MFI.setObjectZExt(FI, true);
  } else if (VA.getLocInfo() == CCValAssign::SExt) {
    MFI.setObjectSExt(FI, true);
  }

  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val = DAG.getLoad(
      ValVT, dl, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
  return ExtendedInMem
             ? (VA.getValVT().isVector()
                    ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
                    : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
             : Val;
}

// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());

  if (Subtarget.isCallingConvWin64(CallConv)) {
    static const MCPhysReg GPR64ArgRegsWin64[] = {
      X86::RCX, X86::RDX, X86::R8,  X86::R9
    };
    return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
  }

  static const MCPhysReg GPR64ArgRegs64Bit[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
}

// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
                                                CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());
  if (Subtarget.isCallingConvWin64(CallConv)) {
    // The XMM registers which might contain var arg parameters are shadowed
    // by their paired GPRs, so we only need to save the GPRs to their home
    // slots.
    // TODO: __vectorcall will change this.
    return None;
  }

  const Function &F = MF.getFunction();
  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
  bool isSoftFloat = Subtarget.useSoftFloat();
  assert(!(isSoftFloat && NoImplicitFloatOps) &&
         "SSE register cannot be used when SSE is disabled!");
  if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
    // Kernel mode asks for SSE to be disabled, so there are no XMM argument
    // registers.
    return None;

  static const MCPhysReg XMMArgRegs64Bit[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
  return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
}

#ifndef NDEBUG
static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
  return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
                        [](const CCValAssign &A, const CCValAssign &B) -> bool {
                          return A.getValNo() < B.getValNo();
                        });
}
#endif

SDValue X86TargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();

  const Function &F = MF.getFunction();
  if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
      F.getName() == "main")
    FuncInfo->setForceFramePointer(true);

  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool Is64Bit = Subtarget.is64Bit();
  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);

  assert(
      !(isVarArg && canGuaranteeTCO(CallConv)) &&
      "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, 8);

  CCInfo.AnalyzeArguments(Ins, CC_X86);

  // In the vectorcall calling convention a second pass is required for the
  // HVA types.
  if (CallingConv::X86_VectorCall == CallConv) {
    CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
  }

  // The next loop assumes that the locations are in the same order as the
  // input arguments.
  assert(isSortedByValueNo(ArgLocs) &&
         "Argument Location list must be sorted before lowering");

  SDValue ArgValue;
  for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
       ++I, ++InsIndex) {
    assert(InsIndex < Ins.size() && "Invalid Ins index");
    CCValAssign &VA = ArgLocs[I];

    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      if (VA.needsCustom()) {
        assert(
            VA.getValVT() == MVT::v64i1 &&
            "Currently the only custom case is when we split v64i1 to 2 regs");

        // In the regcall calling convention, v64i1 values compiled for a
        // 32 bit arch are split up into two registers.
        ArgValue =
            getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
      } else {
        const TargetRegisterClass *RC;
        if (RegVT == MVT::i8)
          RC = &X86::GR8RegClass;
        else if (RegVT == MVT::i16)
          RC = &X86::GR16RegClass;
        else if (RegVT == MVT::i32)
          RC = &X86::GR32RegClass;
        else if (Is64Bit && RegVT == MVT::i64)
          RC = &X86::GR64RegClass;
        else if (RegVT == MVT::f32)
          RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
        else if (RegVT == MVT::f64)
          RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
        else if (RegVT == MVT::f80)
          RC = &X86::RFP80RegClass;
        else if (RegVT == MVT::f128)
          RC = &X86::VR128RegClass;
        else if (RegVT.is512BitVector())
          RC = &X86::VR512RegClass;
        else if (RegVT.is256BitVector())
          RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
        else if (RegVT.is128BitVector())
          RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
        else if (RegVT == MVT::x86mmx)
          RC = &X86::VR64RegClass;
        else if (RegVT == MVT::v1i1)
          RC = &X86::VK1RegClass;
        else if (RegVT == MVT::v8i1)
          RC = &X86::VK8RegClass;
        else if (RegVT == MVT::v16i1)
          RC = &X86::VK16RegClass;
        else if (RegVT == MVT::v32i1)
          RC = &X86::VK32RegClass;
        else if (RegVT == MVT::v64i1)
          RC = &X86::VK64RegClass;
        else
          llvm_unreachable("Unknown argument type!");

        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      }

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::BCvt)
        ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);

      if (VA.isExtInLoc()) {
        // Handle MMX values passed in XMM regs.
        if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
          ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
        else if (VA.getValVT().isVector() &&
                 VA.getValVT().getScalarType() == MVT::i1 &&
                 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
                  (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
          // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
          ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
        } else
          ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
      }
    } else {
      assert(VA.isMemLoc());
      ArgValue =
          LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
    }

    // If value is passed via pointer - do a load.
    if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
      ArgValue =
          DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());

    InVals.push_back(ArgValue);
  }

  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
    // The Swift calling convention does not require us to copy the sret
    // argument into %rax/%eax for the return, so we don't set SRetReturnReg
    // for Swift.
    if (CallConv == CallingConv::Swift)
      continue;

    // All x86 ABIs require that for returning structs by value we copy the
    // sret argument into %rax/%eax (depending on ABI) for the return. Save
    // the argument into a virtual register so that we can access it from the
    // return points.
    if (Ins[I].Flags.isSRet()) {
      unsigned Reg = FuncInfo->getSRetReturnReg();
      if (!Reg) {
        MVT PtrTy = getPointerTy(DAG.getDataLayout());
        Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
        FuncInfo->setSRetReturnReg(Reg);
      }
      SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
      break;
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align stack specially for tail calls.
  if (shouldGuaranteeTCO(CallConv,
                         MF.getTarget().Options.GuaranteedTailCallOpt))
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  // We can skip this if there are no va_start calls.
  if (MFI.hasVAStart() &&
      (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
                   CallConv != CallingConv::X86_ThisCall))) {
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
  }

  // Figure out if XMM registers are in use.
  assert(!(Subtarget.useSoftFloat() &&
           F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
         "SSE register cannot be used when SSE is disabled!");

  // 64-bit calling conventions support varargs and register parameters, so we
  // have to do extra work to spill them in the prologue.
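  // For the non-Win64 case below this builds the SysV x86-64 va_arg register
  // save area: ArgGPRs.size() * 8 bytes for the integer registers followed by
  // ArgXMMs.size() * 16 bytes for the vector registers (48 + 128 = 176 bytes
  // when all eight XMM registers are available).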
  if (Is64Bit && isVarArg && MFI.hasVAStart()) {
    // Find the first unallocated argument registers.
    ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
    ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
    assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
           "SSE register cannot be used when SSE is disabled!");

    // Gather all the live in physical registers.
    SmallVector<SDValue, 6> LiveGPRs;
    SmallVector<SDValue, 8> LiveXMMRegs;
    SDValue ALVal;
    for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
      unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
      LiveGPRs.push_back(
          DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
    }
    if (!ArgXMMs.empty()) {
      unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
      ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
      for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
        unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
        LiveXMMRegs.push_back(
            DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
      }
    }

    if (IsWin64) {
      // Get to the caller-allocated home save location.  Add 8 to account
      // for the return address.
      int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
      FuncInfo->setRegSaveFrameIndex(
          MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
      // Fixup to set vararg frame on shadow area (4 x i64).
      if (NumIntRegs < 4)
        FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
    } else {
      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so
      // they may be loaded by dereferencing the result of va_next.
      FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
      FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
      FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
          ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
    }

    // Store the integer parameter registers.
    SmallVector<SDValue, 8> MemOps;
    SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
                                      getPointerTy(DAG.getDataLayout()));
    unsigned Offset = FuncInfo->getVarArgsGPOffset();
    for (SDValue Val : LiveGPRs) {
      SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                                RSFIN, DAG.getIntPtrConstant(Offset, dl));
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(),
                           FuncInfo->getRegSaveFrameIndex(), Offset));
      MemOps.push_back(Store);
      Offset += 8;
    }

    if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
      // Now store the XMM (fp + vector) parameter registers.
      SmallVector<SDValue, 12> SaveXMMOps;
      SaveXMMOps.push_back(Chain);
      SaveXMMOps.push_back(ALVal);
      SaveXMMOps.push_back(DAG.getIntPtrConstant(
                             FuncInfo->getRegSaveFrameIndex(), dl));
      SaveXMMOps.push_back(DAG.getIntPtrConstant(
                             FuncInfo->getVarArgsFPOffset(), dl));
      SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
                        LiveXMMRegs.end());
      MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
                                   MVT::Other, SaveXMMOps));
    }

    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
    // Find the largest legal vector type.
    MVT VecVT = MVT::Other;
    // FIXME: Only some x86_32 calling conventions support AVX512.
    if (Subtarget.useAVX512Regs() &&
        (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
                     CallConv == CallingConv::Intel_OCL_BI)))
      VecVT = MVT::v16f32;
    else if (Subtarget.hasAVX())
      VecVT = MVT::v8f32;
    else if (Subtarget.hasSSE2())
      VecVT = MVT::v4f32;

    // We forward some GPRs and some vector types.
    SmallVector<MVT, 2> RegParmTypes;
    MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
    RegParmTypes.push_back(IntVT);
    if (VecVT != MVT::Other)
      RegParmTypes.push_back(VecVT);

    // Compute the set of forwarded registers. The rest are scratch.
    SmallVectorImpl<ForwardedRegister> &Forwards =
        FuncInfo->getForwardedMustTailRegParms();
    CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);

    // Conservatively forward AL on x86_64, since it might be used for varargs.
    if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
      unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
      Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
    }

    // Copy all forwards from physical to virtual registers.
    for (ForwardedRegister &FR : Forwards) {
      // FIXME: Can we use a less constrained schedule?
      SDValue RegVal = DAG.getCopyFromReg(Chain, dl, FR.VReg, FR.VT);
      FR.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(FR.VT));
      Chain = DAG.getCopyToReg(Chain, dl, FR.VReg, RegVal);
    }
  }

  // Some CCs need callee pop.
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       MF.getTarget().Options.GuaranteedTailCallOpt)) {
    FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
  } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
    // X86 interrupts must pop the error code (and the alignment padding) if
    // present.
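    // On 64-bit targets this is the 8-byte error code plus 8 bytes of
    // alignment padding; on 32-bit targets it is just the 4-byte error code.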
    FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
  } else {
    FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
        !Subtarget.getTargetTriple().isOSMSVCRT() &&
        argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
      FuncInfo->setBytesToPopOnReturn(4);
  }

  if (!Is64Bit) {
    // RegSaveFrameIndex is X86-64 only.
    FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
    if (CallConv == CallingConv::X86_FastCall ||
        CallConv == CallingConv::X86_ThisCall)
      // fastcc functions can't have varargs.
      FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
  }

  FuncInfo->setArgumentStackSize(StackSize);

  if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
    EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
    if (Personality == EHPersonality::CoreCLR) {
      assert(Is64Bit);
      // TODO: Add a mechanism to frame lowering that will allow us to indicate
      // that we'd prefer this slot be allocated towards the bottom of the frame
      // (i.e. near the stack pointer after allocating the frame).  Every
      // funclet needs a copy of this slot in its (mostly empty) frame, and the
      // offset from the bottom of this and each funclet's frame must be the
      // same, so the size of funclets' (mostly empty) frames is dictated by
      // how far this slot is from the bottom (since they allocate just enough
      // space to accommodate holding this slot at the correct offset).
      int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
      EHInfo->PSPSymFrameIdx = PSPSymFI;
    }
  }

  if (CallConv == CallingConv::X86_RegCall ||
      F.hasFnAttribute("no_caller_saved_registers")) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
      MRI.disableCalleeSavedRegister(Pair.first);
  }

  return Chain;
}

SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
                                            SDValue Arg, const SDLoc &dl,
                                            SelectionDAG &DAG,
                                            const CCValAssign &VA,
                                            ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}

/// Emit a load of return address if tail call
/// optimization is performed and it is required.
SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
    SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
    bool Is64Bit, int FPDiff, const SDLoc &dl) const {
  // Adjust the Return address stack slot.
  EVT VT = getPointerTy(DAG.getDataLayout());
  OutRetAddr = getReturnAddressFrameIndex(DAG);

  // Load the "old" Return address.
  OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
  return SDValue(OutRetAddr.getNode(), 1);
}

/// Emit a store of the return address if tail call
/// optimization is performed and it is required (FPDiff!=0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
                                        SDValue Chain, SDValue RetAddrFrIdx,
                                        EVT PtrVT, unsigned SlotSize,
                                        int FPDiff, const SDLoc &dl) {
  // Store the return address to the appropriate stack slot.
  if (!FPDiff) return Chain;
  // Calculate the new stack slot for the return address.
  int NewReturnAddrFI =
    MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
                                         false);
  SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), NewReturnAddrFI));
  return Chain;
}

/// Returns a vector_shuffle mask for a movs{s|d} or movd
/// operation of the specified width.
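/// For example, with 4 elements the mask is <4, 1, 2, 3>: element 0 is taken
/// from V2 and the remaining elements from V1.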
static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
                       SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  Mask.push_back(NumElems);
  for (unsigned i = 1; i != NumElems; ++i)
    Mask.push_back(i);
  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}

SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool &isTailCall                      = CLI.IsTailCall;
  bool isVarArg                         = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool Is64Bit        = Subtarget.is64Bit();
  bool IsWin64        = Subtarget.isCallingConvWin64(CallConv);
  StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
  bool IsSibcall      = false;
  bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
      CallConv == CallingConv::Tail;
  X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
  auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
  const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
  const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
  bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
                 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
  const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
  bool HasNoCfCheck =
      (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
  const Module *M = MF.getMMI().getModule();
  Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");

  MachineFunction::CallSiteInfo CSInfo;

  if (CallConv == CallingConv::X86_INTR)
    report_fatal_error("X86 interrupts may not be called directly");

  if (Attr.getValueAsString() == "true")
    isTailCall = false;

  if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO) {
    // If we are using a GOT, disable tail calls to external symbols with
    // default visibility. Tail calling such a symbol requires using a GOT
    // relocation, which forces early binding of the symbol. This breaks code
    // that requires lazy function symbol resolution. Using musttail or
    // GuaranteedTailCallOpt will override this.
    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    if (!G || (!G->getGlobal()->hasLocalLinkage() &&
               G->getGlobal()->hasDefaultVisibility()))
      isTailCall = false;
  }

  bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
  if (IsMustTail) {
    // Force this to be a tail call.  The verifier rules are enough to ensure
    // that we can lower this successfully without moving the return address
    // around.
    isTailCall = true;
  } else if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    isVarArg, SR != NotStructReturn,
                    MF.getFunction().hasStructRetAttr(), CLI.RetTy,
                    Outs, OutVals, Ins, DAG);

    // Sibcalls are automatically detected tailcalls which do not require
    // ABI changes.
    if (!IsGuaranteeTCO && isTailCall)
      IsSibcall = true;

    if (isTailCall)
      ++NumTailCalls;
  }

  assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
         "Var args not supported with calling convention fastcc, ghc or hipe");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, 8);

  CCInfo.AnalyzeArguments(Outs, CC_X86);

  // In the vectorcall calling convention a second pass is required for the
  // HVA types.
  if (CallingConv::X86_VectorCall == CallConv) {
    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
  }

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
  if (IsSibcall)
    // This is a sibcall. The memory operands are already available in the
    // caller's own caller's stack.
    NumBytes = 0;
  else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  int FPDiff = 0;
  if (isTailCall && !IsSibcall && !IsMustTail) {
    // Lower arguments at fp - stackoffset + fpdiff.
    unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();

    FPDiff = NumBytesCallerPushed - NumBytes;
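    // A negative FPDiff means this call needs more argument stack space than
    // the caller's incoming area provides, so the return address slot must be
    // moved further down the stack.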

    // Record the delta by which the return address stack slot moves, but only
    // if this call requires a larger adjustment than any previous one.
    if (FPDiff < X86Info->getTCReturnAddrDelta())
      X86Info->setTCReturnAddrDelta(FPDiff);
  }

  unsigned NumBytesToPush = NumBytes;
  unsigned NumBytesToPop = NumBytes;

  // If we have an inalloca argument, all stack space has already been allocated
  // for us and is right at the top of the stack.  We don't support multiple
  // arguments passed in memory when using inalloca.
  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
    NumBytesToPush = 0;
    if (!ArgLocs.back().isMemLoc())
      report_fatal_error("cannot use inalloca attribute on a register "
                         "parameter");
    if (ArgLocs.back().getLocMemOffset() != 0)
      report_fatal_error("any parameter with the inalloca attribute must be "
                         "the only memory argument");
  }

  if (!IsSibcall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
                                 NumBytes - NumBytesToPush, dl);

  SDValue RetAddrFrIdx;
  // Load return address for tail calls.
  if (isTailCall && FPDiff)
    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
                                    Is64Bit, FPDiff, dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;

  // The next loop assumes that the locations are in the same order as the
  // input arguments.
  assert(isSortedByValueNo(ArgLocs) &&
         "Argument Location list must be sorted before lowering");

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
       ++I, ++OutIndex) {
    assert(OutIndex < Outs.size() && "Invalid Out index");
    // Skip inalloca arguments, they have already been written.
    ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
    if (Flags.isInAlloca())
      continue;

    CCValAssign &VA = ArgLocs[I];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[OutIndex];
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
      break;
    case CCValAssign::AExt:
      if (Arg.getValueType().isVector() &&
          Arg.getValueType().getVectorElementType() == MVT::i1)
        Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
      else if (RegVT.is128BitVector()) {
        // Special case: passing MMX values in XMM registers.
        Arg = DAG.getBitcast(MVT::i64, Arg);
        Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
      } else
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getBitcast(RegVT, Arg);
      break;
    case CCValAssign::Indirect: {
      if (isByVal) {
        // Memcpy the argument to a temporary stack slot to prevent
        // the caller from seeing any modifications the callee may make
        // as guaranteed by the `byval` attribute.
        int FrameIdx = MF.getFrameInfo().CreateStackObject(
            Flags.getByValSize(), std::max(16, (int)Flags.getByValAlign()),
            false);
        SDValue StackSlot =
            DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
        Chain =
            CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
        // From now on treat this as a regular pointer
        Arg = StackSlot;
        isByVal = false;
      } else {
        // Store the argument.
        SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
        int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
        Chain = DAG.getStore(
            Chain, dl, Arg, SpillSlot,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
        Arg = SpillSlot;
      }
      break;
    }
    }

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");
      // Split v64i1 value into two registers
      Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      const TargetOptions &Options = DAG.getTarget().Options;
      if (Options.EnableDebugEntryValues)
        CSInfo.emplace_back(VA.getLocReg(), I);
      if (isVarArg && IsWin64) {
        // Win64 ABI requires argument XMM reg to be copied to the corresponding
        // shadow reg if callee is a varargs function.
        unsigned ShadowReg = 0;
        switch (VA.getLocReg()) {
        case X86::XMM0: ShadowReg = X86::RCX; break;
        case X86::XMM1: ShadowReg = X86::RDX; break;
        case X86::XMM2: ShadowReg = X86::R8; break;
        case X86::XMM3: ShadowReg = X86::R9; break;
        }
        if (ShadowReg)
          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
      }
    } else if (!IsSibcall && (!isTailCall || isByVal)) {
      assert(VA.isMemLoc());
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
                                      getPointerTy(DAG.getDataLayout()));
      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  if (Subtarget.isPICStyleGOT()) {
    // ELF / PIC requires the GOT pointer to be in the EBX register before
    // function calls made via the PLT.
    if (!isTailCall) {
      RegsToPass.push_back(std::make_pair(
          unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
                                          getPointerTy(DAG.getDataLayout()))));
    } else {
      // If we are tail calling and generating PIC/GOT style code load the
      // address of the callee into ECX. The value in ecx is used as target of
      // the tail jump. This is done to circumvent the ebx/callee-saved problem
      // for tail calls on PIC/GOT architectures. Normally we would just put the
      // address of GOT into ebx and then call target@PLT. But for tail calls
      // ebx would be restored (since ebx is callee saved) before jumping to the
      // target@PLT.

      // Note: The actual moving to ECX is done further down.
      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
      if (G && !G->getGlobal()->hasLocalLinkage() &&
          G->getGlobal()->hasDefaultVisibility())
        Callee = LowerGlobalAddress(Callee, DAG);
      else if (isa<ExternalSymbolSDNode>(Callee))
        Callee = LowerExternalSymbol(Callee, DAG);
    }
  }

  if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
    // From AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as a hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used, in the range 0 - 8 inclusive.
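    // Below we pass NumXMMRegs, the exact number of XMM argument registers
    // allocated for this call, which trivially satisfies the upper-bound rule.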

    // Count the number of XMM registers allocated.
    static const MCPhysReg XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
    assert((Subtarget.hasSSE1() || !NumXMMRegs)
           && "SSE registers cannot be used when SSE is disabled");

    RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
                                        DAG.getConstant(NumXMMRegs, dl,
                                                        MVT::i8)));
  }

  if (isVarArg && IsMustTail) {
    const auto &Forwards = X86Info->getForwardedMustTailRegParms();
    for (const auto &F : Forwards) {
      SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
      RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
    }
  }

  // For tail calls lower the arguments to the 'real' stack slots.  Sibcalls
  // don't need this because the eligibility check rejects calls that require
  // shuffling arguments passed in memory.
  if (!IsSibcall && isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);

    SmallVector<SDValue, 8> MemOpChains2;
    SDValue FIN;
    int FI = 0;
    for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
         ++I, ++OutsIndex) {
      CCValAssign &VA = ArgLocs[I];

      if (VA.isRegLoc()) {
        if (VA.needsCustom()) {
          assert((CallConv == CallingConv::X86_RegCall) &&
                 "Expecting custom case only in regcall calling convention");
          // This means that we are in a special case where one argument was
          // passed through two register locations - skip the next location.
          ++I;
        }

        continue;
      }

      assert(VA.isMemLoc());
      SDValue Arg = OutVals[OutsIndex];
      ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
      // Skip inalloca arguments.  They don't require any work.
      if (Flags.isInAlloca())
        continue;
      // Create frame index.
      int32_t Offset = VA.getLocMemOffset()+FPDiff;
      uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
      FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
      FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));

      if (Flags.isByVal()) {
        // Copy relative to framepointer.
        SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
        if (!StackPtr.getNode())
          StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
                                        getPointerTy(DAG.getDataLayout()));
        Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                             StackPtr, Source);

        MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
                                                         ArgChain,
                                                         Flags, DAG, dl));
      } else {
        // Store relative to framepointer.
        MemOpChains2.push_back(DAG.getStore(
            ArgChain, dl, Arg, FIN,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
                                     getPointerTy(DAG.getDataLayout()),
                                     RegInfo->getSlotSize(), FPDiff, dl);
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
    // In the 64-bit large code model, we have to make all calls
    // through a register, since the call instruction's 32-bit
    // pc-relative offset may not be large enough to hold the whole
    // address.
  } else if (Callee->getOpcode() == ISD::GlobalAddress ||
             Callee->getOpcode() == ISD::ExternalSymbol) {
    // Lower direct calls to global addresses and external symbols. Setting
    // ForCall to true here has the effect of removing WrapperRIP when possible
    // to allow direct calls to be selected without first materializing the
    // address into a register.
    Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
  } else if (Subtarget.isTarget64BitILP32() &&
             Callee->getValueType(0) == MVT::i32) {
    // Zero-extend the 32-bit Callee address into a 64-bit one according to the
    // x32 ABI.
    Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;

  if (!IsSibcall && isTailCall) {
    Chain = DAG.getCALLSEQ_END(Chain,
                               DAG.getIntPtrConstant(NumBytesToPop, dl, true),
                               DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (isTailCall)
    Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  // If HasNCSR is asserted (the NoCallerSavedRegisters attribute exists), we
  // use the X86_INTR calling convention because it has the same CSR mask
  // (the same preserved registers).
  const uint32_t *Mask = RegInfo->getCallPreservedMask(
      MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");

  // If this is an invoke in a 32-bit function using a funclet-based
  // personality, assume the function clobbers all registers. If an exception
  // is thrown, the runtime will not restore CSRs.
  // FIXME: Model this more precisely so that we can register allocate across
  // the normal edge and spill and fill across the exceptional edge.
  if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
    const Function &CallerFn = MF.getFunction();
    EHPersonality Pers =
        CallerFn.hasPersonalityFn()
            ? classifyEHPersonality(CallerFn.getPersonalityFn())
            : EHPersonality::Unknown;
    if (isFuncletEHPersonality(Pers))
      Mask = RegInfo->getNoPreservedMask();
  }

  // Define a new register mask from the existing mask.
  uint32_t *RegMask = nullptr;

  // In some calling conventions we need to remove the used physical registers
  // from the reg mask.
  if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

    // Allocate a new Reg Mask and copy Mask.
    RegMask = MF.allocateRegMask();
    unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
    memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);

    // Make sure all sub registers of the argument registers are reset
    // in the RegMask.
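    // The mask holds one bit per physical register (32 per uint32_t word); a
    // set bit means the register is preserved across the call, so clearing a
    // bit marks the argument register (and each of its sub-registers) as
    // clobbered.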
    for (auto const &RegPair : RegsToPass)
      for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));

    // Create the RegMask Operand according to our updated mask.
    Ops.push_back(DAG.getRegisterMask(RegMask));
  } else {
    // Create the RegMask Operand according to the static mask.
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  if (isTailCall) {
    // We used to do:
    //// If this is the first return lowered for this function, add the regs
    //// to the liveout set for the function.
    // This isn't right, although it's probably harmless on x86; liveouts
    // should be computed from returns not tail calls.  Consider a void
    // function making a tail call to a function returning int.
    MF.getFrameInfo().setHasTailCall();
    SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
    DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
    return Ret;
  }

  if (HasNoCfCheck && IsCFProtectionSupported) {
    Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
  } else {
    Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
  }
  InFlag = Chain.getValue(1);
  DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));

  // Save heapallocsite metadata.
  if (CLI.CS)
    if (MDNode *HeapAlloc = CLI.CS->getMetadata("heapallocsite"))
      DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPop;
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       DAG.getTarget().Options.GuaranteedTailCallOpt))
    NumBytesForCalleeToPop = NumBytes;    // Callee pops everything
  else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
           !Subtarget.getTargetTriple().isOSMSVCRT() &&
           SR == StackStructReturn)
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    // For MSVC Win32 targets, the caller pops the hidden struct pointer.
    NumBytesForCalleeToPop = 4;
  else
    NumBytesForCalleeToPop = 0;  // Callee pops nothing.

  if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
    // No need to reset the stack after the call if the call doesn't return. To
    // keep the MachineInstr verifier happy, we'll pretend the callee does it
    // for us.
    NumBytesForCalleeToPop = NumBytes;
  }

  // Returns a flag for retval copy to use.
  if (!IsSibcall) {
    Chain = DAG.getCALLSEQ_END(Chain,
                               DAG.getIntPtrConstant(NumBytesToPop, dl, true),
                               DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
                                                     true),
                               InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, RegMask);
}

//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

//  Like the stdcall convention, the callee cleans up the arguments, except
//  that ECX is reserved for storing the address of the tail-called function.
//  Only 2 registers are free for argument passing (inreg). Tail call
//  optimization is performed provided:
//                * tailcallopt is enabled
//                * caller/callee are fastcc
//  On the X86_64 architecture with GOT-style position independent code only
//  local (within module) calls are supported at the moment.
//  To keep the stack aligned according to the platform ABI, the function
//  GetAlignedArgumentStackSize ensures that the argument delta is always a
//  multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld
//  for example)
//  If a tail-called callee has more arguments than the caller, the caller
//  needs to make sure that there is room to move the RETADDR to. This is
//  achieved by reserving an area the size of the argument delta right after
//  the original RETADDR, but before the saved frame pointer or the spilled
//  registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
//  stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// Round up StackSize so that, together with the return-address slot, it
/// satisfies the platform stack alignment, e.g. 16n + 12 for a 16-byte
/// alignment requirement with a 4-byte slot.
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                               SelectionDAG& DAG) const {
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  uint64_t AlignMask = StackAlignment - 1;
  int64_t Offset = StackSize;
  unsigned SlotSize = RegInfo->getSlotSize();
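  // The returned size satisfies Offset % StackAlignment ==
  // StackAlignment - SlotSize, so that once the SlotSize-byte return address
  // is pushed the stack is again a multiple of StackAlignment (e.g. 16n + 12
  // with a 16-byte alignment and a 4-byte slot).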
  if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
    // The misalignment is at most the target residue; just add the difference.
    Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
  } else {
    // Mask out the lower bits, then add a full StackAlignment plus the
    // (StackAlignment - SlotSize) residue.
    Offset = ((~AlignMask) & Offset) + StackAlignment +
      (StackAlignment-SlotSize);
  }
  return Offset;
}

/// Return true if the given stack call argument is already available in the
/// same position (relatively) of the caller's incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
                         const X86InstrInfo *TII, const CCValAssign &VA) {
  unsigned Bytes = Arg.getValueSizeInBits() / 8;

  for (;;) {
    // Look through nodes that don't alter the bits of the incoming value.
    unsigned Op = Arg.getOpcode();
    if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
      Arg = Arg.getOperand(0);
      continue;
    }
    if (Op == ISD::TRUNCATE) {
      const SDValue &TruncInput = Arg.getOperand(0);
      if (TruncInput.getOpcode() == ISD::AssertZext &&
          cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
              Arg.getValueType()) {
        Arg = TruncInput.getOperand(0);
        continue;
      }
    }
    break;
  }

  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!Register::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(*Def, FI))
        return false;
    } else {
      unsigned Opcode = Def->getOpcode();
      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
           Opcode == X86::LEA64_32r) &&
          Def->getOperand(1).isFI()) {
        FI = Def->getOperand(1).getIndex();
        Bytes = Flags.getByValSize();
      } else
        return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
    FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
    FI = FINode->getIndex();
    Bytes = Flags.getByValSize();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI.isFixedObjectIndex(FI))
    return false;

  if (Offset != MFI.getObjectOffset(FI))
    return false;

  // If this is not byval, check that the argument stack object is immutable.
  // inalloca and argument copy elision can create mutable argument stack
  // objects. Byval objects can be mutated, but a byval call intends to pass the
  // mutated memory.
  if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
    return false;

  if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
    // If the argument location is wider than the argument type, check that any
    // extension flags match.
    if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
        Flags.isSExt() != MFI.isObjectSExt(FI)) {
      return false;
    }
  }

  return Bytes == MFI.getObjectSize(FI);
}

/// Check whether the call is eligible for tail call optimization. Targets
/// that want to do tail call optimization should implement this function.
bool X86TargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  // If -tailcallopt is specified, make fastcc functions tail-callable.
  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();

  // If the function return type is x86_fp80 and the callee return type is not,
  // then the FP_EXTEND of the call result is not a nop. It's not safe to
  // perform a tailcall optimization here.
  if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
    return false;

  CallingConv::ID CallerCC = CallerF.getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;
  bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
  bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
  bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
      CalleeCC == CallingConv::Tail;

  // Win64 functions have extra shadow space for argument homing. Don't do the
  // sibcall if the caller and callee have mismatched expectations for this
  // space.
  if (IsCalleeWin64 != IsCallerWin64)
    return false;

  if (IsGuaranteeTCO) {
    if (canGuaranteeTCO(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
  // emit a special epilogue.
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (RegInfo->needsStackRealignment(MF))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // Do not sibcall optimize vararg calls unless all arguments are passed via
  // registers.
  LLVMContext &C = *DAG.getContext();
  if (isVarArg && !Outs.empty()) {
    // Optimizing for varargs on Win64 is unlikely to be safe without
    // additional testing.
    if (IsCalleeWin64 || IsCallerWin64)
      return false;

    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);

    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }

  // If the call result is in ST0 / ST1, it needs to be popped off the x87
  // stack.  Therefore, if the result is not used, it is not safe to optimize
  // this into a sibcall.
  bool Unused = false;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (!Ins[i].Used) {
      Unused = true;
      break;
    }
  }
  if (Unused) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
    CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
    for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
      CCValAssign &VA = RVLocs[i];
      if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
        return false;
    }
  }

  // Check that the call results are passed in the same way.
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
                                  RetCC_X86, RetCC_X86))
    return false;
  // The callee has to preserve all registers the caller needs to preserve.
  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  unsigned StackArgsSize = 0;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);

    // Allocate shadow area for Win64
    if (IsCalleeWin64)
      CCInfo.AllocateStack(32, 8);

    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
    StackArgsSize = CCInfo.getNextStackOffset();

    if (CCInfo.getNextStackOffset()) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const X86InstrInfo *TII = Subtarget.getInstrInfo();
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        SDValue Arg = OutVals[i];
        ISD::ArgFlagsTy Flags = Outs[i].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII, VA))
            return false;
        }
      }
    }

    bool PositionIndependent = isPositionIndependent();
    // If the tailcall address may be in a register, then make sure it's
    // possible to register allocate for it. In 32-bit, the call address can
    // only target EAX, EDX, or ECX since the tail call must be scheduled after
    // callee-saved registers are restored. These happen to be the same
    // registers used to pass 'inreg' arguments so watch out for those.
    if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
                                  !isa<ExternalSymbolSDNode>(Callee)) ||
                                 PositionIndependent)) {
      unsigned NumInRegs = 0;
      // In PIC we need an extra register to formulate the address computation
      // for the callee.
      unsigned MaxInRegs = PositionIndependent ? 2 : 3;
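      // e.g. in PIC mode only one of EAX/ECX/EDX may carry an 'inreg'
      // argument: a second one would leave no register for both the call
      // address and the PIC address computation.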

      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        if (!VA.isRegLoc())
          continue;
        Register Reg = VA.getLocReg();
        switch (Reg) {
        default: break;
        case X86::EAX: case X86::EDX: case X86::ECX:
          if (++NumInRegs == MaxInRegs)
            return false;
          break;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  bool CalleeWillPop =
      X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
                       MF.getTarget().Options.GuaranteedTailCallOpt);

  if (unsigned BytesToPop =
          MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
    // If we have bytes to pop, the callee must pop them.
    bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
    if (!CalleePopMatches)
      return false;
  } else if (CalleeWillPop && StackArgsSize > 0) {
    // If we don't have bytes to pop, make sure the callee doesn't pop any.
    return false;
  }

  return true;
}

FastISel *
X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return X86::createFastISel(funcInfo, libInfo);
}

//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//

static bool MayFoldLoad(SDValue Op) {
  return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
}

static bool MayFoldIntoStore(SDValue Op) {
  return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
}

static bool MayFoldIntoZeroExtend(SDValue Op) {
  if (Op.hasOneUse()) {
    unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
    return (ISD::ZERO_EXTEND == Opcode);
  }
  return false;
}

static bool isTargetShuffle(unsigned Opcode) {
  switch(Opcode) {
  default: return false;
  case X86ISD::BLENDI:
  case X86ISD::PSHUFB:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::SHUFP:
  case X86ISD::INSERTPS:
  case X86ISD::EXTRQI:
  case X86ISD::INSERTQI:
  case X86ISD::PALIGNR:
  case X86ISD::VSHLDQ:
  case X86ISD::VSRLDQ:
  case X86ISD::MOVLHPS:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::MOVDDUP:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::UNPCKL:
  case X86ISD::UNPCKH:
  case X86ISD::VBROADCAST:
  case X86ISD::VPERMILPI:
  case X86ISD::VPERMILPV:
  case X86ISD::VPERM2X128:
  case X86ISD::SHUF128:
  case X86ISD::VPERMIL2:
  case X86ISD::VPERMI:
  case X86ISD::VPPERM:
  case X86ISD::VPERMV:
  case X86ISD::VPERMV3:
  case X86ISD::VZEXT_MOVL:
    return true;
  }
}

static bool isTargetShuffleVariableMask(unsigned Opcode) {
  switch (Opcode) {
  default: return false;
  // Target Shuffles.
  case X86ISD::PSHUFB:
  case X86ISD::VPERMILPV:
  case X86ISD::VPERMIL2:
  case X86ISD::VPPERM:
  case X86ISD::VPERMV:
  case X86ISD::VPERMV3:
    return true;
  // 'Faux' Target Shuffles.
  case ISD::OR:
  case ISD::AND:
  case X86ISD::ANDNP:
    return true;
  }
}

SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    unsigned SlotSize = RegInfo->getSlotSize();
    ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
                                                          -(int64_t)SlotSize,
                                                          false);
    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
}

bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                       bool hasSymbolicDisplacement) {
  // Offset should fit into 32 bit immediate field.
  if (!isInt<32>(Offset))
    return false;

  // If we don't have a symbolic displacement - we don't have any extra
  // restrictions.
  if (!hasSymbolicDisplacement)
    return true;

  // FIXME: Some tweaks might be needed for medium code model.
  if (M != CodeModel::Small && M != CodeModel::Kernel)
    return false;

  // For the small code model we assume that the last object ends at least 16MB
  // before the 2^31 boundary. We may also accept pretty large negative
  // constants knowing that all objects are in the positive half of the address
  // space.
  if (M == CodeModel::Small && Offset < 16*1024*1024)
    return true;

  // For the kernel code model we know that all objects reside in the negative
  // half of the 32-bit address space. We must not accept negative offsets,
  // since they may land just outside the object, but we may accept pretty
  // large positive ones.
  if (M == CodeModel::Kernel && Offset >= 0)
    return true;

  return false;
}

/// Determines whether the callee is required to pop its own arguments.
/// Callee pop is necessary to support tail calls.
bool X86::isCalleePop(CallingConv::ID CallingConv,
                      bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
  // If GuaranteeTCO is true, we force some calls to be callee pop so that we
  // can guarantee TCO.
  if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
    return true;

  switch (CallingConv) {
  default:
    return false;
  case CallingConv::X86_StdCall:
  case CallingConv::X86_FastCall:
  case CallingConv::X86_ThisCall:
  case CallingConv::X86_VectorCall:
    return !is64Bit;
  }
}

/// Return true if the condition is an unsigned comparison operation.
static bool isX86CCUnsigned(unsigned X86CC) {
  switch (X86CC) {
  default:
    llvm_unreachable("Invalid integer condition!");
  case X86::COND_E:
  case X86::COND_NE:
  case X86::COND_B:
  case X86::COND_A:
  case X86::COND_BE:
  case X86::COND_AE:
    return true;
  case X86::COND_G:
  case X86::COND_GE:
  case X86::COND_L:
  case X86::COND_LE:
    return false;
  }
}

static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
  switch (SetCCOpcode) {
  default: llvm_unreachable("Invalid integer condition!");
  case ISD::SETEQ:  return X86::COND_E;
  case ISD::SETGT:  return X86::COND_G;
  case ISD::SETGE:  return X86::COND_GE;
  case ISD::SETLT:  return X86::COND_L;
  case ISD::SETLE:  return X86::COND_LE;
  case ISD::SETNE:  return X86::COND_NE;
  case ISD::SETULT: return X86::COND_B;
  case ISD::SETUGT: return X86::COND_A;
  case ISD::SETULE: return X86::COND_BE;
  case ISD::SETUGE: return X86::COND_AE;
  }
}

/// Do a one-to-one translation of an ISD::CondCode to the X86-specific
/// condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
                               bool isFP, SDValue &LHS, SDValue &RHS,
                               SelectionDAG &DAG) {
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, DL, RHS.getValueType());
        return X86::COND_NS;
      }
      if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        return X86::COND_S;
      }
      if (SetCCOpcode == ISD::SETGE && RHSC->isNullValue()) {
        // X >= 0   -> X == 0, jump on !sign.
        return X86::COND_NS;
      }
      if (SetCCOpcode == ISD::SETLT && RHSC->getAPIntValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, DL, RHS.getValueType());
        return X86::COND_LE;
      }
    }

    return TranslateIntegerX86CC(SetCCOpcode);
  }

  // First determine if it is required or is profitable to flip the operands.

  // If LHS is a foldable load, but RHS is not, flip the condition.
  if (ISD::isNON_EXTLoad(LHS.getNode()) &&
      !ISD::isNON_EXTLoad(RHS.getNode())) {
    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
    std::swap(LHS, RHS);
  }

  switch (SetCCOpcode) {
  default: break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    std::swap(LHS, RHS);
    break;
  }

  // On a floating point condition, the flags are set as follows:
  // ZF  PF  CF   op
  //  0 | 0 | 0 | X > Y
  //  0 | 0 | 1 | X < Y
  //  1 | 0 | 0 | X == Y
  //  1 | 1 | 1 | unordered
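  // e.g. SETOGT maps to COND_A (CF==0 and ZF==0), which correctly yields
  // false for an unordered result since unordered sets ZF, PF and CF to 1.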
  switch (SetCCOpcode) {
  default: llvm_unreachable("Condcode should be pre-legalized away");
  case ISD::SETUEQ:
  case ISD::SETEQ:   return X86::COND_E;
  case ISD::SETOLT:              // flipped
  case ISD::SETOGT:
  case ISD::SETGT:   return X86::COND_A;
  case ISD::SETOLE:              // flipped
  case ISD::SETOGE:
  case ISD::SETGE:   return X86::COND_AE;
  case ISD::SETUGT:              // flipped
  case ISD::SETULT:
  case ISD::SETLT:   return X86::COND_B;
  case ISD::SETUGE:              // flipped
  case ISD::SETULE:
  case ISD::SETLE:   return X86::COND_BE;
  case ISD::SETONE:
  case ISD::SETNE:   return X86::COND_NE;
  case ISD::SETUO:   return X86::COND_P;
  case ISD::SETO:    return X86::COND_NP;
  case ISD::SETOEQ:
  case ISD::SETUNE:  return X86::COND_INVALID;
  }
}

/// Is there a floating point cmov for the specific X86 condition code?
/// Current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}


bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {

  const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
  if (!IntrData)
    return false;

  Info.flags = MachineMemOperand::MONone;
  Info.offset = 0;

  switch (IntrData->Type) {
  case TRUNCATE_TO_MEM_VI8:
  case TRUNCATE_TO_MEM_VI16:
  case TRUNCATE_TO_MEM_VI32: {
    Info.opc = ISD::INTRINSIC_VOID;
    Info.ptrVal = I.getArgOperand(0);
    MVT VT  = MVT::getVT(I.getArgOperand(1)->getType());
    MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
    if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
      ScalarVT = MVT::i8;
    else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
      ScalarVT = MVT::i16;
    else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
      ScalarVT = MVT::i32;

    Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
    Info.align = Align::None();
    Info.flags |= MachineMemOperand::MOStore;
    break;
  }
  case GATHER:
  case GATHER_AVX2: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = nullptr;
    MVT DataVT = MVT::getVT(I.getType());
    MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
    unsigned NumElts = std::min(DataVT.getVectorNumElements(),
                                IndexVT.getVectorNumElements());
    Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
    Info.align = Align::None();
    Info.flags |= MachineMemOperand::MOLoad;
    break;
  }
  case SCATTER: {
    Info.opc = ISD::INTRINSIC_VOID;
    Info.ptrVal = nullptr;
    MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
    MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
    unsigned NumElts = std::min(DataVT.getVectorNumElements(),
                                IndexVT.getVectorNumElements());
    Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
    Info.align = Align::None();
    Info.flags |= MachineMemOperand::MOStore;
    break;
  }
  default:
    return false;
  }

  return true;
}

/// Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
    if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
      return true;
  }
  return false;
}

bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
                                              ISD::LoadExtType ExtTy,
                                              EVT NewVT) const {
  assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
  
  // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
  // relocation target a movq or addq instruction: don't let the load shrink.
  SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
  if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
    if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
      return GA->getTargetFlags() != X86II::MO_GOTTPOFF;

  // If this is a (1) AVX vector load with (2) multiple uses and (3) all of
  // those uses are extracted directly into a store, then the extract + store
  // can be store-folded. Therefore, it's probably not worth splitting the load.
  EVT VT = Load->getValueType(0);
  if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
    for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
      // Skip uses of the chain value. Result 0 of the node is the load value.
      if (UI.getUse().getResNo() != 0)
        continue;

      // If this use is not an extract + store, it's probably worth splitting.
      if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
          UI->use_begin()->getOpcode() != ISD::STORE)
        return true;
    }
    // All non-chain uses are extract + store.
    return false;
  }

  return true;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0 || BitSize > 64)
    return false;
  return true;
}

bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
  // If we are using XMM registers in the ABI and the condition of the select is
  // a floating-point compare and we have blendv or conditional move, then it is
  // cheaper to select instead of doing a cross-register move and creating a
  // load that depends on the compare result.
  bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
  return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
}

bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
  // TODO: It might be a win to ease or lift this restriction, but the generic
  // folds in DAGCombiner conflict with vector folds for an AVX512 target.
  if (VT.isVector() && Subtarget.hasAVX512())
    return false;

  return true;
}

bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                               SDValue C) const {
  // TODO: We handle scalars using custom code, but generic combining could make
  // that unnecessary.
  APInt MulC;
  if (!ISD::isConstantSplatVector(C.getNode(), MulC))
    return false;

  // Find the type this will be legalized to. Otherwise we might prematurely
  // convert this to shl+add/sub and then still have to type legalize those ops.
  // Another choice would be to defer the decision for illegal types until
  // after type legalization. But constant splat vectors of i64 can't make it
  // through type legalization on 32-bit targets so we would need to special
  // case vXi64.
  while (getTypeAction(Context, VT) != TypeLegal)
    VT = getTypeToTransformTo(Context, VT);

  // If vector multiply is legal, assume that's faster than shl + add/sub.
  // TODO: Multiply is a complex op with higher latency and lower throughput in
  //       most implementations, so this check could be loosened based on type
  //       and/or a CPU attribute.
  if (isOperationLegal(ISD::MUL, VT))
    return false;

  // shl+add, shl+sub, shl+add+neg
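  // e.g. x*9 -> (x<<3)+x, x*7 -> (x<<3)-x, x*-5 -> -((x<<2)+x).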
  return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
         (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
}

bool X86TargetLowering::shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
                                                 bool IsSigned) const {
  // f80 FP_TO_UINT is more efficient using Strict code if FCMOV is available.
  return !IsSigned && FpVT == MVT::f80 && Subtarget.hasCMov();
}

bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                unsigned Index) const {
  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
    return false;

  // Mask vectors support all subregister combinations and operations that
  // extract half of the vector.
  if (ResVT.getVectorElementType() == MVT::i1)
    return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
                          (Index == ResVT.getVectorNumElements()));

  return (Index % ResVT.getVectorNumElements()) == 0;
}

bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
  unsigned Opc = VecOp.getOpcode();

  // Assume target opcodes can't be scalarized.
  // TODO - do we have any exceptions?
  if (Opc >= ISD::BUILTIN_OP_END)
    return false;

  // If the vector op is not supported, try to convert to scalar.
  EVT VecVT = VecOp.getValueType();
  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
    return true;

  // If the vector op is supported, but the scalar op is not, the transform may
  // not be worthwhile.
  EVT ScalarVT = VecVT.getScalarType();
  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
}

bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT) const {
  // TODO: Allow vectors?
  if (VT.isVector())
    return false;
  return VT.isSimple() || !isOperationExpand(Opcode, VT);
}

bool X86TargetLowering::isCheapToSpeculateCttz() const {
  // Speculate cttz only if we can directly use TZCNT.
  return Subtarget.hasBMI();
}

bool X86TargetLowering::isCheapToSpeculateCtlz() const {
  // Speculate ctlz only if we can directly use LZCNT.
  return Subtarget.hasLZCNT();
}

bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                                const SelectionDAG &DAG,
                                                const MachineMemOperand &MMO) const {
  if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
      BitcastVT.getVectorElementType() == MVT::i1)
    return false;

  if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
    return false;

  // If both types are legal vectors, it's always ok to convert them.
  if (LoadVT.isVector() && BitcastVT.isVector() &&
      isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
    return true;

  return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
}

bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                                         const SelectionDAG &DAG) const {
  // Do not merge stores to a size that would require float/vector registers
  // (e.g. 128 bits) if the NoImplicitFloat attribute is set.
  bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
      Attribute::NoImplicitFloat);

  if (NoFloat) {
    unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
    return (MemVT.getSizeInBits() <= MaxIntSize);
  }
  // Make sure we don't merge greater than our preferred vector
  // width.
  if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
    return false;
  return true;
}

bool X86TargetLowering::isCtlzFast() const {
  return Subtarget.hasFastLZCNT();
}

bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
    const Instruction &AndI) const {
  return true;
}

bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
  EVT VT = Y.getValueType();

  if (VT.isVector())
    return false;

  if (!Subtarget.hasBMI())
    return false;

  // There are only 32-bit and 64-bit forms for 'andn'.
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  return !isa<ConstantSDNode>(Y);
}

bool X86TargetLowering::hasAndNot(SDValue Y) const {
  EVT VT = Y.getValueType();

  if (!VT.isVector())
    return hasAndNotCompare(Y);

  // Vector.

  if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
    return false;

  if (VT == MVT::v4i32)
    return true;

  return Subtarget.hasSSE2();
}

bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  return X.getValueType().isScalarInteger(); // 'bt'
}

bool X86TargetLowering::
    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
        SelectionDAG &DAG) const {
  // Does baseline recommend not to perform the fold by default?
  if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
          X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
    return false;
  // For scalars this transform is always beneficial.
  if (X.getValueType().isScalarInteger())
    return true;
  // If all the shift amounts are identical, then transform is beneficial even
  // with rudimentary SSE2 shifts.
  if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
    return true;
  // If we have AVX2 with its powerful shift operations, then it's also good.
  if (Subtarget.hasAVX2())
    return true;
  // Pre-AVX2 vector codegen for this pattern works best with the 'shl' variant.
  return NewShiftOpcode == ISD::SHL;
}

bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {
  assert(((N->getOpcode() == ISD::SHL &&
           N->getOperand(0).getOpcode() == ISD::SRL) ||
          (N->getOpcode() == ISD::SRL &&
           N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");
  EVT VT = N->getValueType(0);
  if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
      (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
    // Only fold if the shift values are equal - so it folds to AND.
    // TODO - we should fold if either is a non-uniform vector but we don't do
    // the fold for non-splats yet.
    return N->getOperand(1) == N->getOperand(0).getOperand(1);
  }
  return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
}

bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
  EVT VT = Y.getValueType();

  // For vectors, we don't have a preference, but we probably want a mask.
  if (VT.isVector())
    return false;

  // 64-bit shifts on 32-bit targets produce really bad bloated code.
  if (VT == MVT::i64 && !Subtarget.is64Bit())
    return false;

  return true;
}

bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
                                          SDNode *N) const {
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      !Subtarget.isOSWindows())
    return false;
  return true;
}

bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
  // Any legal vector type can be splatted more efficiently than
  // loading/spilling from memory.
  return isTypeLegal(VT);
}

MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
  MVT VT = MVT::getIntegerVT(NumBits);
  if (isTypeLegal(VT))
    return VT;

  // PMOVMSKB can handle this.
  if (NumBits == 128 && isTypeLegal(MVT::v16i8))
    return MVT::v16i8;

  // VPMOVMSKB can handle this.
  if (NumBits == 256 && isTypeLegal(MVT::v32i8))
    return MVT::v32i8;

  // TODO: Allow 64-bit type for 32-bit target.
  // TODO: 512-bit types should be allowed, but make sure that those
  // cases are handled in combineVectorSizedSetCCEquality().

  return MVT::INVALID_SIMPLE_VALUE_TYPE;
}

/// Val is the undef sentinel value or equal to the specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
  return ((Val == SM_SentinelUndef) || (Val == CmpVal));
}

/// Val is either the undef or zero sentinel value.
static bool isUndefOrZero(int Val) {
  return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
}

/// Return true if every element in Mask, beginning from position Pos and ending
/// in Pos+Size is the undef sentinel value.
static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
  return llvm::all_of(Mask.slice(Pos, Size),
                      [](int M) { return M == SM_SentinelUndef; });
}

/// Return true if the mask creates a vector whose lower half is undefined.
static bool isUndefLowerHalf(ArrayRef<int> Mask) {
  unsigned NumElts = Mask.size();
  return isUndefInRange(Mask, 0, NumElts / 2);
}

/// Return true if the mask creates a vector whose upper half is undefined.
static bool isUndefUpperHalf(ArrayRef<int> Mask) {
  unsigned NumElts = Mask.size();
  return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
}

/// Return true if Val falls within the specified half-open range [Low, Hi).
static bool isInRange(int Val, int Low, int Hi) {
  return (Val >= Low && Val < Hi);
}

/// Return true if the value of any element in Mask falls within the specified
/// half-open range [Low, Hi).
static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
  return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
}

/// Return true if Val is undef or if its value falls within the
/// specified half-open range [Low, Hi).
static bool isUndefOrInRange(int Val, int Low, int Hi) {
  return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
}

/// Return true if every element in Mask is undef or if its value
/// falls within the specified half-open range [Low, Hi).
static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
  return llvm::all_of(
      Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
}

/// Return true if Val is undef, zero or if its value falls within the
/// specified half-open range [Low, Hi).
static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
  return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
}

/// Return true if every element in Mask is undef, zero or if its value
/// falls within the specified half-open range [Low, Hi).
static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
  return llvm::all_of(
      Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
}

/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos + Size, falls within the specified
/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
                                       unsigned Size, int Low, int Step = 1) {
  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
    if (!isUndefOrEqual(Mask[i], Low))
      return false;
  return true;
}

/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size, falls within the specified
/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step), or is undef or
/// is zero.
static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
                                             unsigned Size, int Low,
                                             int Step = 1) {
  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
    if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
      return false;
  return true;
}

/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size is undef or is zero.
static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
                                 unsigned Size) {
  return llvm::all_of(Mask.slice(Pos, Size),
                      [](int M) { return isUndefOrZero(M); });
}

/// Helper function to test whether a shuffle mask could be
/// simplified by widening the elements being shuffled.
///
/// Appends the mask for wider elements in WidenedMask if valid. Otherwise
/// leaves it in an unspecified state.
///
/// NOTE: This must handle normal vector shuffle masks and *target* vector
/// shuffle masks. The latter have the special property of a '-2' representing
/// a zero-ed lane of a vector.
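///
/// e.g. the 8-element mask <0, 1, u, u, 6, 7, -2, -2> widens to the
/// 4-element mask <0, u, 3, -2>, where 'u' is the undef sentinel and -2
/// marks a zeroed lane.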
static bool canWidenShuffleElements(ArrayRef<int> Mask,
                                    SmallVectorImpl<int> &WidenedMask) {
  WidenedMask.assign(Mask.size() / 2, 0);
  for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
    int M0 = Mask[i];
    int M1 = Mask[i + 1];

    // If both elements are undef, it's trivial.
    if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
      WidenedMask[i / 2] = SM_SentinelUndef;
      continue;
    }

    // Check for an undef mask and a mask value properly aligned to fit with
    // a pair of values. If we find such a case, use the non-undef mask's value.
    if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
      WidenedMask[i / 2] = M1 / 2;
      continue;
    }
    if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
      WidenedMask[i / 2] = M0 / 2;
      continue;
    }

    // When zeroing, we need to spread the zeroing across both lanes to widen.
    if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
      if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
          (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
        WidenedMask[i / 2] = SM_SentinelZero;
        continue;
      }
      return false;
    }

    // Finally check if the two mask values are adjacent and aligned with
    // a pair.
    if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
      WidenedMask[i / 2] = M0 / 2;
      continue;
    }

    // Otherwise we can't safely widen the elements used in this shuffle.
    return false;
  }
  assert(WidenedMask.size() == Mask.size() / 2 &&
         "Incorrect size of mask after widening the elements!");

  return true;
}

static bool canWidenShuffleElements(ArrayRef<int> Mask,
                                    const APInt &Zeroable,
                                    SmallVectorImpl<int> &WidenedMask) {
  SmallVector<int, 32> TargetMask(Mask.begin(), Mask.end());
  for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
    if (TargetMask[i] == SM_SentinelUndef)
      continue;
    if (Zeroable[i])
      TargetMask[i] = SM_SentinelZero;
  }
  return canWidenShuffleElements(TargetMask, WidenedMask);
}

static bool canWidenShuffleElements(ArrayRef<int> Mask) {
  SmallVector<int, 32> WidenedMask;
  return canWidenShuffleElements(Mask, WidenedMask);
}

/// Returns true if Elt is a constant zero or a floating point constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
  return isNullConstant(Elt) || isNullFPConstant(Elt);
}

// Build a vector of constants.
// Use an UNDEF node if MaskElt == -1.
// Split 64-bit constants in 32-bit mode.
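// e.g. on a 32-bit target a v2i64 mask constant is built as a v4i32 vector
// (each value in the low element, zero in the high element) and bitcast back
// to v2i64.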
static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
                              const SDLoc &dl, bool IsMask = false) {

  SmallVector<SDValue, 32>  Ops;
  bool Split = false;

  MVT ConstVecVT = VT;
  unsigned NumElts = VT.getVectorNumElements();
  bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
  if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
    ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
    Split = true;
  }

  MVT EltVT = ConstVecVT.getVectorElementType();
  for (unsigned i = 0; i < NumElts; ++i) {
    bool IsUndef = Values[i] < 0 && IsMask;
    SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
      DAG.getConstant(Values[i], dl, EltVT);
    Ops.push_back(OpNode);
    if (Split)
      Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
                    DAG.getConstant(0, dl, EltVT));
  }
  SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
  if (Split)
    ConstsNode = DAG.getBitcast(VT, ConstsNode);
  return ConstsNode;
}

static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
                              MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
  assert(Bits.size() == Undefs.getBitWidth() &&
         "Unequal constant and undef arrays");
  SmallVector<SDValue, 32> Ops;
  bool Split = false;

  MVT ConstVecVT = VT;
  unsigned NumElts = VT.getVectorNumElements();
  bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
  if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
    ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
    Split = true;
  }

  MVT EltVT = ConstVecVT.getVectorElementType();
  for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
    if (Undefs[i]) {
      Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
      continue;
    }
    const APInt &V = Bits[i];
    assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
    if (Split) {
      Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
      Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
    } else if (EltVT == MVT::f32) {
      APFloat FV(APFloat::IEEEsingle(), V);
      Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
    } else if (EltVT == MVT::f64) {
      APFloat FV(APFloat::IEEEdouble(), V);
      Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
    } else {
      Ops.push_back(DAG.getConstant(V, dl, EltVT));
    }
  }

  SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
  return DAG.getBitcast(VT, ConstsNode);
}

/// Returns a vector of specified type with all zero elements.
static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
                             SelectionDAG &DAG, const SDLoc &dl) {
  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
          VT.getVectorElementType() == MVT::i1) &&
         "Unexpected vector type");

  // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
  // type. This ensures they get CSE'd. But if the integer type is not
  // available, use a floating-point +0.0 instead.
  SDValue Vec;
  if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
    Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
  } else if (VT.isFloatingPoint()) {
    Vec = DAG.getConstantFP(+0.0, dl, VT);
  } else if (VT.getVectorElementType() == MVT::i1) {
    assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
           "Unexpected vector type");
    Vec = DAG.getConstant(0, dl, VT);
  } else {
    unsigned Num32BitElts = VT.getSizeInBits() / 32;
    Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
  }
  return DAG.getBitcast(VT, Vec);
}

static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
                                const SDLoc &dl, unsigned vectorWidth) {
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract the relevant vectorWidth bits.  Generate an EXTRACT_SUBVECTOR
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
  assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
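  // e.g. when extracting a 128-bit chunk from a vector of i32 elements,
  // ElemsPerChunk is 4 and IdxVal is rounded down to a multiple of 4.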
  IdxVal &= ~(ElemsPerChunk - 1);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getBuildVector(ResultVT, dl,
                              Vec->ops().slice(IdxVal, ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}

/// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, const SDLoc &dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return extractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, const SDLoc &dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return extractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                               SelectionDAG &DAG, const SDLoc &dl,
                               unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting an UNDEF subvector leaves Result unchanged.
  if (Vec.isUndef())
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
  assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
  IdxVal &= ~(ElemsPerChunk - 1);

  SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, const SDLoc &dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

/// Widen a vector to a larger size with the same scalar type, with the new
/// elements either zero or undef.
static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
                              const SDLoc &dl) {
  assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
         Vec.getValueType().getScalarType() == VT.getScalarType() &&
         "Unsupported vector widening type");
  SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
                                : DAG.getUNDEF(VT);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
                     DAG.getIntPtrConstant(0, dl));
}

/// Widen a vector to a larger size with the same scalar type, with the new
/// elements either zero or undef.
static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
                              const SDLoc &dl, unsigned WideSizeInBits) {
  assert(Vec.getValueSizeInBits() < WideSizeInBits &&
         (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
         "Unsupported vector widening type");
  unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
  MVT SVT = Vec.getSimpleValueType().getScalarType();
  MVT VT = MVT::getVectorVT(SVT, WideNumElts);
  return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
}

// Helper function to collect subvector ops that are concatenated together,
// either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
// The subvectors in Ops are guaranteed to be the same type.
static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
  assert(Ops.empty() && "Expected an empty ops vector");

  if (N->getOpcode() == ISD::CONCAT_VECTORS) {
    Ops.append(N->op_begin(), N->op_end());
    return true;
  }

  if (N->getOpcode() == ISD::INSERT_SUBVECTOR &&
      isa<ConstantSDNode>(N->getOperand(2))) {
    SDValue Src = N->getOperand(0);
    SDValue Sub = N->getOperand(1);
    const APInt &Idx = N->getConstantOperandAPInt(2);
    EVT VT = Src.getValueType();
    EVT SubVT = Sub.getValueType();

    // TODO - Handle more general insert_subvector chains.
    if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
        Idx == (VT.getVectorNumElements() / 2) &&
        Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
        Src.getOperand(1).getValueType() == SubVT &&
        isNullConstant(Src.getOperand(2))) {
      Ops.push_back(Src.getOperand(1));
      Ops.push_back(Sub);
      return true;
    }
  }

  return false;
}

// Helper for splitting the operands of an operation to a legal target size and
// applying a function on each part.
// Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
// 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
// deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
// The argument Builder is a function that will be applied on each split part:
// SDValue Builder(SelectionDAG &DAG, const SDLoc &DL, ArrayRef<SDValue> Ops)
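//
// For illustration only, a caller might split a wide integer multiply into
// legal-width parts with a lambda along these lines (MulBuilder is just an
// example name, not something defined in this file):
//   auto MulBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
//                        ArrayRef<SDValue> Ops) {
//     return DAG.getNode(ISD::MUL, DL, Ops[0].getValueType(),
//                        Ops[0], Ops[1]);
//   };
//   SDValue Res = SplitOpsAndApply(DAG, Subtarget, DL, VT, {LHS, RHS},
//                                  MulBuilder);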
template <typename F>
SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
                         const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
                         F Builder, bool CheckBWI = true) {
  assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
  unsigned NumSubs = 1;
  if ((CheckBWI && Subtarget.useBWIRegs()) ||
      (!CheckBWI && Subtarget.useAVX512Regs())) {
    if (VT.getSizeInBits() > 512) {
      NumSubs = VT.getSizeInBits() / 512;
      assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
    }
  } else if (Subtarget.hasAVX2()) {
    if (VT.getSizeInBits() > 256) {
      NumSubs = VT.getSizeInBits() / 256;
      assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
    }
  } else {
    if (VT.getSizeInBits() > 128) {
      NumSubs = VT.getSizeInBits() / 128;
      assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
    }
  }

  if (NumSubs == 1)
    return Builder(DAG, DL, Ops);

  SmallVector<SDValue, 4> Subs;
  for (unsigned i = 0; i != NumSubs; ++i) {
    SmallVector<SDValue, 2> SubOps;
    for (SDValue Op : Ops) {
      EVT OpVT = Op.getValueType();
      unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
      unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
      SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
    }
    Subs.push_back(Builder(DAG, DL, SubOps));
  }
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
}

/// Insert an i1-subvector into an i1-vector.
static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {

  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue SubVec = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);

  if (!isa<ConstantSDNode>(Idx))
    return SDValue();

  // Inserting undef is a nop. We can just return the original vector.
  if (SubVec.isUndef())
    return Vec;

  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
    return Op;

  MVT OpVT = Op.getSimpleValueType();
  unsigned NumElems = OpVT.getVectorNumElements();

  SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);

  // Extend to natively supported kshift.
  MVT WideOpVT = OpVT;
  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
    WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;

  // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
  // if necessary.
  if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
    // May need to promote to a legal type.
    Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                     DAG.getConstant(0, dl, WideOpVT),
                     SubVec, Idx);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
  }

  MVT SubVecVT = SubVec.getSimpleValueType();
  unsigned SubVecNumElems = SubVecVT.getVectorNumElements();

  assert(IdxVal + SubVecNumElems <= NumElems &&
         IdxVal % SubVecVT.getSizeInBits() == 0 &&
         "Unexpected index value in INSERT_SUBVECTOR");

  SDValue Undef = DAG.getUNDEF(WideOpVT);

  if (IdxVal == 0) {
    // Zero lower bits of the Vec
    SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
                      ZeroIdx);
    Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
    Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
    // Merge them together, SubVec should be zero extended.
    SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                         DAG.getConstant(0, dl, WideOpVT),
                         SubVec, ZeroIdx);
    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
  }

  SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                       Undef, SubVec, ZeroIdx);

  if (Vec.isUndef()) {
    assert(IdxVal != 0 && "Unexpected index");
    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                         DAG.getTargetConstant(IdxVal, dl, MVT::i8));
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
  }

  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
    assert(IdxVal != 0 && "Unexpected index");
    NumElems = WideOpVT.getVectorNumElements();
    unsigned ShiftLeft = NumElems - SubVecNumElems;
    unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                         DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
    if (ShiftRight != 0)
      SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
                           DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
  }

  // Simple case when we put subvector in the upper part
  if (IdxVal + SubVecNumElems == NumElems) {
    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                         DAG.getTargetConstant(IdxVal, dl, MVT::i8));
    if (SubVecNumElems * 2 == NumElems) {
      // Special case, use legal zero extending insert_subvector. This allows
      // isel to optimize when bits are known zero.
      Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                        DAG.getConstant(0, dl, WideOpVT),
                        Vec, ZeroIdx);
    } else {
      // Otherwise use explicit shifts to zero the bits.
      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                        Undef, Vec, ZeroIdx);
      NumElems = WideOpVT.getVectorNumElements();
      SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
      Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
      Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
    }
    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
  }

  // Inserting into the middle is more complicated.
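  // E.g. with WideOpVT v16i1, IdxVal 4 and a 4-element subvector:
  //   SubVec: kshiftl by 12, then kshiftr by 8  -> subvector lands in bits [4,8)
  //   Low:    kshiftl by 12, then kshiftr by 12 -> keeps original bits [0,4)
  //   High:   kshiftr by 8,  then kshiftl by 8  -> keeps original bits [8,16)
  // The final ORs stitch the three pieces back together.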

  NumElems = WideOpVT.getVectorNumElements();

  // Widen the vector if needed.
  Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);

  // Clear the upper bits of the subvector and move it to its insert position.
  unsigned ShiftLeft = NumElems - SubVecNumElems;
  SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                       DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
  unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
  SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
                       DAG.getTargetConstant(ShiftRight, dl, MVT::i8));

  // Isolate the bits below the insertion point.
  unsigned LowShift = NumElems - IdxVal;
  SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
                            DAG.getTargetConstant(LowShift, dl, MVT::i8));
  Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
                    DAG.getTargetConstant(LowShift, dl, MVT::i8));

  // Isolate the bits after the last inserted bit.
  unsigned HighShift = IdxVal + SubVecNumElems;
  SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
                             DAG.getTargetConstant(HighShift, dl, MVT::i8));
  High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
                     DAG.getTargetConstant(HighShift, dl, MVT::i8));

  // Now OR all 3 pieces together.
  Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
  SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);

  // Reduce to original width if needed.
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
}

static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
                                const SDLoc &dl) {
  assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
  EVT SubVT = V1.getValueType();
  EVT SubSVT = SubVT.getScalarType();
  unsigned SubNumElts = SubVT.getVectorNumElements();
  unsigned SubVectorWidth = SubVT.getSizeInBits();
  EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
  SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
  return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
}

/// Returns a vector of the specified type with all bits set.
/// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
/// Then bitcast to their original type, ensuring they get CSE'd.
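/// E.g. an all-ones v4i64 is built as a v8i32 <-1,-1,-1,-1,-1,-1,-1,-1>
/// constant and then bitcast back to v4i64.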
static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
         "Expected a 128/256/512-bit vector type");

  APInt Ones = APInt::getAllOnesValue(32);
  unsigned NumElts = VT.getSizeInBits() / 32;
  SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
  return DAG.getBitcast(VT, Vec);
}

// Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
  switch (Opcode) {
  case ISD::ANY_EXTEND:
  case ISD::ANY_EXTEND_VECTOR_INREG:
    return ISD::ANY_EXTEND_VECTOR_INREG;
  case ISD::ZERO_EXTEND:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    return ISD::ZERO_EXTEND_VECTOR_INREG;
  case ISD::SIGN_EXTEND:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return ISD::SIGN_EXTEND_VECTOR_INREG;
  }
  llvm_unreachable("Unknown opcode");
}

static SDValue getExtendInVec(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue In, SelectionDAG &DAG) {
  EVT InVT = In.getValueType();
  assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
  assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
          ISD::ZERO_EXTEND == Opcode) &&
         "Unknown extension opcode");

  // For 256-bit vectors, we only need the lower (128-bit) input half.
  // For 512-bit vectors, we only need the lower input half or quarter.
  if (InVT.getSizeInBits() > 128) {
    assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
           "Expected VTs to be the same size!");
    unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
    In = extractSubVector(In, 0, DAG, DL,
                          std::max(128U, VT.getSizeInBits() / Scale));
    InVT = In.getValueType();
  }

  if (VT.getVectorNumElements() != InVT.getVectorNumElements())
    Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);

  return DAG.getNode(Opcode, DL, VT, In);
}

// Match (xor X, -1) -> X.
// Match extract_subvector(xor X, -1) -> extract_subvector(X).
// Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
  V = peekThroughBitcasts(V);
  if (V.getOpcode() == ISD::XOR &&
      ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
    return V.getOperand(0);
  if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
    if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
      Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
                         Not, V.getOperand(1));
    }
  }
  SmallVector<SDValue, 2> CatOps;
  if (collectConcatOps(V.getNode(), CatOps)) {
    for (SDValue &CatOp : CatOps) {
      SDValue NotCat = IsNOT(CatOp, DAG);
      if (!NotCat) return SDValue();
      CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
    }
    return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
  }
  return SDValue();
}

/// Returns a vector_shuffle node for an unpackl operation.
static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
                          SDValue V1, SDValue V2) {
  SmallVector<int, 8> Mask;
  createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}

/// Returns a vector_shuffle node for an unpackh operation.
static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
                          SDValue V1, SDValue V2) {
  SmallVector<int, 8> Mask;
  createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}

/// Return a vector_shuffle of the specified vector and a zero or undef vector.
/// This produces a shuffle where the low element of V2 is swizzled into the
/// zero/undef vector, landing at element Idx.
/// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
                                           bool IsZero,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
  MVT VT = V2.getSimpleValueType();
  SDValue V1 = IsZero
    ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
  int NumElems = VT.getVectorNumElements();
  SmallVector<int, 16> MaskVec(NumElems);
  for (int i = 0; i != NumElems; ++i)
    // If this is the insertion idx, put the low elt of V2 here.
    MaskVec[i] = (i == Idx) ? NumElems : i;
  return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
}

static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
  if (!Load || !ISD::isNormalLoad(Load))
    return nullptr;

  SDValue Ptr = Load->getBasePtr();
  if (Ptr->getOpcode() == X86ISD::Wrapper ||
      Ptr->getOpcode() == X86ISD::WrapperRIP)
    Ptr = Ptr->getOperand(0);

  auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
  if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
    return nullptr;

  return CNode->getConstVal();
}

static const Constant *getTargetConstantFromNode(SDValue Op) {
  Op = peekThroughBitcasts(Op);
  return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
}

const Constant *
X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
  assert(LD && "Unexpected null LoadSDNode");
  return getTargetConstantFromNode(LD);
}

// Extract raw constant bits from constant pools.
static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
                                          APInt &UndefElts,
                                          SmallVectorImpl<APInt> &EltBits,
                                          bool AllowWholeUndefs = true,
                                          bool AllowPartialUndefs = true) {
  assert(EltBits.empty() && "Expected an empty EltBits vector");

  Op = peekThroughBitcasts(Op);

  EVT VT = Op.getValueType();
  unsigned SizeInBits = VT.getSizeInBits();
  assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
  unsigned NumElts = SizeInBits / EltSizeInBits;

  // Bitcast a source array of element bits to the target size.
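  // E.g. a <2 x i64> source repacked to 8-bit target elements produces 16
  // EltBits entries, while a <16 x i8> source repacked to 32-bit elements
  // produces 4 entries; a target element is marked undef only when every
  // source bit covering it is undef.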
  auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
    unsigned NumSrcElts = UndefSrcElts.getBitWidth();
    unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
    assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
           "Constant bit sizes don't match");

    // Don't split if we don't allow undef bits.
    bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
    if (UndefSrcElts.getBoolValue() && !AllowUndefs)
      return false;

    // If we're already the right size, don't bother bitcasting.
    if (NumSrcElts == NumElts) {
      UndefElts = UndefSrcElts;
      EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
      return true;
    }

    // Extract all the undef/constant element data and pack into single bitsets.
    APInt UndefBits(SizeInBits, 0);
    APInt MaskBits(SizeInBits, 0);

    for (unsigned i = 0; i != NumSrcElts; ++i) {
      unsigned BitOffset = i * SrcEltSizeInBits;
      if (UndefSrcElts[i])
        UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
      MaskBits.insertBits(SrcEltBits[i], BitOffset);
    }

    // Split the undef/constant single bitset data into the target elements.
    UndefElts = APInt(NumElts, 0);
    EltBits.resize(NumElts, APInt(EltSizeInBits, 0));

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned BitOffset = i * EltSizeInBits;
      APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);

      // Only treat an element as UNDEF if all bits are UNDEF.
      if (UndefEltBits.isAllOnesValue()) {
        if (!AllowWholeUndefs)
          return false;
        UndefElts.setBit(i);
        continue;
      }

      // If only some bits are UNDEF then treat them as zero (or bail if not
      // supported).
      if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
        return false;

      EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
    }
    return true;
  };

  // Collect constant bits and insert into mask/undef bit masks.
  auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
                                unsigned UndefBitIndex) {
    if (!Cst)
      return false;
    if (isa<UndefValue>(Cst)) {
      Undefs.setBit(UndefBitIndex);
      return true;
    }
    if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
      Mask = CInt->getValue();
      return true;
    }
    if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
      Mask = CFP->getValueAPF().bitcastToAPInt();
      return true;
    }
    return false;
  };

  // Handle UNDEFs.
  if (Op.isUndef()) {
    APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
    SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract scalar constant bits.
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
    APInt UndefSrcElts = APInt::getNullValue(1);
    SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
  if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
    APInt UndefSrcElts = APInt::getNullValue(1);
    APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
    SmallVector<APInt, 64> SrcEltBits(1, RawBits);
    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract constant bits from build vector.
  if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      const SDValue &Src = Op.getOperand(i);
      if (Src.isUndef()) {
        UndefSrcElts.setBit(i);
        continue;
      }
      auto *Cst = cast<ConstantSDNode>(Src);
      SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
    }
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
  if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      const SDValue &Src = Op.getOperand(i);
      if (Src.isUndef()) {
        UndefSrcElts.setBit(i);
        continue;
      }
      auto *Cst = cast<ConstantFPSDNode>(Src);
      APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
      SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
    }
    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract constant bits from constant pool vector.
  if (auto *Cst = getTargetConstantFromNode(Op)) {
    Type *CstTy = Cst->getType();
    unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
    if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
      return false;

    unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
    for (unsigned i = 0; i != NumSrcElts; ++i)
      if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
                               UndefSrcElts, i))
        return false;

    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract constant bits from a broadcasted constant pool scalar.
  if (Op.getOpcode() == X86ISD::VBROADCAST &&
      EltSizeInBits <= VT.getScalarSizeInBits()) {
    if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
      unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
      unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

      APInt UndefSrcElts(NumSrcElts, 0);
      SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
      if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
        if (UndefSrcElts[0])
          UndefSrcElts.setBits(0, NumSrcElts);
        SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
        return CastBitData(UndefSrcElts, SrcEltBits);
      }
    }
  }

  if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
      EltSizeInBits <= VT.getScalarSizeInBits()) {
    auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
    if (MemIntr->getMemoryVT().getScalarSizeInBits() != VT.getScalarSizeInBits())
      return false;

    SDValue Ptr = MemIntr->getBasePtr();
    if (Ptr->getOpcode() == X86ISD::Wrapper ||
        Ptr->getOpcode() == X86ISD::WrapperRIP)
      Ptr = Ptr->getOperand(0);

    auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
    if (!CNode || CNode->isMachineConstantPoolEntry() ||
        CNode->getOffset() != 0)
      return false;

    if (const Constant *C = CNode->getConstVal()) {
      unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
      unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

      APInt UndefSrcElts(NumSrcElts, 0);
      SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
      if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
        if (UndefSrcElts[0])
          UndefSrcElts.setBits(0, NumSrcElts);
        SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
        return CastBitData(UndefSrcElts, SrcEltBits);
      }
    }
  }

  // Extract constant bits from a subvector broadcast.
  if (Op.getOpcode() == X86ISD::SUBV_BROADCAST) {
    SmallVector<APInt, 16> SubEltBits;
    if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                      UndefElts, SubEltBits, AllowWholeUndefs,
                                      AllowPartialUndefs)) {
      UndefElts = APInt::getSplat(NumElts, UndefElts);
      while (EltBits.size() < NumElts)
        EltBits.append(SubEltBits.begin(), SubEltBits.end());
      return true;
    }
  }

  // Extract a rematerialized scalar constant insertion.
  if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
      Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits;
    auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
    SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
    SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Insert constant bits from base and sub-vector sources.
  if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
      isa<ConstantSDNode>(Op.getOperand(2))) {
    // TODO - support insert_subvector through bitcasts.
    if (EltSizeInBits != VT.getScalarSizeInBits())
      return false;

    APInt UndefSubElts;
    SmallVector<APInt, 32> EltSubBits;
    if (getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
                                      UndefSubElts, EltSubBits,
                                      AllowWholeUndefs, AllowPartialUndefs) &&
        getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                      UndefElts, EltBits, AllowWholeUndefs,
                                      AllowPartialUndefs)) {
      unsigned BaseIdx = Op.getConstantOperandVal(2);
      UndefElts.insertBits(UndefSubElts, BaseIdx);
      for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
        EltBits[BaseIdx + i] = EltSubBits[i];
      return true;
    }
  }

  // Extract constant bits from a subvector's source.
  if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      isa<ConstantSDNode>(Op.getOperand(1))) {
    // TODO - support extract_subvector through bitcasts.
    if (EltSizeInBits != VT.getScalarSizeInBits())
      return false;

    if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                      UndefElts, EltBits, AllowWholeUndefs,
                                      AllowPartialUndefs)) {
      EVT SrcVT = Op.getOperand(0).getValueType();
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      unsigned NumSubElts = VT.getVectorNumElements();
      unsigned BaseIdx = Op.getConstantOperandVal(1);
      UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
      if ((BaseIdx + NumSubElts) != NumSrcElts)
        EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
      if (BaseIdx != 0)
        EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
      return true;
    }
  }

  // Extract constant bits from shuffle node sources.
  if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
    // TODO - support shuffle through bitcasts.
    if (EltSizeInBits != VT.getScalarSizeInBits())
      return false;

    ArrayRef<int> Mask = SVN->getMask();
    if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
        llvm::any_of(Mask, [](int M) { return M < 0; }))
      return false;

    APInt UndefElts0, UndefElts1;
    SmallVector<APInt, 32> EltBits0, EltBits1;
    if (isAnyInRange(Mask, 0, NumElts) &&
        !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                       UndefElts0, EltBits0, AllowWholeUndefs,
                                       AllowPartialUndefs))
      return false;
    if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
        !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
                                       UndefElts1, EltBits1, AllowWholeUndefs,
                                       AllowPartialUndefs))
      return false;

    UndefElts = APInt::getNullValue(NumElts);
    for (int i = 0; i != (int)NumElts; ++i) {
      int M = Mask[i];
      if (M < 0) {
        UndefElts.setBit(i);
        EltBits.push_back(APInt::getNullValue(EltSizeInBits));
      } else if (M < (int)NumElts) {
        if (UndefElts0[M])
          UndefElts.setBit(i);
        EltBits.push_back(EltBits0[M]);
      } else {
        if (UndefElts1[M - NumElts])
          UndefElts.setBit(i);
        EltBits.push_back(EltBits1[M - NumElts]);
      }
    }
    return true;
  }

  return false;
}

namespace llvm {
namespace X86 {
bool isConstantSplat(SDValue Op, APInt &SplatVal) {
  APInt UndefElts;
  SmallVector<APInt, 16> EltBits;
  if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
                                    UndefElts, EltBits, true, false)) {
    int SplatIndex = -1;
    for (int i = 0, e = EltBits.size(); i != e; ++i) {
      if (UndefElts[i])
        continue;
      if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
        SplatIndex = -1;
        break;
      }
      SplatIndex = i;
    }
    if (0 <= SplatIndex) {
      SplatVal = EltBits[SplatIndex];
      return true;
    }
  }

  return false;
}
} // namespace X86
} // namespace llvm

static bool getTargetShuffleMaskIndices(SDValue MaskNode,
                                        unsigned MaskEltSizeInBits,
                                        SmallVectorImpl<uint64_t> &RawMask,
                                        APInt &UndefElts) {
  // Extract the raw target constant bits.
  SmallVector<APInt, 64> EltBits;
  if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
                                     EltBits, /* AllowWholeUndefs */ true,
                                     /* AllowPartialUndefs */ false))
    return false;

  // Insert the extracted elements into the mask.
  for (APInt Elt : EltBits)
    RawMask.push_back(Elt.getZExtValue());

  return true;
}

/// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
/// Note: This ignores saturation, so inputs must be checked first.
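/// E.g. for a binary pack of two v8i16 inputs into a v16i8 result, the mask
/// (over the byte-cast operands) is <0,2,4,...,14,16,18,...,30>, i.e. the low
/// byte of every input element; for wider types the same pattern repeats
/// within each 128-bit lane.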
static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
                                  bool Unary) {
  assert(Mask.empty() && "Expected an empty shuffle mask vector");
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits() / 128;
  unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
  unsigned Offset = Unary ? 0 : NumElts;

  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
      Mask.push_back(Elt + (Lane * NumEltsPerLane));
    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
      Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
  }
}

// Split the demanded elts of a PACKSS/PACKUS node between its operands.
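// E.g. for a v16i8 pack result, demanded result element 3 maps to element 3
// of the (v8i16) LHS operand, while demanded result element 11 maps to
// element 3 of the RHS operand.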
static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
                                APInt &DemandedLHS, APInt &DemandedRHS) {
  int NumLanes = VT.getSizeInBits() / 128;
  int NumElts = DemandedElts.getBitWidth();
  int NumInnerElts = NumElts / 2;
  int NumEltsPerLane = NumElts / NumLanes;
  int NumInnerEltsPerLane = NumInnerElts / NumLanes;

  DemandedLHS = APInt::getNullValue(NumInnerElts);
  DemandedRHS = APInt::getNullValue(NumInnerElts);

  // Map DemandedElts to the packed operands.
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
      int OuterIdx = (Lane * NumEltsPerLane) + Elt;
      int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
      if (DemandedElts[OuterIdx])
        DemandedLHS.setBit(InnerIdx);
      if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
        DemandedRHS.setBit(InnerIdx);
    }
  }
}

// Split the demanded elts of a HADD/HSUB node between its operands.
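// E.g. for a v4i32 HADD, demanding result element 1 requires LHS elements
// 2 and 3, while demanding result element 2 requires RHS elements 0 and 1.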
static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
                                 APInt &DemandedLHS, APInt &DemandedRHS) {
  int NumLanes = VT.getSizeInBits() / 128;
  int NumElts = DemandedElts.getBitWidth();
  int NumEltsPerLane = NumElts / NumLanes;
  int HalfEltsPerLane = NumEltsPerLane / 2;

  DemandedLHS = APInt::getNullValue(NumElts);
  DemandedRHS = APInt::getNullValue(NumElts);

  // Map DemandedElts to the horizontal operands.
  for (int Idx = 0; Idx != NumElts; ++Idx) {
    if (!DemandedElts[Idx])
      continue;
    int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
    int LocalIdx = Idx % NumEltsPerLane;
    if (LocalIdx < HalfEltsPerLane) {
      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
    } else {
      LocalIdx -= HalfEltsPerLane;
      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
    }
  }
}

/// Calculates the shuffle mask corresponding to the target-specific opcode.
/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
/// operands in \p Ops, and returns true.
/// Sets \p IsUnary to true if only one source is used. Note that this will set
/// IsUnary for shuffles which use a single input multiple times, and in those
/// cases it will adjust the mask to only have indices within that single input.
/// It is an error to call this with non-empty Mask/Ops vectors.
static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
                                 SmallVectorImpl<SDValue> &Ops,
                                 SmallVectorImpl<int> &Mask, bool &IsUnary) {
  unsigned NumElems = VT.getVectorNumElements();
  unsigned MaskEltSize = VT.getScalarSizeInBits();
  SmallVector<uint64_t, 32> RawMask;
  APInt RawUndefs;
  SDValue ImmN;

  assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
  assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");

  IsUnary = false;
  bool IsFakeUnary = false;
  switch (N->getOpcode()) {
  case X86ISD::BLENDI:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::SHUFP:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodeSHUFPMask(NumElems, MaskEltSize,
                    cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::INSERTPS:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::EXTRQI:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    if (isa<ConstantSDNode>(N->getOperand(1)) &&
        isa<ConstantSDNode>(N->getOperand(2))) {
      int BitLen = N->getConstantOperandVal(1);
      int BitIdx = N->getConstantOperandVal(2);
      DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
      IsUnary = true;
    }
    break;
  case X86ISD::INSERTQI:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    if (isa<ConstantSDNode>(N->getOperand(2)) &&
        isa<ConstantSDNode>(N->getOperand(3))) {
      int BitLen = N->getConstantOperandVal(2);
      int BitIdx = N->getConstantOperandVal(3);
      DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
      IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    }
    break;
  case X86ISD::UNPCKH:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::UNPCKL:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::MOVHLPS:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    DecodeMOVHLPSMask(NumElems, Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::MOVLHPS:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    DecodeMOVLHPSMask(NumElems, Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::PALIGNR:
    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                      Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    Ops.push_back(N->getOperand(1));
    Ops.push_back(N->getOperand(0));
    break;
  case X86ISD::VSHLDQ:
    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                     Mask);
    IsUnary = true;
    break;
  case X86ISD::VSRLDQ:
    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                     Mask);
    IsUnary = true;
    break;
  case X86ISD::PSHUFD:
  case X86ISD::VPERMILPI:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePSHUFMask(NumElems, MaskEltSize,
                    cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::PSHUFHW:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                      Mask);
    IsUnary = true;
    break;
  case X86ISD::PSHUFLW:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                      Mask);
    IsUnary = true;
    break;
  case X86ISD::VZEXT_MOVL:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    DecodeZeroMoveLowMask(NumElems, Mask);
    IsUnary = true;
    break;
  case X86ISD::VBROADCAST: {
    SDValue N0 = N->getOperand(0);
    // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
    // add the pre-extracted value to the Ops vector.
    if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        N0.getOperand(0).getValueType() == VT &&
        N0.getConstantOperandVal(1) == 0)
      Ops.push_back(N0.getOperand(0));

    // We only decode broadcasts of same-sized vectors, unless the broadcast
    // came from an extract from the original width. If we found one, we
    // pushed it onto the Ops vector above.
    if (N0.getValueType() == VT || !Ops.empty()) {
      DecodeVectorBroadcast(NumElems, Mask);
      IsUnary = true;
      break;
    }
    return false;
  }
  case X86ISD::VPERMILPV: {
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    IsUnary = true;
    SDValue MaskNode = N->getOperand(1);
    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
                                    RawUndefs)) {
      DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
      break;
    }
    return false;
  }
  case X86ISD::PSHUFB: {
    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    IsUnary = true;
    SDValue MaskNode = N->getOperand(1);
    if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
      DecodePSHUFBMask(RawMask, RawUndefs, Mask);
      break;
    }
    return false;
  }
  case X86ISD::VPERMI:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
    break;
  case X86ISD::VPERM2X128:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                         Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::SHUF128:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize,
                              cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::MOVSLDUP:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    DecodeMOVSLDUPMask(NumElems, Mask);
    IsUnary = true;
    break;
  case X86ISD::MOVSHDUP:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    DecodeMOVSHDUPMask(NumElems, Mask);
    IsUnary = true;
    break;
  case X86ISD::MOVDDUP:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    DecodeMOVDDUPMask(NumElems, Mask);
    IsUnary = true;
    break;
  case X86ISD::VPERMIL2: {
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    SDValue MaskNode = N->getOperand(2);
    SDValue CtrlNode = N->getOperand(3);
    if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
      unsigned CtrlImm = CtrlOp->getZExtValue();
      if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
                                      RawUndefs)) {
        DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
                            Mask);
        break;
      }
    }
    return false;
  }
  case X86ISD::VPPERM: {
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    SDValue MaskNode = N->getOperand(2);
    if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
      DecodeVPPERMMask(RawMask, RawUndefs, Mask);
      break;
    }
    return false;
  }
  case X86ISD::VPERMV: {
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    IsUnary = true;
    // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
    Ops.push_back(N->getOperand(1));
    SDValue MaskNode = N->getOperand(0);
    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
                                    RawUndefs)) {
      DecodeVPERMVMask(RawMask, RawUndefs, Mask);
      break;
    }
    return false;
  }
  case X86ISD::VPERMV3: {
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
    // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
    Ops.push_back(N->getOperand(0));
    Ops.push_back(N->getOperand(2));
    SDValue MaskNode = N->getOperand(1);
    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
                                    RawUndefs)) {
      DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
      break;
    }
    return false;
  }
  default: llvm_unreachable("unknown target shuffle node");
  }

  // Empty mask indicates the decode failed.
  if (Mask.empty())
    return false;

  // Check if we're getting a shuffle mask with zero'd elements.
  if (!AllowSentinelZero)
    if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
      return false;

  // If we have a fake unary shuffle, the shuffle mask is spread across two
  // inputs that are actually the same node. Re-map the mask to always point
  // into the first input.
  if (IsFakeUnary)
    for (int &M : Mask)
      if (M >= (int)Mask.size())
        M -= Mask.size();

  // If we didn't already add operands in the opcode-specific code, default to
  // adding 1 or 2 operands starting at 0.
  if (Ops.empty()) {
    Ops.push_back(N->getOperand(0));
    if (!IsUnary || IsFakeUnary)
      Ops.push_back(N->getOperand(1));
  }

  return true;
}

/// Decode a target shuffle mask and inputs and see if any values are
/// known to be undef or zero from their inputs.
/// Returns true if the target shuffle mask was decoded.
static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
                                         SmallVectorImpl<SDValue> &Ops,
                                         APInt &KnownUndef, APInt &KnownZero) {
  bool IsUnary;
  if (!isTargetShuffle(N.getOpcode()))
    return false;

  MVT VT = N.getSimpleValueType();
  if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
    return false;

  int Size = Mask.size();
  SDValue V1 = Ops[0];
  SDValue V2 = IsUnary ? V1 : Ops[1];
  KnownUndef = KnownZero = APInt::getNullValue(Size);

  V1 = peekThroughBitcasts(V1);
  V2 = peekThroughBitcasts(V2);

  assert((VT.getSizeInBits() % Mask.size()) == 0 &&
         "Illegal split of shuffle value type");
  unsigned EltSizeInBits = VT.getSizeInBits() / Size;

  // Extract known constant input data.
  APInt UndefSrcElts[2];
  SmallVector<APInt, 32> SrcEltBits[2];
  bool IsSrcConstant[2] = {
      getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
                                    SrcEltBits[0], true, false),
      getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
                                    SrcEltBits[1], true, false)};

  for (int i = 0; i < Size; ++i) {
    int M = Mask[i];

    // Already decoded as SM_SentinelZero / SM_SentinelUndef.
    if (M < 0) {
      assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
      if (SM_SentinelUndef == M)
        KnownUndef.setBit(i);
      if (SM_SentinelZero == M)
        KnownZero.setBit(i);
      continue;
    }

    // Determine shuffle input and normalize the mask.
    unsigned SrcIdx = M / Size;
    SDValue V = M < Size ? V1 : V2;
    M %= Size;

    // We are referencing an UNDEF input.
    if (V.isUndef()) {
      KnownUndef.setBit(i);
      continue;
    }

    // SCALAR_TO_VECTOR - only the first element is defined, and the rest are UNDEF.
    // TODO: We currently only set UNDEF for integer types - floats use the same
    // registers as vectors and many of the scalar folded loads rely on the
    // SCALAR_TO_VECTOR pattern.
    if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
        (Size % V.getValueType().getVectorNumElements()) == 0) {
      int Scale = Size / V.getValueType().getVectorNumElements();
      int Idx = M / Scale;
      if (Idx != 0 && !VT.isFloatingPoint())
        KnownUndef.setBit(i);
      else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
        KnownZero.setBit(i);
      continue;
    }

    // Attempt to extract from the source's constant bits.
    if (IsSrcConstant[SrcIdx]) {
      if (UndefSrcElts[SrcIdx][M])
        KnownUndef.setBit(i);
      else if (SrcEltBits[SrcIdx][M] == 0)
        KnownZero.setBit(i);
    }
  }

  assert(VT.getVectorNumElements() == (unsigned)Size &&
         "Different mask size from vector size!");
  return true;
}

// Replace target shuffle mask elements with known undef/zero sentinels.
static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
                                              const APInt &KnownUndef,
                                              const APInt &KnownZero) {
  unsigned NumElts = Mask.size();
  assert(KnownUndef.getBitWidth() == NumElts &&
         KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");

  for (unsigned i = 0; i != NumElts; ++i) {
    if (KnownUndef[i])
      Mask[i] = SM_SentinelUndef;
    else if (KnownZero[i])
      Mask[i] = SM_SentinelZero;
  }
}

// Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
                                              APInt &KnownUndef,
                                              APInt &KnownZero) {
  unsigned NumElts = Mask.size();
  KnownUndef = KnownZero = APInt::getNullValue(NumElts);

  for (unsigned i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    if (SM_SentinelUndef == M)
      KnownUndef.setBit(i);
    if (SM_SentinelZero == M)
      KnownZero.setBit(i);
  }
}

// Forward declaration (for getFauxShuffleMask recursive check).
// TODO: Use DemandedElts variant.
static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
                                   SmallVectorImpl<int> &Mask,
                                   SelectionDAG &DAG, unsigned Depth,
                                   bool ResolveKnownElts);

// Attempt to decode ops that could be represented as a shuffle mask.
// The decoded shuffle mask may contain a different number of elements than the
// destination value type.
static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
                               SmallVectorImpl<int> &Mask,
                               SmallVectorImpl<SDValue> &Ops,
                               SelectionDAG &DAG, unsigned Depth,
                               bool ResolveKnownElts) {
  Mask.clear();
  Ops.clear();

  MVT VT = N.getSimpleValueType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumSizeInBits = VT.getSizeInBits();
  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
  if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
    return false;
  assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");

  unsigned Opcode = N.getOpcode();
  switch (Opcode) {
  case ISD::VECTOR_SHUFFLE: {
    // ISD::VECTOR_SHUFFLE is not treated as a target shuffle, so decode it here.
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
    if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
      Mask.append(ShuffleMask.begin(), ShuffleMask.end());
      Ops.push_back(N.getOperand(0));
      Ops.push_back(N.getOperand(1));
      return true;
    }
    return false;
  }
  case ISD::AND:
  case X86ISD::ANDNP: {
    // Attempt to decode as a per-byte mask.
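    // E.g. AND(X, <0xFF,0x00,0xFF,0x00,...>) (per-byte constant) becomes the
    // byte shuffle <0,SM_SentinelZero,2,SM_SentinelZero,...> of X; for ANDNP
    // the roles of the 0x00 and 0xFF bytes are swapped.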
    APInt UndefElts;
    SmallVector<APInt, 32> EltBits;
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    bool IsAndN = (X86ISD::ANDNP == Opcode);
    uint64_t ZeroMask = IsAndN ? 255 : 0;
    if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
      return false;
    for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
      if (UndefElts[i]) {
        Mask.push_back(SM_SentinelUndef);
        continue;
      }
      const APInt &ByteBits = EltBits[i];
      if (ByteBits != 0 && ByteBits != 255)
        return false;
      Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
    }
    Ops.push_back(IsAndN ? N1 : N0);
    return true;
  }
  case ISD::OR: {
    // Inspect each operand at the byte level. We can merge these into a
    // blend shuffle mask if for each byte at least one is masked out (zero).
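    // E.g. OR(AND(X, <0xFF,0x00,0xFF,0x00,...>), AND(Y, <0x00,0xFF,0x00,0xFF,...>))
    // (per-byte masks) selects the even bytes from X and the odd bytes from Y,
    // i.e. a byte-level blend of the two operands.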
    KnownBits Known0 =
        DAG.computeKnownBits(N.getOperand(0), DemandedElts, Depth + 1);
    KnownBits Known1 =
        DAG.computeKnownBits(N.getOperand(1), DemandedElts, Depth + 1);
    if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
      bool IsByteMask = true;
      unsigned NumSizeInBytes = NumSizeInBits / 8;
      unsigned NumBytesPerElt = NumBitsPerElt / 8;
      APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
      APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
      for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
        unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
        unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
        if (LHS == 255 && RHS == 0)
          SelectMask.setBit(i);
        else if (LHS == 255 && RHS == 255)
          ZeroMask.setBit(i);
        else if (!(LHS == 0 && RHS == 255))
          IsByteMask = false;
      }
      if (IsByteMask) {
        for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
          for (unsigned j = 0; j != NumBytesPerElt; ++j) {
            unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
            int Idx = (ZeroMask[j] ? (int)SM_SentinelZero : (i + j + Ofs));
            Mask.push_back(Idx);
          }
        }
        Ops.push_back(N.getOperand(0));
        Ops.push_back(N.getOperand(1));
        return true;
      }
    }

    // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
    // is a valid shuffle index.
    SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
    SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
    if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
      return false;
    SmallVector<int, 64> SrcMask0, SrcMask1;
    SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
    if (!getTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG, Depth + 1,
                                true) ||
        !getTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG, Depth + 1,
                                true))
      return false;
    size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
    SmallVector<int, 64> Mask0, Mask1;
    scaleShuffleMask<int>(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
    scaleShuffleMask<int>(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
    for (size_t i = 0; i != MaskSize; ++i) {
      if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
        Mask.push_back(SM_SentinelUndef);
      else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
        Mask.push_back(SM_SentinelZero);
      else if (Mask1[i] == SM_SentinelZero)
        Mask.push_back(Mask0[i]);
      else if (Mask0[i] == SM_SentinelZero)
        Mask.push_back(Mask1[i] + (int)(MaskSize * SrcInputs0.size()));
      else
        return false;
    }
    Ops.append(SrcInputs0.begin(), SrcInputs0.end());
    Ops.append(SrcInputs1.begin(), SrcInputs1.end());
    return true;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue Src = N.getOperand(0);
    SDValue Sub = N.getOperand(1);
    EVT SubVT = Sub.getValueType();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    if (!isa<ConstantSDNode>(N.getOperand(2)) ||
        !N->isOnlyUserOf(Sub.getNode()))
      return false;
    uint64_t InsertIdx = N.getConstantOperandVal(2);
    // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
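    // E.g. inserting the low v4i32 half of SRC1 into the upper half of a
    // v8i32 SRC0 produces the mask <0,1,2,3,8,9,10,11> over { SRC0, SRC1 }.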
    if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        Sub.getOperand(0).getValueType() == VT &&
        isa<ConstantSDNode>(Sub.getOperand(1))) {
      uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
      for (int i = 0; i != (int)NumElts; ++i)
        Mask.push_back(i);
      for (int i = 0; i != (int)NumSubElts; ++i)
        Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
      Ops.push_back(Src);
      Ops.push_back(Sub.getOperand(0));
      return true;
    }
    // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
    SmallVector<int, 64> SubMask;
    SmallVector<SDValue, 2> SubInputs;
    if (!getTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
                                SubMask, DAG, Depth + 1, ResolveKnownElts))
      return false;
    if (SubMask.size() != NumSubElts) {
      assert(((SubMask.size() % NumSubElts) == 0 ||
              (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
      if ((NumSubElts % SubMask.size()) == 0) {
        int Scale = NumSubElts / SubMask.size();
        SmallVector<int, 64> ScaledSubMask;
        scaleShuffleMask<int>(Scale, SubMask, ScaledSubMask);
        SubMask = ScaledSubMask;
      } else {
        int Scale = SubMask.size() / NumSubElts;
        NumSubElts = SubMask.size();
        NumElts *= Scale;
        InsertIdx *= Scale;
      }
    }
    Ops.push_back(Src);
    for (SDValue &SubInput : SubInputs) {
      EVT SubSVT = SubInput.getValueType().getScalarType();
      EVT AltVT = EVT::getVectorVT(*DAG.getContext(), SubSVT,
                                   NumSizeInBits / SubSVT.getSizeInBits());
      Ops.push_back(DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), AltVT,
                                DAG.getUNDEF(AltVT), SubInput,
                                DAG.getIntPtrConstant(0, SDLoc(N))));
    }
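    // Start from an identity mask of the Src lanes, then overwrite the
    // inserted lanes with indices into the widened sub-inputs that follow Src
    // in Ops.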
    for (int i = 0; i != (int)NumElts; ++i)
      Mask.push_back(i);
    for (int i = 0; i != (int)NumSubElts; ++i) {
      int M = SubMask[i];
      if (0 <= M) {
        int InputIdx = M / NumSubElts;
        M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
      }
      Mask[i + InsertIdx] = M;
    }
    return true;
  }
  case ISD::SCALAR_TO_VECTOR: {
    // Match against a scalar_to_vector of an extract from a vector. For
    // PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
    SDValue N0 = N.getOperand(0);
    SDValue SrcExtract;

    if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
         N0.getOperand(0).getValueType() == VT) ||
        (N0.getOpcode() == X86ISD::PEXTRW &&
         N0.getOperand(0).getValueType() == MVT::v8i16) ||
        (N0.getOpcode() == X86ISD::PEXTRB &&
         N0.getOperand(0).getValueType() == MVT::v16i8)) {
      SrcExtract = N0;
    }

    if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
      return false;

    SDValue SrcVec = SrcExtract.getOperand(0);
    EVT SrcVT = SrcVec.getValueType();
    unsigned NumSrcElts = SrcVT.getVectorNumElements();
    // PEXTRW/PEXTRB implicitly zero-extend the scalar, so the remaining
    // sub-elements of the wider destination element are known zero.
    unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;

    unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
    if (NumSrcElts <= SrcIdx)
      return false;

    Ops.push_back(SrcVec);
    Mask.push_back(SrcIdx);
    Mask.append(NumZeros, SM_SentinelZero);
    Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
    return true;
  }
  case X86ISD::PINSRB:
  case X86ISD::PINSRW: {
    SDValue InVec = N.getOperand(0);
    SDValue InScl = N.getOperand(1);
    SDValue InIndex = N.getOperand(2);
    if (!isa<ConstantSDNode>(InIndex) ||
        cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts))
      return false;
    uint64_t InIdx = N.getConstantOperandVal(2);

    // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
    if (X86::isZeroNode(InScl)) {
      Ops.push_back(InVec);
      for (unsigned i = 0; i != NumElts; ++i)
        Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
      return true;
    }

    // Attempt to recognise a PINSR*(PEXTR*) shuffle pattern.
    // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
    unsigned ExOp =
        (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
    if (InScl.getOpcode() != ExOp)
      return false;

    SDValue ExVec = InScl.getOperand(0);
    SDValue ExIndex = InScl.getOperand(1);
    if (!isa<ConstantSDNode>(ExIndex) ||
        cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts))
      return false;
    uint64_t ExIdx = InScl.getConstantOperandVal(1);

    Ops.push_back(InVec);
    Ops.push_back(ExVec);
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
    return true;
  }
  case X86ISD::PACKSS:
  case X86ISD::PACKUS: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
           N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
           "Unexpected input value type");

    APInt EltsLHS, EltsRHS;
    getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);

    // If we know input saturation won't happen we can treat this
    // as a truncation shuffle.
    if (Opcode == X86ISD::PACKSS) {
      if ((!N0.isUndef() &&
           DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
          (!N1.isUndef() &&
           DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
        return false;
    } else {
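      // For PACKUS the value must already fit in the low half of each source
      // element, i.e. the upper NumBitsPerElt bits must be known zero.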
      APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
      if ((!N0.isUndef() &&
           !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
          (!N1.isUndef() &&
           !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
        return false;
    }

    bool IsUnary = (N0 == N1);

    Ops.push_back(N0);
    if (!IsUnary)
      Ops.push_back(N1);

    createPackShuffleMask(VT, Mask, IsUnary);
    return true;
  }
  case X86ISD::VSHLI:
  case X86ISD::VSRLI: {
    uint64_t ShiftVal = N.getConstantOperandVal(1);
    // Out of range bit shifts are guaranteed to be zero.
    if (NumBitsPerElt <= ShiftVal) {
      Mask.append(NumElts, SM_SentinelZero);
      return true;
    }

    // We can only decode 'whole byte' bit shifts as shuffles.
    if ((ShiftVal % 8) != 0)
      break;

    uint64_t ByteShift = ShiftVal / 8;
    unsigned NumBytes = NumSizeInBits / 8;
    unsigned NumBytesPerElt = NumBitsPerElt / 8;
    Ops.push_back(N.getOperand(0));

    // Clear mask to all zeros and insert the shifted byte indices.
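    // E.g. a v2i64 VSRLI by 16 shuffles each 8-byte lane as
    // <2,3,4,5,6,7,zz,zz>, and a VSHLI by 16 shuffles it as
    // <zz,zz,0,1,2,3,4,5>.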
    Mask.append(NumBytes, SM_SentinelZero);

    if (X86ISD::VSHLI == Opcode) {
      for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
        for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
          Mask[i + j] = i + j - ByteShift;
    } else {
      for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
        for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
          Mask[i + j - ByteShift] = i + j;
    }
    return true;
  }
  case X86ISD::VBROADCAST: {
    SDValue Src = N.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    if (!SrcVT.isVector())
      return false;

    if (NumSizeInBits != SrcVT.getSizeInBits()) {
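      // Widen the broadcast source to the full vector width by inserting it
      // into an undef vector, so the shuffle input matches the result size.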
      assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
             "Illegal broadcast type");
      SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
                               NumSizeInBits / SrcVT.getScalarSizeInBits());
      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
                        DAG.getUNDEF(SrcVT), Src,
                        DAG.getIntPtrConstant(0, SDLoc(N)));
    }

    Ops.push_back(Src);
    Mask.append(NumElts, 0);
    return true;
  }
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::ANY_EXTEND_VECTOR_INREG: {
    SDValue Src = N.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // Extended source must be a simple vector.
    if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
        (SrcVT.getScalarSizeInBits() % 8) != 0)
      return false;

    unsigned NumSrcBitsPerElt = SrcVT.getScalarSizeInBits();
    bool IsAnyExtend =
        (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
    DecodeZeroExtendMask(NumSrcBitsPerElt, NumBitsPerElt, NumElts, IsAnyExtend,
                         Mask);

    if (NumSizeInBits != SrcVT.getSizeInBits()) {
      assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
             "Illegal zero-extension type");
      SrcVT = MVT::getVectorVT(SrcVT.getSimpleVT().getScalarType(),
                               NumSizeInBits / NumSrcBitsPerElt);
      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
                        DAG.getUNDEF(SrcVT), Src,
                        DAG.getIntPtrConstant(0, SDLoc(N)));
    }

    Ops.push_back(Src);
    return true;
  }
  }

  return false;
}

/// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
                                              SmallVectorImpl<int> &Mask) {
  int MaskWidth = Mask.size();
  SmallVector<SDValue, 16> UsedInputs;
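  // Each input owns a contiguous [lo, hi) range of mask indices; as inputs
  // are dropped or merged, later ranges are shifted down to stay contiguous.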
  for (int i = 0, e = Inputs.size(); i < e; ++i) {
    int lo = UsedInputs.size() * MaskWidth;
    int hi = lo + MaskWidth;

    // Strip UNDEF input usage.
    if (Inputs[i].isUndef())
      for (int &M : Mask)
        if ((lo <= M) && (M < hi))
          M = SM_SentinelUndef;

    // Check for unused inputs.
    if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
      for (int &M : Mask)
        if (lo <= M)
          M -= MaskWidth;
      continue;
    }

    // Check for repeated inputs.
    bool IsRepeat = false;
    for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
      if (UsedInputs[j] != Inputs[i])
        continue;
      for (int &M : Mask)
        if (lo <= M)
          M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
      IsRepeat = true;
      break;
    }
    if (IsRepeat)
      continue;

    UsedInputs.push_back(Inputs[i]);
  }
  Inputs = UsedInputs;
}

/// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
/// and then sets the SM_SentinelUndef and SM_SentinelZero values.
/// Returns true if the target shuffle mask was decoded.
static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
                                   SmallVectorImpl<SDValue> &Inputs,
                                   SmallVectorImpl<int> &Mask,
                                   APInt &KnownUndef, APInt &KnownZero,
                                   SelectionDAG &DAG, unsigned Depth,
                                   bool ResolveKnownElts) {
  EVT VT = Op.getValueType();
  if (!VT.isSimple() || !VT.isVector())
    return false;

  if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
    if (ResolveKnownElts)
      resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
    return true;
  }
  if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
                         ResolveKnownElts)) {
    resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
    return true;
  }
  return false;
}

static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
                                   SmallVectorImpl<int> &Mask,
                                   SelectionDAG &DAG, unsigned Depth = 0,
                                   bool ResolveKnownElts = true) {
  EVT VT = Op.getValueType();
  if (!VT.isSimple() || !VT.isVector())
    return false;

  APInt KnownUndef, KnownZero;
  unsigned NumElts = Op.getValueType().getVectorNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(NumElts);
  return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
                                KnownZero, DAG, Depth, ResolveKnownElts);
}

/// Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
                                   unsigned Depth) {
  if (Depth == 6)
    return SDValue();  // Limit search depth.

  SDValue V = SDValue(N, 0);
  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();

  // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
  if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
    int Elt = SV->getMaskElt(Index);

    if (Elt < 0)
      return DAG.getUNDEF(VT.getVectorElementType());

    unsigned NumElems = VT.getVectorNumElements();
    SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
                                         : SV->getOperand(1);
    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
  }

  // Recurse into target specific vector shuffles to find scalars.
  if (isTargetShuffle(Opcode)) {
    MVT ShufVT = V.getSimpleValueType();
    MVT ShufSVT = ShufVT.getVectorElementType();
    int NumElems = (int)ShufVT.getVectorNumElements();
    SmallVector<int, 16> ShuffleMask;
    SmallVector<SDValue, 16> ShuffleOps;
    bool IsUnary;

    if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
      return SDValue();

    int Elt = ShuffleMask[Index];
    if (Elt == SM_SentinelZero)
      return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
                                 : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
    if (Elt == SM_SentinelUndef)
      return DAG.getUNDEF(ShufSVT);

    assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
    SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
                               Depth+1);
  }

  // Recurse into insert_subvector base/sub vector to find scalars.
  if (Opcode == ISD::INSERT_SUBVECTOR &&
      isa<ConstantSDNode>(N->getOperand(2))) {
    SDValue Vec = N->getOperand(0);
    SDValue Sub = N->getOperand(1);
    EVT SubVT = Sub.getValueType();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    uint64_t SubIdx = N->getConstantOperandVal(2);

    if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
      return getShuffleScalarElt(Sub.getNode(), Index - SubIdx, DAG, Depth + 1);
    return getShuffleScalarElt(Vec.getNode(), Index, DAG, Depth + 1);
  }

  // Recurse into extract_subvector src vector to find scalars.
  if (Opcode == ISD::EXTRACT_SUBVECTOR &&
      isa<ConstantSDNode>(N->getOperand(1))) {
    SDValue Src = N->getOperand(0);
    uint64_t SrcIdx = N->getConstantOperandVal(1);
    return getShuffleScalarElt(Src.getNode(), Index + SrcIdx, DAG, Depth + 1);
  }

  // Actual nodes that may contain scalar elements
  if (Opcode == ISD::BITCAST) {
    V = V.getOperand(0);
    EVT SrcVT = V.getValueType();
    unsigned NumElems = VT.getVectorNumElements();

    if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
      return SDValue();
  }

  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return (Index == 0) ? V.getOperand(0)
                        : DAG.getUNDEF(VT.getVectorElementType());

  if (V.getOpcode() == ISD::BUILD_VECTOR)
    return V.getOperand(Index);

  return SDValue();
}

// Use PINSRB/PINSRW/PINSRD to create a build vector.
static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
                                        unsigned NumNonZero, unsigned NumZero,
                                        SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  unsigned NumElts = VT.getVectorNumElements();
  assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
          ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
         "Illegal vector insertion");

  SDLoc dl(Op);
  SDValue V;
  bool First = true;

  for (unsigned i = 0; i < NumElts; ++i) {
    bool IsNonZero = (NonZeros & (1 << i)) != 0;
    if (!IsNonZero)
      continue;

    // If the build vector contains zeros or our first insertion is not the
    // first index, then insert into a zero vector to break any register
    // dependency; otherwise use SCALAR_TO_VECTOR.
    if (First) {
      First = false;
      if (NumZero || 0 != i)
        V = getZeroVector(VT, Subtarget, DAG, dl);
      else {
        assert(0 == i && "Expected insertion into zero-index");
        V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
        V = DAG.getBitcast(VT, V);
        continue;
      }
    }
    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
                    DAG.getIntPtrConstant(i, dl));
  }

  return V;
}

/// Custom lower build_vector of v16i8.
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  if (NumNonZero > 8 && !Subtarget.hasSSE41())
    return SDValue();

  // SSE4.1 - use PINSRB to insert each byte directly.
  if (Subtarget.hasSSE41())
    return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
                                    Subtarget);

  SDLoc dl(Op);
  SDValue V;

  // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
  for (unsigned i = 0; i < 16; i += 2) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    bool NextIsNonZero = (NonZeros & (1 << (i + 1))) != 0;
    if (!ThisIsNonZero && !NextIsNonZero)
      continue;

    // FIXME: Investigate combining the first 4 bytes as a i32 instead.
    SDValue Elt;
    if (ThisIsNonZero) {
      if (NumZero || NextIsNonZero)
        Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
      else
        Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
    }

    if (NextIsNonZero) {
      SDValue NextElt = Op.getOperand(i + 1);
      if (i == 0 && NumZero)
        NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
      else
        NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
      NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
                            DAG.getConstant(8, dl, MVT::i8));
      if (ThisIsNonZero)
        Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
      else
        Elt = NextElt;
    }

    // If our first insertion is not the first index, then insert into a zero
    // vector to break any register dependency; otherwise use SCALAR_TO_VECTOR.
    if (!V) {
      if (i != 0)
        V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
      else {
        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
        V = DAG.getBitcast(MVT::v8i16, V);
        continue;
      }
    }
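    // Insert the combined byte pair as the (i / 2)'th 16-bit element.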
    Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
                    DAG.getIntPtrConstant(i / 2, dl));
  }

  return DAG.getBitcast(MVT::v16i8, V);
}

/// Custom lower build_vector of v8i16.
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  if (NumNonZero > 4 && !Subtarget.hasSSE41())
    return SDValue();

  // Use PINSRW to insert each element directly.
  return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
                                  Subtarget);
}

/// Custom lower build_vector of v4i32 or v4f32.
static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  // If this is a splat of a pair of elements, use MOVDDUP (unless the target
  // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
  // Because we're creating a less complicated build vector here, we may enable
  // further folding of the MOVDDUP via shuffle transforms.
  if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
      Op.getOperand(0) == Op.getOperand(2) &&
      Op.getOperand(1) == Op.getOperand(3) &&
      Op.getOperand(0) != Op.getOperand(1)) {
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    MVT EltVT = VT.getVectorElementType();
    // Create a new build vector with the first 2 elements followed by undef
    // padding, bitcast to v2f64, duplicate, and bitcast back.
    SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
                       DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
    SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
    SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
    return DAG.getBitcast(VT, Dup);
  }

  // Find all zeroable elements.
  std::bitset<4> Zeroable, Undefs;
  for (int i = 0; i < 4; ++i) {
    SDValue Elt = Op.getOperand(i);
    Undefs[i] = Elt.isUndef();
    Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
  }
  assert(Zeroable.size() - Zeroable.count() > 1 &&
         "We expect at least two non-zero elements!");

  // We only know how to deal with build_vector nodes where elements are either
  // zeroable or extract_vector_elt with constant index.
  SDValue FirstNonZero;
  unsigned FirstNonZeroIdx;
  for (unsigned i = 0; i < 4; ++i) {
    if (Zeroable[i])
      continue;
    SDValue Elt = Op.getOperand(i);
    if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(Elt.getOperand(1)))
      return SDValue();
    // Make sure that this node is extracting from a 128-bit vector.
    MVT VT = Elt.getOperand(0).getSimpleValueType();
    if (!VT.is128BitVector())
      return SDValue();
    if (!FirstNonZero.getNode()) {
      FirstNonZero = Elt;
      FirstNonZeroIdx = i;
    }
  }

  assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
  SDValue V1 = FirstNonZero.getOperand(0);
  MVT VT = V1.getSimpleValueType();

  // See if this build_vector can be lowered as a blend with zero.
  SDValue Elt;
  unsigned EltMaskIdx, EltIdx;
  int Mask[4];
  for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
    if (Zeroable[EltIdx]) {
      // The zero vector will be on the right hand side.
      Mask[EltIdx] = EltIdx+4;
      continue;
    }

    Elt = Op->getOperand(EltIdx);
    // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
    EltMaskIdx = Elt.getConstantOperandVal(1);
    if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
      break;
    Mask[EltIdx] = EltIdx;
  }

  if (EltIdx == 4) {
    // Let the shuffle legalizer deal with blend operations.
    SDValue VZeroOrUndef = (Zeroable == Undefs)
                               ? DAG.getUNDEF(VT)
                               : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
    if (V1.getSimpleValueType() != VT)
      V1 = DAG.getBitcast(VT, V1);
    return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
  }

  // See if we can lower this build_vector to a INSERTPS.
  if (!Subtarget.hasSSE41())
    return SDValue();

  SDValue V2 = Elt.getOperand(0);
  if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
    V1 = SDValue();

  bool CanFold = true;
  for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
    if (Zeroable[i])
      continue;

    SDValue Current = Op->getOperand(i);
    SDValue SrcVector = Current->getOperand(0);
    if (!V1.getNode())
      V1 = SrcVector;
    CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
  }

  if (!CanFold)
    return SDValue();

  assert(V1.getNode() && "Expected at least two non-zero elements!");
  if (V1.getSimpleValueType() != MVT::v4f32)
    V1 = DAG.getBitcast(MVT::v4f32, V1);
  if (V2.getSimpleValueType() != MVT::v4f32)
    V2 = DAG.getBitcast(MVT::v4f32, V2);

  // Ok, we can emit an INSERTPS instruction.
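  // INSERTPS imm8: bits [7:6] select the source element, bits [5:4] select
  // the destination element and bits [3:0] are the zero mask.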
  unsigned ZMask = Zeroable.to_ulong();

  unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
  assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
  SDLoc DL(Op);
  SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                               DAG.getIntPtrConstant(InsertPSMask, DL, true));
  return DAG.getBitcast(VT, Result);
}

/// Return a vector logical shift node.
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
                         SelectionDAG &DAG, const TargetLowering &TLI,
                         const SDLoc &dl) {
  assert(VT.is128BitVector() && "Unknown type for VShift");
  MVT ShVT = MVT::v16i8;
  unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
  SrcOp = DAG.getBitcast(ShVT, SrcOp);
  assert(NumBits % 8 == 0 && "Only support byte sized shifts");
  SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
  return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
}

static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
                                      SelectionDAG &DAG) {

  // Check if the scalar load can be widened into a vector load. And if
  // the address is "base + cst" see if the cst can be "absorbed" into
  // the shuffle mask.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
    SDValue Ptr = LD->getBasePtr();
    if (!ISD::isNormalLoad(LD) || !LD->isSimple())
      return SDValue();
    EVT PVT = LD->getValueType(0);
    if (PVT != MVT::i32 && PVT != MVT::f32)
      return SDValue();

    int FI = -1;
    int64_t Offset = 0;
    if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
      FI = FINode->getIndex();
      Offset = 0;
    } else if (DAG.isBaseWithConstantOffset(Ptr) &&
               isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
      FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
      Offset = Ptr.getConstantOperandVal(1);
      Ptr = Ptr.getOperand(0);
    } else {
      return SDValue();
    }

    // FIXME: 256-bit vector instructions don't require a strict alignment,
    // improve this code to support it better.
    unsigned RequiredAlign = VT.getSizeInBits()/8;
    SDValue Chain = LD->getChain();
    // Make sure the stack object alignment is at least 16 or 32.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
      if (MFI.isFixedObjectIndex(FI)) {
        // Can't change the alignment. FIXME: It's possible to compute
        // the exact stack offset and reference FI + adjust offset instead.
        // If someone *really* cares about this, that's the way to implement it.
        return SDValue();
      } else {
        MFI.setObjectAlignment(FI, RequiredAlign);
      }
    }

    // (Offset % RequiredAlign) must be a multiple of 4. The address is then
    // Ptr + (Offset & ~(RequiredAlign - 1)).
    if (Offset < 0)
      return SDValue();
    if ((Offset % RequiredAlign) & 3)
      return SDValue();
    int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
    if (StartOffset) {
      SDLoc DL(Ptr);
      Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
                        DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
    }

    // The original scalar now lives at element (Offset - StartOffset) / 4 of
    // the widened load, so splat that element across the result.
    int EltNo = (Offset - StartOffset) >> 2;
    unsigned NumElems = VT.getVectorNumElements();

    EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
    SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
                             LD->getPointerInfo().getWithOffset(StartOffset));

    SmallVector<int, 8> Mask(NumElems, EltNo);

    return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
  }

  return SDValue();
}

// Recurse to find a LoadSDNode source and the accumulated ByteOffset.
static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
  if (ISD::isNON_EXTLoad(Elt.getNode())) {
    auto *BaseLd = cast<LoadSDNode>(Elt);
    if (!BaseLd->isSimple())
      return false;
    Ld = BaseLd;
    ByteOffset = 0;
    return true;
  }

  switch (Elt.getOpcode()) {
  case ISD::BITCAST:
  case ISD::TRUNCATE:
  case ISD::SCALAR_TO_VECTOR:
    return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
  case ISD::SRL:
    if (isa<ConstantSDNode>(Elt.getOperand(1))) {
      uint64_t Idx = Elt.getConstantOperandVal(1);
      if ((Idx % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
        ByteOffset += Idx / 8;
        return true;
      }
    }
    break;
  case ISD::EXTRACT_VECTOR_ELT:
    if (isa<ConstantSDNode>(Elt.getOperand(1))) {
      SDValue Src = Elt.getOperand(0);
      unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
      unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
      if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
          findEltLoadSrc(Src, Ld, ByteOffset)) {
        uint64_t Idx = Elt.getConstantOperandVal(1);
        ByteOffset += Idx * (SrcSizeInBits / 8);
        return true;
      }
    }
    break;
  }

  return false;
}

/// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
/// elements can be replaced by a single large load which has the same value as
/// a build_vector or insert_subvector whose loaded operands are 'Elts'.
///
/// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
                                        const SDLoc &DL, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget,
                                        bool isAfterLegalize) {
  if ((VT.getScalarSizeInBits() % 8) != 0)
    return SDValue();

  unsigned NumElems = Elts.size();

  int LastLoadedElt = -1;
  APInt LoadMask = APInt::getNullValue(NumElems);
  APInt ZeroMask = APInt::getNullValue(NumElems);
  APInt UndefMask = APInt::getNullValue(NumElems);

  SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
  SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);

  // For each element in the initializer, see if we've found a load, zero or an
  // undef.
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Elt = peekThroughBitcasts(Elts[i]);
    if (!Elt.getNode())
      return SDValue();
    if (Elt.isUndef()) {
      UndefMask.setBit(i);
      continue;
    }
    if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
      ZeroMask.setBit(i);
      continue;
    }

    // Each loaded element must be the correct fractional portion of the
    // requested vector load.
    unsigned EltSizeInBits = Elt.getValueSizeInBits();
    if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
      return SDValue();

    if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
      return SDValue();
    unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
    if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
      return SDValue();

    LoadMask.setBit(i);
    LastLoadedElt = i;
  }
  assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
          LoadMask.countPopulation()) == NumElems &&
         "Incomplete element masks");

  // Handle Special Cases - all undef or undef/zero.
  if (UndefMask.countPopulation() == NumElems)
    return DAG.getUNDEF(VT);

  // FIXME: Should we return this as a BUILD_VECTOR instead?
  if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
    return VT.isInteger() ? DAG.getConstant(0, DL, VT)
                          : DAG.getConstantFP(0.0, DL, VT);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  int FirstLoadedElt = LoadMask.countTrailingZeros();
  SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
  EVT EltBaseVT = EltBase.getValueType();
  assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
         "Register/Memory size mismatch");
  LoadSDNode *LDBase = Loads[FirstLoadedElt];
  assert(LDBase && "Did not find base load for merging consecutive loads");
  unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
  unsigned BaseSizeInBytes = BaseSizeInBits / 8;
  int LoadSizeInBits = (1 + LastLoadedElt - FirstLoadedElt) * BaseSizeInBits;
  assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");

  // TODO: Support offsetting the base load.
  if (ByteOffsets[FirstLoadedElt] != 0)
    return SDValue();

  // Check to see if the element's load is consecutive to the base load
  // or offset from a previous (already checked) load.
  auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
    LoadSDNode *Ld = Loads[EltIdx];
    int64_t ByteOffset = ByteOffsets[EltIdx];
    if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
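      // A non-zero offset means this element reads from the middle of another
      // element's load; check that it maps back to that load at offset 0.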
      int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
      return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
              Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
    }
    return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
                                              EltIdx - FirstLoadedElt);
  };

  // Consecutive loads can contain UNDEFs but not ZERO elements.
  // Consecutive loads with UNDEF and ZERO elements require an additional
  // shuffle stage to clear the ZERO elements.
  bool IsConsecutiveLoad = true;
  bool IsConsecutiveLoadWithZeros = true;
  for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
    if (LoadMask[i]) {
      if (!CheckConsecutiveLoad(LDBase, i)) {
        IsConsecutiveLoad = false;
        IsConsecutiveLoadWithZeros = false;
        break;
      }
    } else if (ZeroMask[i]) {
      IsConsecutiveLoad = false;
    }
  }

  auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
    auto MMOFlags = LDBase->getMemOperand()->getFlags();
    assert(LDBase->isSimple() &&
           "Cannot merge volatile or atomic loads.");
    SDValue NewLd =
        DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
                    LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
    for (auto *LD : Loads)
      if (LD)
        DAG.makeEquivalentMemoryOrdering(LD, NewLd);
    return NewLd;
  };

  // Check if the base load is entirely dereferenceable.
  bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
      VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());

  // LOAD - all consecutive load/undefs (must start/end with a load or be
  // entirely dereferenceable). If we have found an entire vector of loads and
  // undefs, then return a large load of the entire vector width starting at the
  // base pointer. If the vector contains zeros, then attempt to shuffle those
  // elements.
  if (FirstLoadedElt == 0 &&
      (LastLoadedElt == (int)(NumElems - 1) || IsDereferenceable) &&
      (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
    if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
      return SDValue();

    // Don't create 256-bit non-temporal aligned loads without AVX2 as these
    // will lower to regular temporal loads and use the cache.
    if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
        VT.is256BitVector() && !Subtarget.hasInt256())
      return SDValue();

    if (NumElems == 1)
      return DAG.getBitcast(VT, Elts[FirstLoadedElt]);

    if (!ZeroMask)
      return CreateLoad(VT, LDBase);

    // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
    // vector and a zero vector to clear out the zero elements.
    if (!isAfterLegalize && VT.isVector()) {
      unsigned NumMaskElts = VT.getVectorNumElements();
      if ((NumMaskElts % NumElems) == 0) {
        unsigned Scale = NumMaskElts / NumElems;
        SmallVector<int, 4> ClearMask(NumMaskElts, -1);
        for (unsigned i = 0; i < NumElems; ++i) {
          if (UndefMask[i])
            continue;
          int Offset = ZeroMask[i] ? NumMaskElts : 0;
          for (unsigned j = 0; j != Scale; ++j)
            ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
        }
        SDValue V = CreateLoad(VT, LDBase);
        SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
                                   : DAG.getConstantFP(0.0, DL, VT);
        return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
      }
    }
  }

  // If the upper half of a ymm/zmm load is undef then just load the lower half.
  if (VT.is256BitVector() || VT.is512BitVector()) {
    unsigned HalfNumElems = NumElems / 2;
    if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
      EVT HalfVT =
          EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
      SDValue HalfLD =
          EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
                                   DAG, Subtarget, isAfterLegalize);
      if (HalfLD)
        return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
                           HalfLD, DAG.getIntPtrConstant(0, DL));
    }
  }

  // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
  if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
      (LoadSizeInBits == 32 || LoadSizeInBits == 64) &&
      ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
    MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
                                      : MVT::getIntegerVT(LoadSizeInBits);
    MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
    if (TLI.isTypeLegal(VecVT)) {
      SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
      SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
      SDValue ResNode =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
                                  LDBase->getPointerInfo(),
                                  LDBase->getAlignment(),
                                  MachineMemOperand::MOLoad);
      for (auto *LD : Loads)
        if (LD)
          DAG.makeEquivalentMemoryOrdering(LD, ResNode);
      return DAG.getBitcast(VT, ResNode);
    }
  }

  // BROADCAST - match the smallest possible repetition pattern, load that
  // scalar/subvector element and then broadcast to the entire vector.
  if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
      (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
    for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
      unsigned RepeatSize = SubElems * BaseSizeInBits;
      unsigned ScalarSize = std::min(RepeatSize, 64u);
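      // Repeats of up to 64 bits are broadcast as a scalar; wider repeats use
      // a subvector broadcast (SUBV_BROADCAST) below.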
      if (!Subtarget.hasAVX2() && ScalarSize < 32)
        continue;

      bool Match = true;
      SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
      for (unsigned i = 0; i != NumElems && Match; ++i) {
        if (!LoadMask[i])
          continue;
        SDValue Elt = peekThroughBitcasts(Elts[i]);
        if (RepeatedLoads[i % SubElems].isUndef())
          RepeatedLoads[i % SubElems] = Elt;
        else
          Match &= (RepeatedLoads[i % SubElems] == Elt);
      }

      // We must have loads at both ends of the repetition.
      Match &= !RepeatedLoads.front().isUndef();
      Match &= !RepeatedLoads.back().isUndef();
      if (!Match)
        continue;

      EVT RepeatVT =
          VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
              ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
              : EVT::getFloatingPointVT(ScalarSize);
      if (RepeatSize > ScalarSize)
        RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
                                    RepeatSize / ScalarSize);
      EVT BroadcastVT =
          EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
                           VT.getSizeInBits() / ScalarSize);
      if (TLI.isTypeLegal(BroadcastVT)) {
        if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
                RepeatVT, RepeatedLoads, DL, DAG, Subtarget, isAfterLegalize)) {
          unsigned Opcode = RepeatSize > ScalarSize ? X86ISD::SUBV_BROADCAST
                                                    : X86ISD::VBROADCAST;
          SDValue Broadcast = DAG.getNode(Opcode, DL, BroadcastVT, RepeatLoad);
          return DAG.getBitcast(VT, Broadcast);
        }
      }
    }
  }

  return SDValue();
}

// Combine vector ops (shuffles etc.) that are equivalent to build_vector
// load1, load2, load3, load4, <0, 1, 2, 3> into a vector load if the load
// addresses are consecutive, non-overlapping, and in the right order.
static SDValue combineToConsecutiveLoads(EVT VT, SDNode *N, const SDLoc &DL,
                                         SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget,
                                         bool isAfterLegalize) {
  SmallVector<SDValue, 64> Elts;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
      Elts.push_back(Elt);
      continue;
    }
    return SDValue();
  }
  assert(Elts.size() == VT.getVectorNumElements());
  return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
                                  isAfterLegalize);
}

static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
                                   unsigned SplatBitSize, LLVMContext &C) {
  unsigned ScalarSize = VT.getScalarSizeInBits();
  unsigned NumElm = SplatBitSize / ScalarSize;

  SmallVector<Constant *, 32> ConstantVec;
  for (unsigned i = 0; i < NumElm; i++) {
    APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
    Constant *Const;
    if (VT.isFloatingPoint()) {
      if (ScalarSize == 32) {
        Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
      } else {
        assert(ScalarSize == 64 && "Unsupported floating point scalar size");
        Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
      }
    } else
      Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
    ConstantVec.push_back(Const);
  }
  return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
}

static bool isFoldableUseOfShuffle(SDNode *N) {
  for (auto *U : N->uses()) {
    unsigned Opc = U->getOpcode();
    // VPERMV/VPERMV3 shuffles can never fold their index operands.
    if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
      return false;
    if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
      return false;
    if (isTargetShuffle(Opc))
      return true;
    if (Opc == ISD::BITCAST) // Ignore bitcasts
      return isFoldableUseOfShuffle(U);
    if (N->hasOneUse())
      return true;
  }
  return false;
}

// Check if the current node of a build vector is a zero-extended vector.
// If so, return the value that is being extended.
// For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
// NumElt - return the number of zero-extended identical values.
// EltType - return the type of the value including the zero extension.
static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
                                   unsigned &NumElt, MVT &EltType) {
  SDValue ExtValue = Op->getOperand(0);
  unsigned NumElts = Op->getNumOperands();
  // Delta is the distance to the first repeat of the splatted value; every
  // lane in between must be zero or undef.
  unsigned Delta = NumElts;

  for (unsigned i = 1; i < NumElts; i++) {
    if (Op->getOperand(i) == ExtValue) {
      Delta = i;
      break;
    }
    if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
      return SDValue();
  }
  if (!isPowerOf2_32(Delta) || Delta == 1)
    return SDValue();

  for (unsigned i = Delta; i < NumElts; i++) {
    if (i % Delta == 0) {
      if (Op->getOperand(i) != ExtValue)
        return SDValue();
    } else if (!(isNullConstant(Op->getOperand(i)) ||
                 Op->getOperand(i).isUndef()))
      return SDValue();
  }
  unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
  unsigned ExtVTSize = EltSize * Delta;
  EltType = MVT::getIntegerVT(ExtVTSize);
  NumElt = NumElts / Delta;
  return ExtValue;
}

/// Attempt to use the vbroadcast instruction to generate a splat value
/// from a splat BUILD_VECTOR which uses:
///  a. A single scalar load, or a constant.
///  b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
///
/// The VBROADCAST node is returned when a pattern is found,
/// or SDValue() otherwise.
static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
  // VBROADCAST requires AVX.
  // TODO: Splats could be generated for non-AVX CPUs using SSE
  // instructions, but there's less potential gain for only 128-bit vectors.
  if (!Subtarget.hasAVX())
    return SDValue();

  MVT VT = BVOp->getSimpleValueType(0);
  SDLoc dl(BVOp);

  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
         "Unsupported vector type for broadcast.");

  BitVector UndefElements;
  SDValue Ld = BVOp->getSplatValue(&UndefElements);

  // Attempt to use VBROADCASTM
  // From this pattern:
  // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
  // b. t1 = (build_vector t0 t0)
  //
  // Create (VBROADCASTM v2i1 X)
  if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
    MVT EltType = VT.getScalarType();
    unsigned NumElts = VT.getVectorNumElements();
    SDValue BOperand;
    SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
    if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
        (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
         Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
      if (ZeroExtended)
        BOperand = ZeroExtended.getOperand(0);
      else
        BOperand = Ld.getOperand(0).getOperand(0);
      MVT MaskVT = BOperand.getSimpleValueType();
      if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
          (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
        SDValue Brdcst =
            DAG.getNode(X86ISD::VBROADCASTM, dl,
                        MVT::getVectorVT(EltType, NumElts), BOperand);
        return DAG.getBitcast(VT, Brdcst);
      }
    }
  }

  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumUndefElts = UndefElements.count();
  if (!Ld || (NumElts - NumUndefElts) <= 1) {
    APInt SplatValue, Undef;
    unsigned SplatBitSize;
    bool HasUndef;
    // Check if this is a repeated constant pattern suitable for broadcasting.
    if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
        SplatBitSize > VT.getScalarSizeInBits() &&
        SplatBitSize < VT.getSizeInBits()) {
      // Avoid replacing with broadcast when it's a use of a shuffle
      // instruction to preserve the present custom lowering of shuffles.
      if (isFoldableUseOfShuffle(BVOp))
        return SDValue();
      // Replace BUILD_VECTOR with a broadcast of the repeated constants.
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      LLVMContext *Ctx = DAG.getContext();
      MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
      if (Subtarget.hasAVX()) {
        if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
            !(SplatBitSize == 64 && Subtarget.is32Bit())) {
          // Splatted value can fit in one INTEGER constant in constant pool.
          // Load the constant and broadcast it.
          MVT CVT = MVT::getIntegerVT(SplatBitSize);
          Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
          Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
          SDValue CP = DAG.getConstantPool(C, PVT);
          unsigned Repeat = VT.getSizeInBits() / SplatBitSize;

          unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
          Ld = DAG.getLoad(
              CVT, dl, DAG.getEntryNode(), CP,
              MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
              Alignment);
          SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
                                       MVT::getVectorVT(CVT, Repeat), Ld);
          return DAG.getBitcast(VT, Brdcst);
        } else if (SplatBitSize == 32 || SplatBitSize == 64) {
          // Splatted value can fit in one FLOAT constant in constant pool.
          // Load the constant and broadcast it.
          // AVX only has support for 32-bit and 64-bit broadcasts of floats.
          // No 64-bit integer broadcast on a 32-bit subtarget.
          MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
          // Lower the splat via APFloat directly, to avoid any conversion.
          Constant *C =
              SplatBitSize == 32
                  ? ConstantFP::get(*Ctx,
                                    APFloat(APFloat::IEEEsingle(), SplatValue))
                  : ConstantFP::get(*Ctx,
                                    APFloat(APFloat::IEEEdouble(), SplatValue));
          SDValue CP = DAG.getConstantPool(C, PVT);
          unsigned Repeat = VT.getSizeInBits() / SplatBitSize;

          unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
          Ld = DAG.getLoad(
              CVT, dl, DAG.getEntryNode(), CP,
              MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
              Alignment);
          SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
                                       MVT::getVectorVT(CVT, Repeat), Ld);
          return DAG.getBitcast(VT, Brdcst);
        } else if (SplatBitSize > 64) {
          // Load the vector of constants and broadcast it.
          MVT CVT = VT.getScalarType();
          Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
                                             *Ctx);
          SDValue VCP = DAG.getConstantPool(VecC, PVT);
          unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
          unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
          Ld = DAG.getLoad(
              MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
              MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
              Alignment);
          SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
          return DAG.getBitcast(VT, Brdcst);
        }
      }
    }

    // If we are moving a scalar into a vector (Ld must be set and all elements
    // but 1 are undef) and that operation is not obviously supported by
    // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
    // That's better than general shuffling and may eliminate a load to GPR and
    // move from scalar to vector register.
    if (!Ld || NumElts - NumUndefElts != 1)
      return SDValue();
    unsigned ScalarSize = Ld.getValueSizeInBits();
    if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
      return SDValue();
  }

  bool ConstSplatVal =
      (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);

  // Make sure that all of the users of a non-constant load are from the
  // BUILD_VECTOR node.
  if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
    return SDValue();

  unsigned ScalarSize = Ld.getValueSizeInBits();
  bool IsGE256 = (VT.getSizeInBits() >= 256);

  // When optimizing for size, generate up to 5 extra bytes for a broadcast
  // instruction to save 8 or more bytes of constant pool data.
  // TODO: If multiple splats are generated to load the same constant,
  // it may be detrimental to overall size. There needs to be a way to detect
  // that condition to know if this is truly a size win.
  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();

  // Handle broadcasting a single constant scalar from the constant pool
  // into a vector.
  // On Sandybridge (no AVX2), it is still better to load a constant vector
  // from the constant pool and not to broadcast it from a scalar.
  // But override that restriction when optimizing for size.
  // TODO: Check if splatting is recommended for other AVX-capable CPUs.
  if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
    EVT CVT = Ld.getValueType();
    assert(!CVT.isVector() && "Must not broadcast a vector type");

    // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
    // For size optimization, also splat v2f64 and v2i64, and for size opt
    // with AVX2, also splat i8 and i16.
    // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
    if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
        (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
      const Constant *C = nullptr;
      if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
        C = CI->getConstantIntValue();
      else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
        C = CF->getConstantFPValue();

      assert(C && "Invalid constant type");

      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      SDValue CP =
          DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
      unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
      Ld = DAG.getLoad(
          CVT, dl, DAG.getEntryNode(), CP,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
          Alignment);

      return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
    }
  }

  bool IsLoad = ISD::isNormalLoad(Ld.getNode());

  // Handle AVX2 in-register broadcasts.
  if (!IsLoad && Subtarget.hasInt256() &&
      (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
    return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);

  // The scalar source must be a normal load.
  if (!IsLoad)
    return SDValue();

  if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
      (Subtarget.hasVLX() && ScalarSize == 64))
    return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);

  // The integer check is needed for the 64-bit into 128-bit case, so that it
  // doesn't match double, since there is no vbroadcastsd xmm.
  if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
    if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
      return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
  }

  // Unsupported broadcast.
  return SDValue();
}

/// For an EXTRACT_VECTOR_ELT with a constant index return the real
/// underlying vector and index.
///
/// Modifies \p ExtractedFromVec to the real vector and returns the real
/// index.
static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
                                         SDValue ExtIdx) {
  int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
  if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
    return Idx;

  // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
  // lowered this:
  //   (extract_vector_elt (v8f32 %1), Constant<6>)
  // to:
  //   (extract_vector_elt (vector_shuffle<2,u,u,u>
  //                           (extract_subvector (v8f32 %0), Constant<4>),
  //                           undef)
  //                       Constant<0>)
  // In this case the vector is the extract_subvector expression and the index
  // is 2, as specified by the shuffle.
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
  SDValue ShuffleVec = SVOp->getOperand(0);
  MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
  assert(ShuffleVecVT.getVectorElementType() ==
         ExtractedFromVec.getSimpleValueType().getVectorElementType());

  int ShuffleIdx = SVOp->getMaskElt(Idx);
  if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
    ExtractedFromVec = ShuffleVec;
    return ShuffleIdx;
  }
  return Idx;
}

static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // Skip if insert_vec_elt is not supported.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
    return SDValue();

  SDLoc DL(Op);
  unsigned NumElems = Op.getNumOperands();

  SDValue VecIn1;
  SDValue VecIn2;
  SmallVector<unsigned, 4> InsertIndices;
  SmallVector<int, 8> Mask(NumElems, -1);

  for (unsigned i = 0; i != NumElems; ++i) {
    unsigned Opc = Op.getOperand(i).getOpcode();

    if (Opc == ISD::UNDEF)
      continue;

    if (Opc != ISD::EXTRACT_VECTOR_ELT) {
      // Quit if more than 1 element needs inserting.
      if (InsertIndices.size() > 1)
        return SDValue();

      InsertIndices.push_back(i);
      continue;
    }

    SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
    SDValue ExtIdx = Op.getOperand(i).getOperand(1);

    // Quit if non-constant index.
    if (!isa<ConstantSDNode>(ExtIdx))
      return SDValue();
    int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);

    // Quit if extracted from vector of different type.
    if (ExtractedFromVec.getValueType() != VT)
      return SDValue();

    if (!VecIn1.getNode())
      VecIn1 = ExtractedFromVec;
    else if (VecIn1 != ExtractedFromVec) {
      if (!VecIn2.getNode())
        VecIn2 = ExtractedFromVec;
      else if (VecIn2 != ExtractedFromVec)
        // Quit if there are more than 2 vectors to shuffle.
        return SDValue();
    }

    if (ExtractedFromVec == VecIn1)
      Mask[i] = Idx;
    else if (ExtractedFromVec == VecIn2)
      Mask[i] = Idx + NumElems;
  }

  if (!VecIn1.getNode())
    return SDValue();

  VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
  SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);

  for (unsigned Idx : InsertIndices)
    NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
                     DAG.getIntPtrConstant(Idx, DL));

  return NV;
}

static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
  assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
         Op.getScalarValueSizeInBits() == 1 &&
         "Can not convert non-constant vector");
  uint64_t Immediate = 0;
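  // Pack each constant i1 element into the corresponding bit of an immediate.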
  for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
    SDValue In = Op.getOperand(idx);
    if (!In.isUndef())
      Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
  }
  SDLoc dl(Op);
  MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
  return DAG.getConstant(Immediate, dl, VT);
}
// Lower BUILD_VECTOR operation for vXi1 mask types.
static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {

  MVT VT = Op.getSimpleValueType();
  assert((VT.getVectorElementType() == MVT::i1) &&
         "Unexpected type in LowerBUILD_VECTORvXi1!");

  SDLoc dl(Op);
  if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
      ISD::isBuildVectorAllOnes(Op.getNode()))
    return Op;

  uint64_t Immediate = 0;
  SmallVector<unsigned, 16> NonConstIdx;
  bool IsSplat = true;
  bool HasConstElts = false;
  int SplatIdx = -1;
  for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
    SDValue In = Op.getOperand(idx);
    if (In.isUndef())
      continue;
    if (!isa<ConstantSDNode>(In))
      NonConstIdx.push_back(idx);
    else {
      Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
      HasConstElts = true;
    }
    if (SplatIdx < 0)
      SplatIdx = idx;
    else if (In != Op.getOperand(SplatIdx))
      IsSplat = false;
  }

  // For a splat, use (select i1 splat_elt, all-ones, all-zeroes).
  if (IsSplat) {
    // The build_vector allows the scalar element to be larger than the vector
    // element type. We need to mask it to use as a condition unless we know
    // the upper bits are zero.
    // FIXME: Use computeKnownBits instead of checking specific opcode?
    SDValue Cond = Op.getOperand(SplatIdx);
    assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
    if (Cond.getOpcode() != ISD::SETCC)
      Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
                         DAG.getConstant(1, dl, MVT::i8));
    return DAG.getSelect(dl, VT, Cond,
                         DAG.getConstant(1, dl, VT),
                         DAG.getConstant(0, dl, VT));
  }

  // Insert the non-constant elements one by one.
  SDValue DstVec;
  if (HasConstElts) {
    if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
      SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
      SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
      ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
      ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
      DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
    } else {
      MVT ImmVT = MVT::getIntegerVT(std::max(VT.getSizeInBits(), 8U));
      SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
      MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
      DstVec = DAG.getBitcast(VecVT, Imm);
      DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
                           DAG.getIntPtrConstant(0, dl));
    }
  } else
    DstVec = DAG.getUNDEF(VT);

  for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
    unsigned InsertIdx = NonConstIdx[i];
    DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
                         Op.getOperand(InsertIdx),
                         DAG.getIntPtrConstant(InsertIdx, dl));
  }
  return DstVec;
}

/// This is a helper function of LowerToHorizontalOp().
/// This function checks that the build_vector \p N in input implements a
/// 128-bit partial horizontal operation on a 256-bit vector, but that operation
/// may not match the layout of an x86 256-bit horizontal instruction.
/// In other words, if this returns true, then some extraction/insertion will
/// be required to produce a valid horizontal instruction.
///
/// Parameter \p Opcode defines the kind of horizontal operation to match.
/// For example, if \p Opcode is equal to ISD::ADD, then this function
/// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
/// is equal to ISD::SUB, then this function checks if this is a horizontal
/// arithmetic sub.
///
/// This function only analyzes elements of \p N whose indices are
/// in range [BaseIdx, LastIdx).
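///
/// For example (a sketch for a v8i32 build_vector with \p Opcode == ISD::ADD,
/// \p BaseIdx == 0 and \p LastIdx == 4), the analyzed elements would be:
///   (add (extract_elt %A, 0), (extract_elt %A, 1)),
///   (add (extract_elt %A, 2), (extract_elt %A, 3)),
///   (add (extract_elt %B, 0), (extract_elt %B, 1)),
///   (add (extract_elt %B, 2), (extract_elt %B, 3))
/// with \p V0 set to %A and \p V1 set to %B.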
///
/// TODO: This function was originally used to match both real and fake partial
/// horizontal operations, but the index-matching logic is incorrect for that.
/// See the corrected implementation in isHopBuildVector(). Can we reduce this
/// code because it is only used for partial h-op matching now?
static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
                                  SelectionDAG &DAG,
                                  unsigned BaseIdx, unsigned LastIdx,
                                  SDValue &V0, SDValue &V1) {
  EVT VT = N->getValueType(0);
  assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
  assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
  assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
         "Invalid Vector in input!");

  bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
  bool CanFold = true;
  unsigned ExpectedVExtractIdx = BaseIdx;
  unsigned NumElts = LastIdx - BaseIdx;
  V0 = DAG.getUNDEF(VT);
  V1 = DAG.getUNDEF(VT);

  // Check if N implements a horizontal binop.
  for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
    SDValue Op = N->getOperand(i + BaseIdx);

    // Skip UNDEFs.
    if (Op->isUndef()) {
      // Update the expected vector extract index.
      if (i * 2 == NumElts)
        ExpectedVExtractIdx = BaseIdx;
      ExpectedVExtractIdx += 2;
      continue;
    }

    CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();

    if (!CanFold)
      break;

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // Try to match the following pattern:
    // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
    CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        Op0.getOperand(0) == Op1.getOperand(0) &&
        isa<ConstantSDNode>(Op0.getOperand(1)) &&
        isa<ConstantSDNode>(Op1.getOperand(1)));
    if (!CanFold)
      break;

    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();

    if (i * 2 < NumElts) {
      if (V0.isUndef()) {
        V0 = Op0.getOperand(0);
        if (V0.getValueType() != VT)
          return false;
      }
    } else {
      if (V1.isUndef()) {
        V1 = Op0.getOperand(0);
        if (V1.getValueType() != VT)
          return false;
      }
      if (i * 2 == NumElts)
        ExpectedVExtractIdx = BaseIdx;
    }

    SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
    if (I0 == ExpectedVExtractIdx)
      CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
    else if (IsCommutable && I1 == ExpectedVExtractIdx) {
      // Try to match the following dag sequence:
      // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
      CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
    } else
      CanFold = false;

    ExpectedVExtractIdx += 2;
  }

  return CanFold;
}

/// Emit a sequence of two 128-bit horizontal add/sub followed by
/// a concat_vector.
///
/// This is a helper function of LowerToHorizontalOp().
/// This function expects two 256-bit vectors called V0 and V1.
/// At first, each vector is split into two separate 128-bit vectors.
/// Then, the resulting 128-bit vectors are used to implement two
/// horizontal binary operations.
///
/// The kind of horizontal binary operation is defined by \p X86Opcode.
///
/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
/// the two new horizontal binop.
/// When Mode is set, the first horizontal binop dag node would take as input
/// the lower 128-bit of V0 and the upper 128-bit of V0. The second
/// horizontal binop dag node would take as input the lower 128-bit of V1
/// and the upper 128-bit of V1.
///   Example:
///     HADD V0_LO, V0_HI
///     HADD V1_LO, V1_HI
///
/// Otherwise, the first horizontal binop dag node takes as input the lower
/// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
/// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
///   Example:
///     HADD V0_LO, V1_LO
///     HADD V0_HI, V1_HI
///
/// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
/// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
/// the upper 128-bits of the result.
static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
                                     const SDLoc &DL, SelectionDAG &DAG,
                                     unsigned X86Opcode, bool Mode,
                                     bool isUndefLO, bool isUndefHI) {
  MVT VT = V0.getSimpleValueType();
  assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
         "Invalid nodes in input!");

  unsigned NumElts = VT.getVectorNumElements();
  SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
  SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
  SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
  SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
  MVT NewVT = V0_LO.getSimpleValueType();

  SDValue LO = DAG.getUNDEF(NewVT);
  SDValue HI = DAG.getUNDEF(NewVT);

  if (Mode) {
    // Don't emit a horizontal binop if the result is expected to be UNDEF.
    if (!isUndefLO && !V0->isUndef())
      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
    if (!isUndefHI && !V1->isUndef())
      HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
  } else {
    // Don't emit a horizontal binop if the result is expected to be UNDEF.
    if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);

    if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
      HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
}

/// Returns true iff \p BV builds a vector whose result is equivalent to the
/// result of an ADDSUB/SUBADD operation.
/// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
/// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
/// \p Opnd0 and \p Opnd1.
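///
/// For example (a sketch for a v4f32 build_vector):
///   (build_vector (fsub (extract_elt %A, 0), (extract_elt %B, 0)),
///                 (fadd (extract_elt %A, 1), (extract_elt %B, 1)),
///                 (fsub (extract_elt %A, 2), (extract_elt %B, 2)),
///                 (fadd (extract_elt %A, 3), (extract_elt %B, 3)))
/// matches ADDSUB(%A, %B), so \p Opnd0 = %A, \p Opnd1 = %B and \p IsSubAdd is
/// set to false.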
static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
                             const X86Subtarget &Subtarget, SelectionDAG &DAG,
                             SDValue &Opnd0, SDValue &Opnd1,
                             unsigned &NumExtracts,
                             bool &IsSubAdd) {

  MVT VT = BV->getSimpleValueType(0);
  if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  SDValue InVec0 = DAG.getUNDEF(VT);
  SDValue InVec1 = DAG.getUNDEF(VT);

  NumExtracts = 0;

  // Odd-numbered elements in the input build vector are obtained from
  // adding/subtracting two integer/float elements.
  // Even-numbered elements in the input build vector are obtained from
  // subtracting/adding two integer/float elements.
  unsigned Opc[2] = {0, 0};
  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Op = BV->getOperand(i);

    // Skip 'undef' values.
    unsigned Opcode = Op.getOpcode();
    if (Opcode == ISD::UNDEF)
      continue;

    // Early exit if we found an unexpected opcode.
    if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
      return false;

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // Try to match the following pattern:
    // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
    // Early exit if we cannot match that sequence.
    if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(Op0.getOperand(1)) ||
        !isa<ConstantSDNode>(Op1.getOperand(1)) ||
        Op0.getOperand(1) != Op1.getOperand(1))
      return false;

    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    if (I0 != i)
      return false;

    // We found a valid add/sub node; make sure it's the same opcode as previous
    // elements for this parity.
    if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
      return false;
    Opc[i % 2] = Opcode;

    // Update InVec0 and InVec1.
    if (InVec0.isUndef()) {
      InVec0 = Op0.getOperand(0);
      if (InVec0.getSimpleValueType() != VT)
        return false;
    }
    if (InVec1.isUndef()) {
      InVec1 = Op1.getOperand(0);
      if (InVec1.getSimpleValueType() != VT)
        return false;
    }

    // Make sure that the operands of each add/sub node always come from the
    // same pair of vectors.
    if (InVec0 != Op0.getOperand(0)) {
      if (Opcode == ISD::FSUB)
        return false;

      // FADD is commutable. Try to commute the operands
      // and then test again.
      std::swap(Op0, Op1);
      if (InVec0 != Op0.getOperand(0))
        return false;
    }

    if (InVec1 != Op1.getOperand(0))
      return false;

    // Increment the number of extractions done.
    ++NumExtracts;
  }

  // Ensure we have found an opcode for both parities and that they are
  // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
  // inputs are undef.
  if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
      InVec0.isUndef() || InVec1.isUndef())
    return false;

  IsSubAdd = Opc[0] == ISD::FADD;

  Opnd0 = InVec0;
  Opnd1 = InVec1;
  return true;
}

/// Returns true if it is possible to fold a MUL and an idiom that has already
/// been recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
/// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
/// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1,
/// \p Opnd2.
///
/// Prior to calling this function it should be known that there is some
/// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
/// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
/// before replacement of such SDNode with ADDSUB operation. Thus the number
/// of \p Opnd0 uses is expected to be equal to 2.
/// For example, this function may be called for the following IR:
///    %AB = fmul fast <2 x double> %A, %B
///    %Sub = fsub fast <2 x double> %AB, %C
///    %Add = fadd fast <2 x double> %AB, %C
///    %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
///                            <2 x i32> <i32 0, i32 3>
/// There is a def for %Addsub here, which potentially can be replaced by
/// X86ISD::ADDSUB operation:
///    %Addsub = X86ISD::ADDSUB %AB, %C
/// and such ADDSUB can further be replaced with FMADDSUB:
///    %Addsub = FMADDSUB %A, %B, %C.
///
/// The main reason why this method is called before the replacement of the
/// recognized ADDSUB idiom with ADDSUB operation is that such replacement
/// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
/// FMADDSUB is.
static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG,
                                 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
                                 unsigned ExpectedUses) {
  if (Opnd0.getOpcode() != ISD::FMUL ||
      !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
    return false;

  // FIXME: These checks must match the similar ones in
  // DAGCombiner::visitFADDForFMACombine. It would be good to have one
  // function that would answer if it is Ok to fuse MUL + ADD to FMADD
  // or MUL + ADDSUB to FMADDSUB.
  const TargetOptions &Options = DAG.getTarget().Options;
  bool AllowFusion =
      (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
  if (!AllowFusion)
    return false;

  Opnd2 = Opnd1;
  Opnd1 = Opnd0.getOperand(1);
  Opnd0 = Opnd0.getOperand(0);

  return true;
}

/// Try to fold a build_vector that performs an 'addsub' or 'fmaddsub' or
/// 'fsubadd' operation accordingly to X86ISD::ADDSUB or X86ISD::FMADDSUB or
/// X86ISD::FMSUBADD node.
static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  SDValue Opnd0, Opnd1;
  unsigned NumExtracts;
  bool IsSubAdd;
  if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
                        IsSubAdd))
    return SDValue();

  MVT VT = BV->getSimpleValueType(0);
  SDLoc DL(BV);

  // Try to generate X86ISD::FMADDSUB node here.
  SDValue Opnd2;
  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
    unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
    return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
  }

  // We only support ADDSUB.
  if (IsSubAdd)
    return SDValue();

  // Do not generate X86ISD::ADDSUB node for 512-bit types even though
  // the ADDSUB idiom has been successfully recognized. There are no known
  // X86 targets with 512-bit ADDSUB instructions!
  // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
  // recognition.
  if (VT.is512BitVector())
    return SDValue();

  return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}

static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
                             unsigned &HOpcode, SDValue &V0, SDValue &V1) {
  // Initialize outputs to known values.
  MVT VT = BV->getSimpleValueType(0);
  HOpcode = ISD::DELETED_NODE;
  V0 = DAG.getUNDEF(VT);
  V1 = DAG.getUNDEF(VT);

  // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
  // half of the result is calculated independently from the 128-bit halves of
  // the inputs, so that makes the index-checking logic below more complicated.
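  // For example, a 256-bit v8i32 HADD(A, B) produces:
  //   <A0+A1, A2+A3, B0+B1, B2+B3, A4+A5, A6+A7, B4+B5, B6+B7>
  // so each 64-bit half of each 128-bit chunk of the result is computed from
  // a consecutive pair of elements of a single source vector.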
  unsigned NumElts = VT.getVectorNumElements();
  unsigned GenericOpcode = ISD::DELETED_NODE;
  unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
  unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
  unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
  for (unsigned i = 0; i != Num128BitChunks; ++i) {
    for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
      // Ignore undef elements.
      SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
      if (Op.isUndef())
        continue;

      // If there's an opcode mismatch, we're done.
      if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
        return false;

      // Initialize horizontal opcode.
      if (HOpcode == ISD::DELETED_NODE) {
        GenericOpcode = Op.getOpcode();
        switch (GenericOpcode) {
        case ISD::ADD: HOpcode = X86ISD::HADD; break;
        case ISD::SUB: HOpcode = X86ISD::HSUB; break;
        case ISD::FADD: HOpcode = X86ISD::FHADD; break;
        case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
        default: return false;
        }
      }

      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);
      if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
          Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
          Op0.getOperand(0) != Op1.getOperand(0) ||
          !isa<ConstantSDNode>(Op0.getOperand(1)) ||
          !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
        return false;

      // The source vector is chosen based on which 64-bit half of the
      // destination vector is being calculated.
      if (j < NumEltsIn64Bits) {
        if (V0.isUndef())
          V0 = Op0.getOperand(0);
      } else {
        if (V1.isUndef())
          V1 = Op0.getOperand(0);
      }

      SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
      if (SourceVec != Op0.getOperand(0))
        return false;

      // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
      unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
      unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
      unsigned ExpectedIndex = i * NumEltsIn128Bits +
                               (j % NumEltsIn64Bits) * 2;
      if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
        continue;

      // If this is not a commutative op, this does not match.
      if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
        return false;

      // Addition is commutative, so try swapping the extract indexes.
      // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
      if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
        continue;

      // Extract indexes do not match horizontal requirement.
      return false;
    }
  }
  // We matched. Opcode and operands are returned by reference as arguments.
  return true;
}

static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
                                    SelectionDAG &DAG, unsigned HOpcode,
                                    SDValue V0, SDValue V1) {
  // If either input vector is not the same size as the build vector,
  // extract/insert the low bits to the correct size.
  // This is free (examples: zmm --> xmm, xmm --> ymm).
  MVT VT = BV->getSimpleValueType(0);
  unsigned Width = VT.getSizeInBits();
  if (V0.getValueSizeInBits() > Width)
    V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
  else if (V0.getValueSizeInBits() < Width)
    V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);

  if (V1.getValueSizeInBits() > Width)
    V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
  else if (V1.getValueSizeInBits() < Width)
    V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);

  unsigned NumElts = VT.getVectorNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(NumElts);
  for (unsigned i = 0; i != NumElts; ++i)
    if (BV->getOperand(i).isUndef())
      DemandedElts.clearBit(i);

  // If we don't need the upper xmm, then perform as a xmm hop.
  unsigned HalfNumElts = NumElts / 2;
  if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
    MVT HalfVT = VT.getHalfNumVectorElementsVT();
    V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
    V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
    SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
    return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
  }

  return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
}

/// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  // We need at least 2 non-undef elements to make this worthwhile by default.
  unsigned NumNonUndefs =
      count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
  if (NumNonUndefs < 2)
    return SDValue();

  // There are 4 sets of horizontal math operations distinguished by type:
  // int/FP at 128-bit/256-bit. Each type was introduced with a different
  // subtarget feature. Try to match those "native" patterns first.
  MVT VT = BV->getSimpleValueType(0);
  if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
      ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
      ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
      ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
    unsigned HOpcode;
    SDValue V0, V1;
    if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
      return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
  }

  // Try harder to match 256-bit ops by using extract/concat.
  if (!Subtarget.hasAVX() || !VT.is256BitVector())
    return SDValue();

  // Count the number of UNDEF operands in the input build_vector.
  unsigned NumElts = VT.getVectorNumElements();
  unsigned Half = NumElts / 2;
  unsigned NumUndefsLO = 0;
  unsigned NumUndefsHI = 0;
  for (unsigned i = 0, e = Half; i != e; ++i)
    if (BV->getOperand(i)->isUndef())
      NumUndefsLO++;

  for (unsigned i = Half, e = NumElts; i != e; ++i)
    if (BV->getOperand(i)->isUndef())
      NumUndefsHI++;

  SDLoc DL(BV);
  SDValue InVec0, InVec1;
  if (VT == MVT::v8i32 || VT == MVT::v16i16) {
    SDValue InVec2, InVec3;
    unsigned X86Opcode;
    bool CanFold = true;

    if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
        isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
                              InVec3) &&
        ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
        ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
      X86Opcode = X86ISD::HADD;
    else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
                                   InVec1) &&
             isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
                                   InVec3) &&
             ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
             ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
      X86Opcode = X86ISD::HSUB;
    else
      CanFold = false;

    if (CanFold) {
      // Do not try to expand this build_vector into a pair of horizontal
      // add/sub if we can emit a pair of scalar add/sub.
      if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
        return SDValue();

      // Convert this build_vector into a pair of horizontal binops followed by
      // a concat vector. We must adjust the outputs from the partial horizontal
      // matching calls above to account for undefined vector halves.
      SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
      SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
      assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
      bool isUndefLO = NumUndefsLO == Half;
      bool isUndefHI = NumUndefsHI == Half;
      return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
                                   isUndefHI);
    }
  }

  if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
      VT == MVT::v16i16) {
    unsigned X86Opcode;
    if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
      X86Opcode = X86ISD::HADD;
    else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
                                   InVec1))
      X86Opcode = X86ISD::HSUB;
    else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
                                   InVec1))
      X86Opcode = X86ISD::FHADD;
    else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
                                   InVec1))
      X86Opcode = X86ISD::FHSUB;
    else
      return SDValue();

    // Don't try to expand this build_vector into a pair of horizontal add/sub
    // if we can simply emit a pair of scalar add/sub.
    if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
      return SDValue();

    // Convert this build_vector into two horizontal add/sub followed by
    // a concat vector.
    bool isUndefLO = NumUndefsLO == Half;
    bool isUndefHI = NumUndefsHI == Half;
    return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
                                 isUndefLO, isUndefHI);
  }

  return SDValue();
}

/// If a BUILD_VECTOR's source elements all apply the same bit operation and
/// one of their operands is constant, lower to a pair of BUILD_VECTOR and
/// just apply the bit to the vectors.
/// NOTE: It's not in our interest to start making a general-purpose vectorizer
/// out of this, but enough scalar bit operations are created by the later
/// legalization + scalarization stages to need basic support.
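///
/// For example (a sketch for a v4i32 build_vector):
///   (build_vector (and %a, 1), (and %b, 2), (and %c, 4), (and %d, 8))
/// becomes:
///   (and (build_vector %a, %b, %c, %d), (build_vector 1, 2, 4, 8))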
static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  MVT VT = Op->getSimpleValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Check that all elements have the same opcode.
  // TODO: Should we allow UNDEFS and if so how many?
  unsigned Opcode = Op->getOperand(0).getOpcode();
  for (unsigned i = 1; i < NumElems; ++i)
    if (Opcode != Op->getOperand(i).getOpcode())
      return SDValue();

  // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
  bool IsShift = false;
  switch (Opcode) {
  default:
    return SDValue();
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    IsShift = true;
    break;
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR:
    // Don't do this if the buildvector is a splat - we'd replace one
    // constant with an entire vector.
    if (Op->getSplatValue())
      return SDValue();
    if (!TLI.isOperationLegalOrPromote(Opcode, VT))
      return SDValue();
    break;
  }

  SmallVector<SDValue, 4> LHSElts, RHSElts;
  for (SDValue Elt : Op->ops()) {
    SDValue LHS = Elt.getOperand(0);
    SDValue RHS = Elt.getOperand(1);

    // We expect the canonicalized RHS operand to be the constant.
    if (!isa<ConstantSDNode>(RHS))
      return SDValue();

    // Extend shift amounts.
    if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
      if (!IsShift)
        return SDValue();
      RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
    }

    LHSElts.push_back(LHS);
    RHSElts.push_back(RHS);
  }

  // Limit to shifts by uniform immediates.
  // TODO: Only accept vXi8/vXi64 special cases?
  // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
  if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
    return SDValue();

  SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
  SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
  return DAG.getNode(Opcode, DL, VT, LHS, RHS);
}

/// Create a vector constant without a load. SSE/AVX provide the bare minimum
/// functionality to do this, so it's all zeros, all ones, or some derivation
/// that is cheap to calculate.
static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();

  // Vectors containing all zeros can be matched by pxor and xorps.
  if (ISD::isBuildVectorAllZeros(Op.getNode()))
    return Op;

  // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
  // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
  // vpcmpeqd on 256-bit vectors.
  if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
    if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
      return Op;

    return getOnesVector(VT, DAG, DL);
  }

  return SDValue();
}

/// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
/// from a vector of source values and a vector of extraction indices.
/// The vectors might be manipulated to match the type of the permute op.
static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
                                     SDLoc &DL, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  MVT ShuffleVT = VT;
  EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned SizeInBits = VT.getSizeInBits();

  // Adjust IndicesVec to match VT size.
  assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
         "Illegal variable permute mask size");
  if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
    IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
                                  NumElts * VT.getScalarSizeInBits());
  IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);

  // Handle a SrcVec whose size doesn't match VT.
  if (SrcVec.getValueSizeInBits() != SizeInBits) {
    if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
      // Handle larger SrcVec by treating it as a larger permute.
      unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
      VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
      IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
      IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
                                  Subtarget, DAG, SDLoc(IndicesVec));
      return extractSubVector(
          createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0,
          DAG, DL, SizeInBits);
    } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
      // Widen smaller SrcVec to match VT.
      SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
    } else
      return SDValue();
  }

  auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
    assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
    EVT SrcVT = Idx.getValueType();
    unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
    uint64_t IndexScale = 0;
    uint64_t IndexOffset = 0;

    // If we're scaling a smaller permute op, then we need to repeat the
    // indices, scaling and offsetting them as well.
    // e.g. v4i32 -> v16i8 (Scale = 4)
    // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
    // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
    for (uint64_t i = 0; i != Scale; ++i) {
      IndexScale |= Scale << (i * NumDstBits);
      IndexOffset |= i << (i * NumDstBits);
    }

    Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
                      DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
    Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
                      DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
    return Idx;
  };

  unsigned Opcode = 0;
  switch (VT.SimpleTy) {
  default:
    break;
  case MVT::v16i8:
    if (Subtarget.hasSSSE3())
      Opcode = X86ISD::PSHUFB;
    break;
  case MVT::v8i16:
    if (Subtarget.hasVLX() && Subtarget.hasBWI())
      Opcode = X86ISD::VPERMV;
    else if (Subtarget.hasSSSE3()) {
      Opcode = X86ISD::PSHUFB;
      ShuffleVT = MVT::v16i8;
    }
    break;
  case MVT::v4f32:
  case MVT::v4i32:
    if (Subtarget.hasAVX()) {
      Opcode = X86ISD::VPERMILPV;
      ShuffleVT = MVT::v4f32;
    } else if (Subtarget.hasSSSE3()) {
      Opcode = X86ISD::PSHUFB;
      ShuffleVT = MVT::v16i8;
    }
    break;
  case MVT::v2f64:
  case MVT::v2i64:
    if (Subtarget.hasAVX()) {
      // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
      IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
      Opcode = X86ISD::VPERMILPV;
      ShuffleVT = MVT::v2f64;
    } else if (Subtarget.hasSSE41()) {
      // SSE41 can compare v2i64 - select between indices 0 and 1.
      return DAG.getSelectCC(
          DL, IndicesVec,
          getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
          DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
          DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
          ISD::CondCode::SETEQ);
    }
    break;
  case MVT::v32i8:
    if (Subtarget.hasVLX() && Subtarget.hasVBMI())
      Opcode = X86ISD::VPERMV;
    else if (Subtarget.hasXOP()) {
      SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
      SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
      SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
      SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
      return DAG.getNode(
          ISD::CONCAT_VECTORS, DL, VT,
          DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
          DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
    } else if (Subtarget.hasAVX()) {
      SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
      SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
      SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
      SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
      auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                              ArrayRef<SDValue> Ops) {
        // Permute Lo and Hi and then select based on index range.
        // This works because PSHUFB uses bits[3:0] to permute elements and we
        // don't care about bit[7], as it's just an index vector.
        SDValue Idx = Ops[2];
        EVT VT = Idx.getValueType();
        return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
                               DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
                               DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
                               ISD::CondCode::SETGT);
      };
      SDValue Ops[] = {LoLo, HiHi, IndicesVec};
      return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
                              PSHUFBBuilder);
    }
    break;
  case MVT::v16i16:
    if (Subtarget.hasVLX() && Subtarget.hasBWI())
      Opcode = X86ISD::VPERMV;
    else if (Subtarget.hasAVX()) {
      // Scale to v32i8 and perform as v32i8.
      IndicesVec = ScaleIndices(IndicesVec, 2);
      return DAG.getBitcast(
          VT, createVariablePermute(
                  MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
                  DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
    }
    break;
  case MVT::v8f32:
  case MVT::v8i32:
    if (Subtarget.hasAVX2())
      Opcode = X86ISD::VPERMV;
    else if (Subtarget.hasAVX()) {
      SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
      SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
                                          {0, 1, 2, 3, 0, 1, 2, 3});
      SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
                                          {4, 5, 6, 7, 4, 5, 6, 7});
      if (Subtarget.hasXOP())
        return DAG.getBitcast(
            VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
                            IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
      // Permute Lo and Hi and then select based on index range.
      // This works as VPERMILPS only uses index bits[0:1] to permute elements.
      SDValue Res = DAG.getSelectCC(
          DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
          ISD::CondCode::SETGT);
      return DAG.getBitcast(VT, Res);
    }
    break;
  case MVT::v4i64:
  case MVT::v4f64:
    if (Subtarget.hasAVX512()) {
      if (!Subtarget.hasVLX()) {
        MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
        SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
                                SDLoc(SrcVec));
        IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
                                    DAG, SDLoc(IndicesVec));
        SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
                                            DAG, Subtarget);
        return extract256BitVector(Res, 0, DAG, DL);
      }
      Opcode = X86ISD::VPERMV;
    } else if (Subtarget.hasAVX()) {
      SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
      SDValue LoLo =
          DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
      SDValue HiHi =
          DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
      // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
      IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
      if (Subtarget.hasXOP())
        return DAG.getBitcast(
            VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
                            IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
      // Permute Lo and Hi and then select based on index range.
      // This works as VPERMILPD only uses index bit[1] to permute elements.
      SDValue Res = DAG.getSelectCC(
          DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
          ISD::CondCode::SETGT);
      return DAG.getBitcast(VT, Res);
    }
    break;
  case MVT::v64i8:
    if (Subtarget.hasVBMI())
      Opcode = X86ISD::VPERMV;
    break;
  case MVT::v32i16:
    if (Subtarget.hasBWI())
      Opcode = X86ISD::VPERMV;
    break;
  case MVT::v16f32:
  case MVT::v16i32:
  case MVT::v8f64:
  case MVT::v8i64:
    if (Subtarget.hasAVX512())
      Opcode = X86ISD::VPERMV;
    break;
  }
  if (!Opcode)
    return SDValue();

  assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
         (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
         "Illegal variable permute shuffle type");

  uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
  if (Scale > 1)
    IndicesVec = ScaleIndices(IndicesVec, Scale);

  EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
  IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);

  SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
  SDValue Res = Opcode == X86ISD::VPERMV
                    ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
                    : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
  return DAG.getBitcast(VT, Res);
}

// Tries to lower a BUILD_VECTOR composed of extract-extract chains that
// together form a permutation of a source vector by indices held in a
// non-constant index vector.
// (build_vector (extract_elt V, (extract_elt I, 0)),
//               (extract_elt V, (extract_elt I, 1)),
//                    ...
// ->
// (vpermv I, V)
//
// TODO: Handle undefs
// TODO: Utilize pshufb and zero mask blending to support more efficient
// construction of vectors with constant-0 elements.
static SDValue
LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  SDValue SrcVec, IndicesVec;
  // Check for a match of the permute source vector and permute index elements.
  // This is done by checking that the i-th build_vector operand is of the form:
  // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
  for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
    SDValue Op = V.getOperand(Idx);
    if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    // If this is the first extract encountered in V, set the source vector,
    // otherwise verify the extract is from the previously defined source
    // vector.
    if (!SrcVec)
      SrcVec = Op.getOperand(0);
    else if (SrcVec != Op.getOperand(0))
      return SDValue();
    SDValue ExtractedIndex = Op->getOperand(1);
    // Peek through extends.
    if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
        ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
      ExtractedIndex = ExtractedIndex.getOperand(0);
    if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    // If this is the first extract from the index vector candidate, set the
    // indices vector, otherwise verify the extract is from the previously
    // defined indices vector.
    if (!IndicesVec)
      IndicesVec = ExtractedIndex.getOperand(0);
    else if (IndicesVec != ExtractedIndex.getOperand(0))
      return SDValue();

    auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
    if (!PermIdx || PermIdx->getAPIntValue() != Idx)
      return SDValue();
  }

  SDLoc DL(V);
  MVT VT = V.getSimpleValueType();
  return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
}

SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);

  MVT VT = Op.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElems = Op.getNumOperands();

  // Generate vectors for predicate vectors.
  if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
    return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);

  if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
    return VectorConstant;

  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
  if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
    return AddSub;
  if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
    return HorizontalOp;
  if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
    return Broadcast;
  if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
    return BitOp;

  unsigned EVTBits = EltVT.getSizeInBits();

  unsigned NumZero  = 0;
  unsigned NumNonZero = 0;
  uint64_t NonZeros = 0;
  bool IsAllConstants = true;
  SmallSet<SDValue, 8> Values;
  unsigned NumConstants = NumElems;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Elt = Op.getOperand(i);
    if (Elt.isUndef())
      continue;
    Values.insert(Elt);
    if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
      IsAllConstants = false;
      NumConstants--;
    }
    if (X86::isZeroNode(Elt))
      NumZero++;
    else {
      assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
      NonZeros |= ((uint64_t)1 << i);
      NumNonZero++;
    }
  }

  // All undef vector. Return an UNDEF.  All zero vectors were handled above.
  if (NumNonZero == 0)
    return DAG.getUNDEF(VT);

  // If we are inserting one variable into a vector of non-zero constants, try
  // to avoid loading each constant element as a scalar. Load the constants as a
  // vector and then insert the variable scalar element. If insertion is not
  // supported, fall back to a shuffle to get the scalar blended with the
  // constants. Insertion into a zero vector is handled as a special-case
  // somewhere below here.
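  // For example (a sketch), (v4f32 build_vector 1.0, 2.0, %x, 4.0) becomes a
  // constant-pool load of <1.0, 2.0, undef, 4.0> followed by an insertion of
  // %x at element index 2.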
  if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
      (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
       isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
    // Create an all-constant vector. The variable element in the old
    // build vector is replaced by undef in the constant vector. Save the
    // variable scalar element and its index for use in the insertelement.
    LLVMContext &Context = *DAG.getContext();
    Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
    SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
    SDValue VarElt;
    SDValue InsIndex;
    for (unsigned i = 0; i != NumElems; ++i) {
      SDValue Elt = Op.getOperand(i);
      if (auto *C = dyn_cast<ConstantSDNode>(Elt))
        ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
      else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
        ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
      else if (!Elt.isUndef()) {
        assert(!VarElt.getNode() && !InsIndex.getNode() &&
               "Expected one variable element in this vector");
        VarElt = Elt;
        InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout()));
      }
    }
    Constant *CV = ConstantVector::get(ConstVecOps);
    SDValue DAGConstVec = DAG.getConstantPool(CV, VT);

    // The constants we just created may not be legal (eg, floating point). We
    // must lower the vector right here because we can not guarantee that we'll
    // legalize it before loading it. This is also why we could not just create
    // a new build vector here. If the build vector contains illegal constants,
    // it could get split back up into a series of insert elements.
    // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
    SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
    MachineFunction &MF = DAG.getMachineFunction();
    MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
    SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
    unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
    unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
    if (InsertC < NumEltsInLow128Bits)
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);

    // There's no good way to insert into the high elements of a >128-bit
    // vector, so use shuffles to avoid an extract/insert sequence.
    assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
    assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
    SmallVector<int, 8> ShuffleMask;
    unsigned NumElts = VT.getVectorNumElements();
    for (unsigned i = 0; i != NumElts; ++i)
      ShuffleMask.push_back(i == InsertC ? NumElts : i);
    SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
    return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
  }

  // Special case for a single non-zero, non-undef element.
  if (NumNonZero == 1) {
    unsigned Idx = countTrailingZeros(NonZeros);
    SDValue Item = Op.getOperand(Idx);

    // If we have a constant or non-constant insertion into the low element of
    // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
    // the rest of the elements.  This will be matched as movd/movq/movss/movsd
    // depending on what the source datatype is.
    if (Idx == 0) {
      if (NumZero == 0)
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);

      if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
          (EltVT == MVT::i64 && Subtarget.is64Bit())) {
        assert((VT.is128BitVector() || VT.is256BitVector() ||
                VT.is512BitVector()) &&
               "Expected an SSE value type!");
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
        // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
        return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
      }

      // We can't directly insert an i8 or i16 into a vector, so zero extend
      // it to i32 first.
      if (EltVT == MVT::i16 || EltVT == MVT::i8) {
        Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
        MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
        Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
        return DAG.getBitcast(VT, Item);
      }
    }

    // Is it a vector logical left shift?
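    // For example (a sketch), (v2i64 build_vector 0, %x) becomes a
    // whole-vector left shift by 64 bits of (scalar_to_vector %x), which
    // typically selects to a single pslldq.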
    if (NumElems == 2 && Idx == 1 &&
        X86::isZeroNode(Op.getOperand(0)) &&
        !X86::isZeroNode(Op.getOperand(1))) {
      unsigned NumBits = VT.getSizeInBits();
      return getVShift(true, VT,
                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                   VT, Op.getOperand(1)),
                       NumBits/2, DAG, *this, dl);
    }

    if (IsAllConstants) // Otherwise, it's better to do a constpool load.
      return SDValue();

    // Otherwise, if this is a vector with i32 or f32 elements, and the element
    // is a non-constant being inserted into an element other than the low one,
    // we can't use a constant pool load.  Instead, use SCALAR_TO_VECTOR (aka
    // movd/movss) to move this into the low element, then shuffle it into
    // place.
    if (EVTBits == 32) {
      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
      return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
    }
  }

  // Splat is obviously ok. Let legalizer expand it to a shuffle.
  if (Values.size() == 1) {
    if (EVTBits == 32) {
      // Instead of a shuffle like this:
      // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
      // Check if it's possible to issue this instead.
      // shuffle (vload ptr)), undef, <1, 1, 1, 1>
      unsigned Idx = countTrailingZeros(NonZeros);
      SDValue Item = Op.getOperand(Idx);
      if (Op.getNode()->isOnlyUserOf(Item.getNode()))
        return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
    }
    return SDValue();
  }

  // A vector full of immediates; various special cases are already
  // handled, so this is best done with a single constant-pool load.
  if (IsAllConstants)
    return SDValue();

  if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
      return V;

  // See if we can use a vector load to get all of the elements.
  {
    SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
    if (SDValue LD =
            EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
      return LD;
  }

  // If this is a splat of pairs of 32-bit elements, we can use a narrower
  // build_vector and broadcast it.
  // TODO: We could probably generalize this more.
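  // For example (a sketch), a v8f32 (build_vector %a, %b, %a, %b, ...) becomes
  // a v4f64 X86ISD::VBROADCAST of the v2f64 bitcast of
  // (v4f32 build_vector %a, %b, undef, undef), bitcast back to v8f32.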
  if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
    SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
                       DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
    auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
      // Make sure all the even/odd operands match.
      for (unsigned i = 2; i != NumElems; ++i)
        if (Ops[i % 2] != Op.getOperand(i))
          return false;
      return true;
    };
    if (CanSplat(Op, NumElems, Ops)) {
      MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
      MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
      // Create a new build vector and cast to v2i64/v2f64.
      SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
                                     DAG.getBuildVector(NarrowVT, dl, Ops));
      // Broadcast from v2i64/v2f64 and cast to final VT.
      MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
      return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
                                            NewBV));
    }
  }

  // For AVX-length vectors, build the individual 128-bit pieces and use
  // shuffles to put them in place.
  if (VT.getSizeInBits() > 128) {
    MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);

    // Build both the lower and upper subvector.
    SDValue Lower =
        DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
    SDValue Upper = DAG.getBuildVector(
        HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));

    // Recreate the wider vector with the lower and upper part.
    return concatSubVectors(Lower, Upper, DAG, dl);
  }

  // Let legalizer expand 2-wide build_vectors.
  if (EVTBits == 64) {
    if (NumNonZero == 1) {
      // One half is zero or undef.
      unsigned Idx = countTrailingZeros(NonZeros);
      SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
                               Op.getOperand(Idx));
      return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
    }
    return SDValue();
  }

  // If element VT is < 32 bits, convert it to inserts into a zero vector.
  if (EVTBits == 8 && NumElems == 16)
    if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
                                          DAG, Subtarget))
      return V;

  if (EVTBits == 16 && NumElems == 8)
    if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
                                          DAG, Subtarget))
      return V;

  // If the element VT is 32 bits and the vector has 4 elements, try INSERTPS.
  if (EVTBits == 32 && NumElems == 4)
    if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
      return V;

  // If element VT is == 32 bits, turn it into a number of shuffles.
  if (NumElems == 4 && NumZero > 0) {
    SmallVector<SDValue, 8> Ops(NumElems);
    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1ULL << i));
      if (isZero)
        Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
      else
        Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
    }

    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros >> (i*2)) & 0x3) {
        default: llvm_unreachable("Unexpected NonZero count");
        case 0:
          Ops[i] = Ops[i*2];  // Must be a zero vector.
          break;
        case 1:
          Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
          break;
        case 2:
          Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
          break;
        case 3:
          Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
          break;
      }
    }

    bool Reverse1 = (NonZeros & 0x3) == 2;
    bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    int MaskVec[] = {
      Reverse1 ? 1 : 0,
      Reverse1 ? 0 : 1,
      static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
      static_cast<int>(Reverse2 ? NumElems   : NumElems+1)
    };
    return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
  }

  assert(Values.size() > 1 && "Expected non-undef and non-splat vector");

  // Check for a build_vector that is mostly a shuffle plus a few insertions.
  if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
    return Sh;

  // For SSE 4.1, use insertps to put the high elements into the low element.
  if (Subtarget.hasSSE41()) {
    SDValue Result;
    if (!Op.getOperand(0).isUndef())
      Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
    else
      Result = DAG.getUNDEF(VT);

    for (unsigned i = 1; i < NumElems; ++i) {
      if (Op.getOperand(i).isUndef()) continue;
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
                           Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
    }
    return Result;
  }

  // Otherwise, expand into a number of unpckl*, start by extending each of
  // our (non-undef) elements to the full vector width with the element in the
  // bottom slot of the vector (which generates no code for SSE).
  SmallVector<SDValue, 8> Ops(NumElems);
  for (unsigned i = 0; i < NumElems; ++i) {
    if (!Op.getOperand(i).isUndef())
      Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
    else
      Ops[i] = DAG.getUNDEF(VT);
  }

  // Next, we iteratively mix elements, e.g. for v4f32:
  //   Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
  //         : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
  //   Step 2: unpcklpd X, Y ==>    <3, 2, 1, 0>
  for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
    // Generate scaled UNPCKL shuffle mask.
    SmallVector<int, 16> Mask;
    for (unsigned i = 0; i != Scale; ++i)
      Mask.push_back(i);
    for (unsigned i = 0; i != Scale; ++i)
      Mask.push_back(NumElems+i);
    Mask.append(NumElems - Mask.size(), SM_SentinelUndef);

    for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
      Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
  }
  return Ops[0];
}

// 256-bit AVX can use the vinsertf128 instruction
// to create 256-bit vectors from two other 128-bit ones.
// TODO: Detect subvector broadcast here instead of DAG combine?
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  MVT ResVT = Op.getSimpleValueType();

  assert((ResVT.is256BitVector() ||
          ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");

  unsigned NumOperands = Op.getNumOperands();
  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  for (unsigned i = 0; i != NumOperands; ++i) {
    SDValue SubVec = Op.getOperand(i);
    if (SubVec.isUndef())
      continue;
    if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
      ++NumZero;
    else {
      assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
      NonZeros |= 1 << i;
      ++NumNonZero;
    }
  }

  // If we have more than 2 non-zeros, build each half separately.
  if (NumNonZero > 2) {
    MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
    ArrayRef<SDUse> Ops = Op->ops();
    SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
                             Ops.slice(0, NumOperands/2));
    SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
                             Ops.slice(NumOperands/2));
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
  }

  // Otherwise, build it up through insert_subvectors.
  SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
                        : DAG.getUNDEF(ResVT);

  MVT SubVT = Op.getOperand(0).getSimpleValueType();
  unsigned NumSubElems = SubVT.getVectorNumElements();
  for (unsigned i = 0; i != NumOperands; ++i) {
    if ((NonZeros & (1 << i)) == 0)
      continue;

    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
                      Op.getOperand(i),
                      DAG.getIntPtrConstant(i * NumSubElems, dl));
  }

  return Vec;
}

// Lower a CONCAT_VECTORS of vXi1 vectors. This commonly arises as a type
// promotion (by concatenating i1 zeros) of the result of a node that already
// zeros all upper bits of a k-register.
// TODO: Merge this with LowerAVXCONCAT_VECTORS?
static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT ResVT = Op.getSimpleValueType();
  unsigned NumOperands = Op.getNumOperands();

  assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
         "Unexpected number of operands in CONCAT_VECTORS");

  uint64_t Zeros = 0;
  uint64_t NonZeros = 0;
  for (unsigned i = 0; i != NumOperands; ++i) {
    SDValue SubVec = Op.getOperand(i);
    if (SubVec.isUndef())
      continue;
    assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
    if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
      Zeros |= (uint64_t)1 << i;
    else
      NonZeros |= (uint64_t)1 << i;
  }

  unsigned NumElems = ResVT.getVectorNumElements();

  // If we are inserting a non-zero vector and there are zeros in the LSBs and
  // undef in the MSBs, we need to emit a KSHIFTL. The generic lowering to
  // insert_subvector will give us two kshifts.
  if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
      Log2_64(NonZeros) != NumOperands - 1) {
    MVT ShiftVT = ResVT;
    if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
      ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
    unsigned Idx = Log2_64(NonZeros);
    SDValue SubVec = Op.getOperand(Idx);
    unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
    SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
                         DAG.getUNDEF(ShiftVT), SubVec,
                         DAG.getIntPtrConstant(0, dl));
    Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
                     DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
                       DAG.getIntPtrConstant(0, dl));
  }

  // If there are zero or one non-zeros, we can handle this very simply.
  if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
    SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
    if (!NonZeros)
      return Vec;
    unsigned Idx = Log2_64(NonZeros);
    SDValue SubVec = Op.getOperand(Idx);
    unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
                       DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
  }

  if (NumOperands > 2) {
    MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
    ArrayRef<SDUse> Ops = Op->ops();
    SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
                             Ops.slice(0, NumOperands/2));
    SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
                             Ops.slice(NumOperands/2));
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
  }

  assert(countPopulation(NonZeros) == 2 && "Simple cases not handled?");

  if (ResVT.getVectorNumElements() >= 16)
    return Op; // The operation is legal with KUNPCK

  SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
                            DAG.getUNDEF(ResVT), Op.getOperand(0),
                            DAG.getIntPtrConstant(0, dl));
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
                     DAG.getIntPtrConstant(NumElems/2, dl));
}

static SDValue LowerCONCAT_VECTORS(SDValue Op,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  if (VT.getVectorElementType() == MVT::i1)
    return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);

  assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
         (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
          Op.getNumOperands() == 4)));

  // AVX can use the vinsertf128 instruction to create 256-bit vectors
  // from two other 128-bit ones.

  // A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors.
  return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
}

//===----------------------------------------------------------------------===//
// Vector shuffle lowering
//
// This is an experimental code path for lowering vector shuffles on x86. It is
// designed to handle arbitrary vector shuffles and blends, gracefully
// degrading performance as necessary. It works hard to recognize idiomatic
// shuffles and lower them to optimal instruction patterns without leaving
// a framework that allows reasonably efficient handling of all vector shuffle
// patterns.
//===----------------------------------------------------------------------===//

/// Tiny helper function to identify a no-op mask.
///
/// This is a somewhat boring predicate function. It checks whether the mask
/// array input, which is assumed to be a single-input shuffle mask of the kind
/// used by the X86 shuffle instructions (not a fully general
/// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and
/// an in-place shuffle are 'no-op's.
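///
/// For example, <0, -1, 2, 3> is a no-op mask, while <1, 0, 2, 3> is not,
/// because element 0 would move.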
static bool isNoopShuffleMask(ArrayRef<int> Mask) {
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    assert(Mask[i] >= -1 && "Out of bound mask element!");
    if (Mask[i] >= 0 && Mask[i] != i)
      return false;
  }
  return true;
}

/// Test whether there are elements crossing 128-bit lanes in this
/// shuffle mask.
///
/// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
/// and we routinely test for these.
static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
  int LaneSize = 128 / VT.getScalarSizeInBits();
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
      return true;
  return false;
}

/// Test whether a shuffle mask is equivalent within each sub-lane.
///
/// This checks a shuffle mask to see if it is performing the same
/// lane-relative shuffle in each sub-lane. This trivially implies
/// that it is also not lane-crossing. It may however involve a blend from the
/// same lane of a second vector.
///
/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
/// non-trivial to compute in the face of undef lanes. The representation is
/// suitable for use with existing 128-bit shuffles as entries from the second
/// vector have been remapped to [LaneSize, 2*LaneSize).
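///
/// For example, for v8i32 (two 128-bit lanes of four elements) the mask
/// <0, 9, 2, 11, 4, 13, 6, 15> repeats per lane: both lanes reduce to the
/// lane-relative mask <0, 5, 2, 7>, with second-vector indices remapped into
/// [LaneSize, 2*LaneSize).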
static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
                                  ArrayRef<int> Mask,
                                  SmallVectorImpl<int> &RepeatedMask) {
  auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
  RepeatedMask.assign(LaneSize, -1);
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i) {
    assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
    if (Mask[i] < 0)
      continue;
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      // This entry crosses lanes, so there is no way to model this shuffle.
      return false;

    // Ok, handle the in-lane shuffles by detecting if and when they repeat.
    // Adjust second vector indices to start at LaneSize instead of Size.
    int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
                                : Mask[i] % LaneSize + LaneSize;
    if (RepeatedMask[i % LaneSize] < 0)
      // This is the first non-undef entry in this slot of a 128-bit lane.
      RepeatedMask[i % LaneSize] = LocalM;
    else if (RepeatedMask[i % LaneSize] != LocalM)
      // Found a mismatch with the repeated mask.
      return false;
  }
  return true;
}

/// Test whether a shuffle mask is equivalent within each 128-bit lane.
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &RepeatedMask) {
  return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
}

static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
  SmallVector<int, 32> RepeatedMask;
  return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
}

/// Test whether a shuffle mask is equivalent within each 256-bit lane.
static bool
is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &RepeatedMask) {
  return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
}

/// Test whether a target shuffle mask is equivalent within each sub-lane.
/// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
                                        ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &RepeatedMask) {
  int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
  RepeatedMask.assign(LaneSize, SM_SentinelUndef);
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i) {
    assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
    if (Mask[i] == SM_SentinelUndef)
      continue;
    if (Mask[i] == SM_SentinelZero) {
      if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
        return false;
      RepeatedMask[i % LaneSize] = SM_SentinelZero;
      continue;
    }
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      // This entry crosses lanes, so there is no way to model this shuffle.
      return false;

    // Ok, handle the in-lane shuffles by detecting if and when they repeat.
    // Adjust second vector indices to start at LaneSize instead of Size.
    int LocalM =
        Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
    if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
      // This is the first non-undef entry in this slot of a 128-bit lane.
      RepeatedMask[i % LaneSize] = LocalM;
    else if (RepeatedMask[i % LaneSize] != LocalM)
      // Found a mismatch with the repeated mask.
      return false;
  }
  return true;
}

/// Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
/// This is a fast way to test a shuffle mask against a fixed pattern:
///
///   if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
///
/// It returns true if the mask is exactly as wide as the expected mask, and
/// each element of the mask is either -1 (signifying undef) or the value given
/// in the expected mask.
static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
                                ArrayRef<int> ExpectedMask) {
  if (Mask.size() != ExpectedMask.size())
    return false;

  int Size = Mask.size();

  // If the values are build vectors, we can look through them to find
  // equivalent inputs that make the shuffles equivalent.
  auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
  auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);

  for (int i = 0; i < Size; ++i) {
    assert(Mask[i] >= -1 && "Out of bound mask element!");
    if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
      auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
      auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
      if (!MaskBV || !ExpectedBV ||
          MaskBV->getOperand(Mask[i] % Size) !=
              ExpectedBV->getOperand(ExpectedMask[i] % Size))
        return false;
    }
  }

  return true;
}

/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
///
/// The masks must be exactly the same width.
///
/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
/// value in ExpectedMask is always accepted. Otherwise the indices must match.
///
/// SM_SentinelZero is accepted as a valid negative index but must match in
/// both masks.
static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
                                      ArrayRef<int> ExpectedMask,
                                      SDValue V1 = SDValue(),
                                      SDValue V2 = SDValue()) {
  int Size = Mask.size();
  if (Size != (int)ExpectedMask.size())
    return false;
  assert(isUndefOrZeroOrInRange(ExpectedMask, 0, 2 * Size) &&
         "Illegal target shuffle mask");

  // Check for out-of-range target shuffle mask indices.
  if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
    return false;

  // If the values are build vectors, we can look through them to find
  // equivalent inputs that make the shuffles equivalent.
  auto *BV1 = dyn_cast_or_null<BuildVectorSDNode>(V1);
  auto *BV2 = dyn_cast_or_null<BuildVectorSDNode>(V2);
  BV1 = ((BV1 && Size != (int)BV1->getNumOperands()) ? nullptr : BV1);
  BV2 = ((BV2 && Size != (int)BV2->getNumOperands()) ? nullptr : BV2);

  for (int i = 0; i < Size; ++i) {
    if (Mask[i] == SM_SentinelUndef || Mask[i] == ExpectedMask[i])
      continue;
    if (0 <= Mask[i] && 0 <= ExpectedMask[i]) {
      auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
      auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
      if (MaskBV && ExpectedBV &&
          MaskBV->getOperand(Mask[i] % Size) ==
              ExpectedBV->getOperand(ExpectedMask[i] % Size))
        continue;
    }
    // TODO - handle SM_Sentinel equivalences.
    return false;
  }
  return true;
}

// Attempt to create a shuffle mask from a VSELECT condition mask.
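// For example, a v4i32 condition of <-1, 0, undef, -1> produces the mask
// <0, 5, 6, 3>: true lanes select element i from the first operand, while
// zero/undef lanes select element i + Size from the second operand.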
static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
                                         SDValue Cond) {
  if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
    return false;

  unsigned Size = Cond.getValueType().getVectorNumElements();
  Mask.resize(Size, SM_SentinelUndef);

  for (int i = 0; i != (int)Size; ++i) {
    SDValue CondElt = Cond.getOperand(i);
    Mask[i] = i;
    // Arbitrarily choose from the 2nd operand if the select condition element
    // is undef.
    // TODO: Can we do better by matching patterns such as even/odd?
    if (CondElt.isUndef() || isNullConstant(CondElt))
      Mask[i] += Size;
  }

  return true;
}

// Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
// instructions.
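// For example, for 8 elements the unpack-low pattern is
// <0, 8, 1, 9, 2, 10, 3, 11> and the unpack-high pattern is
// <4, 12, 5, 13, 6, 14, 7, 15>.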
static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
  if (VT != MVT::v8i32 && VT != MVT::v8f32)
    return false;

  SmallVector<int, 8> Unpcklwd;
  createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
                          /* Unary = */ false);
  SmallVector<int, 8> Unpckhwd;
  createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
                          /* Unary = */ false);
  bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
                         isTargetShuffleEquivalent(Mask, Unpckhwd));
  return IsUnpackwdMask;
}

static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
  // Create 128-bit vector type based on mask size.
  MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
  MVT VT = MVT::getVectorVT(EltVT, Mask.size());

  // We can't assume a canonical shuffle mask, so try the commuted version too.
  SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
  ShuffleVectorSDNode::commuteMask(CommutedMask);

  // Match any of unary/binary or low/high.
  for (unsigned i = 0; i != 4; ++i) {
    SmallVector<int, 16> UnpackMask;
    createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
    if (isTargetShuffleEquivalent(Mask, UnpackMask) ||
        isTargetShuffleEquivalent(CommutedMask, UnpackMask))
      return true;
  }
  return false;
}

/// Return true if a shuffle mask chooses elements identically in its top and
/// bottom halves. For example, any splat mask has the same top and bottom
/// halves. If an element is undefined in only one half of the mask, the halves
/// are not considered identical.
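///
/// For example, <0, 1, 2, 3, 0, 1, 2, 3> has identical halves, while
/// <0, 1, 2, 3, 0, 1, 2, -1> does not, since the undef appears in only one
/// half.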
static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
  assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
  unsigned HalfSize = Mask.size() / 2;
  for (unsigned i = 0; i != HalfSize; ++i) {
    if (Mask[i] != Mask[i + HalfSize])
      return false;
  }
  return true;
}

/// Get a 4-lane 8-bit shuffle immediate for a mask.
///
/// This helper function produces an 8-bit shuffle immediate corresponding to
/// the ubiquitous shuffle encoding scheme used in x86 instructions for
/// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
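///
/// For example, the mask <1, 0, 3, 2> produces the immediate
/// (1 << 0) | (0 << 2) | (3 << 4) | (2 << 6) = 0xB1, which swaps adjacent
/// pairs of elements when used with PSHUFD/SHUFPS.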
static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
  assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
  assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
  assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
  assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
  assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");

  unsigned Imm = 0;
  Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
  Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
  Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
  Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
  return Imm;
}

static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
                                          SelectionDAG &DAG) {
  return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
}

/// Compute whether each element of a shuffle is zeroable.
///
/// A "zeroable" vector shuffle element is one which can be lowered to zero.
/// Either it is an undef element in the shuffle mask, the element of the input
/// referenced is undef, or the element of the input referenced is known to be
/// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
/// as many lanes with this technique as possible to simplify the remaining
/// shuffle.
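///
/// For example, for a v4i32 shuffle with Mask = <0, 5, -1, 7>, where V2 is an
/// all-zeros build vector and V1 is arbitrary, elements 1 and 3 reference the
/// zero input and element 2 is undef, so the returned mask is {0, 1, 1, 1}.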
static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
                                            SDValue V1, SDValue V2) {
  APInt Zeroable(Mask.size(), 0);
  V1 = peekThroughBitcasts(V1);
  V2 = peekThroughBitcasts(V2);

  bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());

  int VectorSizeInBits = V1.getValueSizeInBits();
  int ScalarSizeInBits = VectorSizeInBits / Mask.size();
  assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");

  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    int M = Mask[i];
    // Handle the easy cases.
    if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
      Zeroable.setBit(i);
      continue;
    }

    // Determine shuffle input and normalize the mask.
    SDValue V = M < Size ? V1 : V2;
    M %= Size;

    // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
    if (V.getOpcode() != ISD::BUILD_VECTOR)
      continue;

    // If the BUILD_VECTOR has fewer elements than the mask, then the bitcasted
    // portion of the (larger) source element must be UNDEF/ZERO.
    if ((Size % V.getNumOperands()) == 0) {
      int Scale = Size / V->getNumOperands();
      SDValue Op = V.getOperand(M / Scale);
      if (Op.isUndef() || X86::isZeroNode(Op))
        Zeroable.setBit(i);
      else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
        APInt Val = Cst->getAPIntValue();
        Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
        Val = Val.getLoBits(ScalarSizeInBits);
        if (Val == 0)
          Zeroable.setBit(i);
      } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
        APInt Val = Cst->getValueAPF().bitcastToAPInt();
        Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
        Val = Val.getLoBits(ScalarSizeInBits);
        if (Val == 0)
          Zeroable.setBit(i);
      }
      continue;
    }

    // If the BUILD_VECTOR has more elements than the mask, then all the
    // (smaller) source elements must be UNDEF or ZERO.
    if ((V.getNumOperands() % Size) == 0) {
      int Scale = V->getNumOperands() / Size;
      bool AllZeroable = true;
      for (int j = 0; j < Scale; ++j) {
        SDValue Op = V.getOperand((M * Scale) + j);
        AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
      }
      if (AllZeroable)
        Zeroable.setBit(i);
      continue;
    }
  }

  return Zeroable;
}

// The shuffle result has the form:
//   0*a[0], 0*a[1], ..., 0*a[n], n >= 0,
// where 0* denotes a (possibly empty) run of zeros and the a[] elements appear
// in ascending order. Each element of Zeroable corresponds to a particular
// element of Mask, as described in computeZeroableShuffleElements.
//
// The function looks for a sub-mask whose non-zero elements are in increasing
// order. If such a sub-mask exists, the function returns true.
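//
// For example, for v4i32 with Mask = <0, 4, 1, 5> and Zeroable = {1, 0, 1, 0}
// (V1 is all zeros), the non-zeroable entries reference elements 0 and 1 of
// the second input in increasing order, so the function returns true with
// IsZeroSideLeft = true.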
static bool isNonZeroElementsInOrder(const APInt &Zeroable,
                                     ArrayRef<int> Mask, const EVT &VectorType,
                                     bool &IsZeroSideLeft) {
  int NextElement = -1;
  // Check if the Mask's nonzero elements are in increasing order.
  for (int i = 0, e = Mask.size(); i < e; i++) {
    // Check that the mask's zero elements are built from only zeros.
    assert(Mask[i] >= -1 && "Out of bound mask element!");
    if (Mask[i] < 0)
      return false;
    if (Zeroable[i])
      continue;
    // Find the lowest non-zero element.
    if (NextElement < 0) {
      NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
      IsZeroSideLeft = NextElement != 0;
    }
    // Exit if the mask's non-zero elements are not in increasing order.
    if (NextElement != Mask[i])
      return false;
    NextElement++;
  }
  return true;
}

/// Try to lower a shuffle with a single PSHUFB of V1 or V2.
static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
                                      ArrayRef<int> Mask, SDValue V1,
                                      SDValue V2, const APInt &Zeroable,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  int Size = Mask.size();
  int LaneSize = 128 / VT.getScalarSizeInBits();
  const int NumBytes = VT.getSizeInBits() / 8;
  const int NumEltBytes = VT.getScalarSizeInBits() / 8;

  assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
         (Subtarget.hasAVX2() && VT.is256BitVector()) ||
         (Subtarget.hasBWI() && VT.is512BitVector()));

  SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
  // Sign bit set in i8 mask means zero element.
  SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);

  SDValue V;
  for (int i = 0; i < NumBytes; ++i) {
    int M = Mask[i / NumEltBytes];
    if (M < 0) {
      PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
      continue;
    }
    if (Zeroable[i / NumEltBytes]) {
      PSHUFBMask[i] = ZeroMask;
      continue;
    }

    // We can only use a single input of V1 or V2.
    SDValue SrcV = (M >= Size ? V2 : V1);
    if (V && V != SrcV)
      return SDValue();
    V = SrcV;
    M %= Size;

    // PSHUFB can't cross lanes, ensure this doesn't happen.
    if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
      return SDValue();

    M = M % LaneSize;
    M = M * NumEltBytes + (i % NumEltBytes);
    PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
  }
  assert(V && "Failed to find a source input");

  MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
  return DAG.getBitcast(
      VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
                      DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
}

static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
                           const SDLoc &dl);

// X86 has a dedicated shuffle that can be lowered to VEXPAND.
static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
                                    const APInt &Zeroable,
                                    ArrayRef<int> Mask, SDValue &V1,
                                    SDValue &V2, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  bool IsLeftZeroSide = true;
  if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
                                IsLeftZeroSide))
    return SDValue();
  unsigned VEXPANDMask = (~Zeroable).getZExtValue();
  MVT IntegerType =
      MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
  SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
  unsigned NumElts = VT.getVectorNumElements();
  assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
         "Unexpected number of vector elements");
  SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
                              Subtarget, DAG, DL);
  SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
  SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
  return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
}

static bool matchVectorShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
                                        unsigned &UnpackOpcode, bool IsUnary,
                                        ArrayRef<int> TargetMask,
                                        const SDLoc &DL, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  int NumElts = VT.getVectorNumElements();

  bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
  for (int i = 0; i != NumElts; i += 2) {
    int M1 = TargetMask[i + 0];
    int M2 = TargetMask[i + 1];
    Undef1 &= (SM_SentinelUndef == M1);
    Undef2 &= (SM_SentinelUndef == M2);
    Zero1 &= isUndefOrZero(M1);
    Zero2 &= isUndefOrZero(M2);
  }
  assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
         "Zeroable shuffle detected");

  // Attempt to match the target mask against the unpack lo/hi mask patterns.
  SmallVector<int, 64> Unpckl, Unpckh;
  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
  if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
    UnpackOpcode = X86ISD::UNPCKL;
    V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
    V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
    return true;
  }

  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
  if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
    UnpackOpcode = X86ISD::UNPCKH;
    V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
    V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
    return true;
  }

  // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
  if (IsUnary && (Zero1 || Zero2)) {
    // Don't bother if we can blend instead.
    if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
        isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
      return false;

    bool MatchLo = true, MatchHi = true;
    for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
      int M = TargetMask[i];

      // Ignore if the input is known to be zero or the index is undef.
      if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
          (M == SM_SentinelUndef))
        continue;

      MatchLo &= (M == Unpckl[i]);
      MatchHi &= (M == Unpckh[i]);
    }

    if (MatchLo || MatchHi) {
      UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
      V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
      V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
      return true;
    }
  }

  // If a binary shuffle, commute and try again.
  if (!IsUnary) {
    ShuffleVectorSDNode::commuteMask(Unpckl);
    if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
      UnpackOpcode = X86ISD::UNPCKL;
      std::swap(V1, V2);
      return true;
    }

    ShuffleVectorSDNode::commuteMask(Unpckh);
    if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
      UnpackOpcode = X86ISD::UNPCKH;
      std::swap(V1, V2);
      return true;
    }
  }

  return false;
}

// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
                                     ArrayRef<int> Mask, SDValue V1, SDValue V2,
                                     SelectionDAG &DAG) {
  SmallVector<int, 8> Unpckl;
  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
  if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);

  SmallVector<int, 8> Unpckh;
  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
  if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);

  // Commute and try again.
  ShuffleVectorSDNode::commuteMask(Unpckl);
  if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);

  ShuffleVectorSDNode::commuteMask(Unpckh);
  if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);

  return SDValue();
}

static bool matchVectorShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
                                      int Delta) {
  int Size = (int)Mask.size();
  int Split = Size / Delta;
  int TruncatedVectorStart = SwappedOps ? Size : 0;

  // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
  if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
    return false;

  // The rest of the mask should not refer to the truncated vector's elements.
  if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
                   TruncatedVectorStart + Size))
    return false;

  return true;
}

// Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
//
// An example is the following:
//
// t0: ch = EntryToken
//           t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
//         t25: v4i32 = truncate t2
//       t41: v8i16 = bitcast t25
//       t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
//       Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
//     t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
//   t18: v2i64 = bitcast t51
//
// Without avx512vl, this is lowered to:
//
// vpmovqd %zmm0, %ymm0
// vpshufb {{.*#+}} xmm0 =
// xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
//
// But when avx512vl is available, one can just use a single vpmovdw
// instruction.
static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
                                     MVT VT, SDValue V1, SDValue V2,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  if (VT != MVT::v16i8 && VT != MVT::v8i16)
    return SDValue();

  if (Mask.size() != VT.getVectorNumElements())
    return SDValue();

  bool SwappedOps = false;

  if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
    if (!ISD::isBuildVectorAllZeros(V1.getNode()))
      return SDValue();

    std::swap(V1, V2);
    SwappedOps = true;
  }

  // Look for:
  //
  // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
  // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
  //
  // and similar ones.
  if (V1.getOpcode() != ISD::BITCAST)
    return SDValue();
  if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
    return SDValue();

  SDValue Src = V1.getOperand(0).getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();

  // The vptrunc** instructions truncating 128-bit and 256-bit vectors
  // are only available with avx512vl.
  if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
    return SDValue();

  // Down Convert Word to Byte is only available with avx512bw. The case with
  // 256-bit output doesn't contain a shuffle and is therefore not handled here.
  if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
      !Subtarget.hasBWI())
    return SDValue();

  // The first half/quarter of the mask should refer to every second/fourth
  // element of the vector truncated and bitcasted.
  if (!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 2) &&
      !matchVectorShuffleAsVPMOV(Mask, SwappedOps, 4))
    return SDValue();

  return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
}

// X86 has dedicated pack instructions that can handle specific truncation
// operations: PACKSS and PACKUS.
static bool matchVectorShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1,
                                       SDValue &V2, unsigned &PackOpcode,
                                       ArrayRef<int> TargetMask,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
  unsigned NumElts = VT.getVectorNumElements();
  unsigned BitSize = VT.getScalarSizeInBits();
  MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
  MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);

  auto MatchPACK = [&](SDValue N1, SDValue N2) {
    SDValue VV1 = DAG.getBitcast(PackVT, N1);
    SDValue VV2 = DAG.getBitcast(PackVT, N2);
    if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
      APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
      if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
          (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
        V1 = VV1;
        V2 = VV2;
        SrcVT = PackVT;
        PackOpcode = X86ISD::PACKUS;
        return true;
      }
    }
    if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
        (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
      V1 = VV1;
      V2 = VV2;
      SrcVT = PackVT;
      PackOpcode = X86ISD::PACKSS;
      return true;
    }
    return false;
  };

  // Try binary shuffle.
  SmallVector<int, 32> BinaryMask;
  createPackShuffleMask(VT, BinaryMask, false);
  if (isTargetShuffleEquivalent(TargetMask, BinaryMask, V1, V2))
    if (MatchPACK(V1, V2))
      return true;

  // Try unary shuffle.
  SmallVector<int, 32> UnaryMask;
  createPackShuffleMask(VT, UnaryMask, true);
  if (isTargetShuffleEquivalent(TargetMask, UnaryMask, V1))
    if (MatchPACK(V1, V1))
      return true;

  return false;
}

static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
                                    SDValue V1, SDValue V2, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT PackVT;
  unsigned PackOpcode;
  if (matchVectorShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
                                 Subtarget))
    return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
                       DAG.getBitcast(PackVT, V2));

  return SDValue();
}

/// Try to emit a bitmask instruction for a shuffle.
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT MaskVT = VT;
  MVT EltVT = VT.getVectorElementType();
  SDValue Zero, AllOnes;
  // Use f64 if i64 isn't legal.
  if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
    EltVT = MVT::f64;
    MaskVT = MVT::getVectorVT(EltVT, Mask.size());
  }

  MVT LogicVT = VT;
  if (EltVT == MVT::f32 || EltVT == MVT::f64) {
    Zero = DAG.getConstantFP(0.0, DL, EltVT);
    AllOnes = DAG.getConstantFP(
        APFloat::getAllOnesValue(EltVT.getSizeInBits(), true), DL, EltVT);
    LogicVT =
        MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
  } else {
    Zero = DAG.getConstant(0, DL, EltVT);
    AllOnes = DAG.getAllOnesConstant(DL, EltVT);
  }

  SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
  SDValue V;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Zeroable[i])
      continue;
    if (Mask[i] % Size != i)
      return SDValue(); // Not a blend.
    if (!V)
      V = Mask[i] < Size ? V1 : V2;
    else if (V != (Mask[i] < Size ? V1 : V2))
      return SDValue(); // Can only let one input through the mask.

    VMaskOps[i] = AllOnes;
  }
  if (!V)
    return SDValue(); // No non-zeroable elements!

  SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
  VMask = DAG.getBitcast(LogicVT, VMask);
  V = DAG.getBitcast(LogicVT, V);
  SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
  return DAG.getBitcast(VT, And);
}

/// Try to emit a blend instruction for a shuffle using bit math.
///
/// This is used as a fallback approach when first-class blend instructions are
/// unavailable. Currently it is only suitable for integer vectors, but could
/// be generalized for floating point vectors if desirable.
static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                      SDValue V2, ArrayRef<int> Mask,
                                      SelectionDAG &DAG) {
  assert(VT.isInteger() && "Only supports integer vector types!");
  MVT EltVT = VT.getVectorElementType();
  SDValue Zero = DAG.getConstant(0, DL, EltVT);
  SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
  SmallVector<SDValue, 16> MaskOps;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
      return SDValue(); // Shuffled input!
    MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
  }

  SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
  V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
  V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
  return DAG.getNode(ISD::OR, DL, VT, V1, V2);
}

static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG);

static bool matchVectorShuffleAsBlend(SDValue V1, SDValue V2,
                                      MutableArrayRef<int> Mask,
                                      const APInt &Zeroable, bool &ForceV1Zero,
                                      bool &ForceV2Zero, uint64_t &BlendMask) {
  bool V1IsZeroOrUndef =
      V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsZeroOrUndef =
      V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());

  BlendMask = 0;
  ForceV1Zero = false, ForceV2Zero = false;
  assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");

  // Attempt to generate the binary blend mask. If an input is zero then
  // we can use any lane.
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef)
      continue;
    if (M == i)
      continue;
    if (M == i + Size) {
      BlendMask |= 1ull << i;
      continue;
    }
    if (Zeroable[i]) {
      if (V1IsZeroOrUndef) {
        ForceV1Zero = true;
        Mask[i] = i;
        continue;
      }
      if (V2IsZeroOrUndef) {
        ForceV2Zero = true;
        BlendMask |= 1ull << i;
        Mask[i] = i + Size;
        continue;
      }
    }
    return false;
  }
  return true;
}

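// Widen a blend mask by repeating each selected bit Scale times. For example,
// BlendMask = 0b0101 with Size = 4 and Scale = 2 becomes 0b00110011.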
static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
                                            int Scale) {
  uint64_t ScaledMask = 0;
  for (int i = 0; i != Size; ++i)
    if (BlendMask & (1ull << i))
      ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
  return ScaledMask;
}

/// Try to emit a blend instruction for a shuffle.
///
/// This doesn't do any checks for the availability of instructions for blending
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is a blend, or convertible into a blend with zero.
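///
/// For example, for v4i32 with Mask = <0, 5, 2, 7>, elements 1 and 3 come
/// from V2, so (with SSE4.1) this lowers to X86ISD::BLENDI with immediate
/// 0b1010.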
static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                   SDValue V2, ArrayRef<int> Original,
                                   const APInt &Zeroable,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  uint64_t BlendMask = 0;
  bool ForceV1Zero = false, ForceV2Zero = false;
  SmallVector<int, 64> Mask(Original.begin(), Original.end());
  if (!matchVectorShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
                                 BlendMask))
    return SDValue();

  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
  if (ForceV1Zero)
    V1 = getZeroVector(VT, Subtarget, DAG, DL);
  if (ForceV2Zero)
    V2 = getZeroVector(VT, Subtarget, DAG, DL);

  switch (VT.SimpleTy) {
  case MVT::v4i64:
  case MVT::v8i32:
    assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
    LLVM_FALLTHROUGH;
  case MVT::v4f64:
  case MVT::v8f32:
    assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
    LLVM_FALLTHROUGH;
  case MVT::v2f64:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
    assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
    return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
                       DAG.getTargetConstant(BlendMask, DL, MVT::i8));
  case MVT::v16i16: {
    assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
      // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
      assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
      BlendMask = 0;
      for (int i = 0; i < 8; ++i)
        if (RepeatedMask[i] >= 8)
          BlendMask |= 1ull << i;
      return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                         DAG.getTargetConstant(BlendMask, DL, MVT::i8));
    }
    // Use PBLENDW for lower/upper lanes and then blend lanes.
    // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
    // merge to VSELECT where useful.
    uint64_t LoMask = BlendMask & 0xFF;
    uint64_t HiMask = (BlendMask >> 8) & 0xFF;
    if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
      SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                               DAG.getTargetConstant(LoMask, DL, MVT::i8));
      SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                               DAG.getTargetConstant(HiMask, DL, MVT::i8));
      return DAG.getVectorShuffle(
          MVT::v16i16, DL, Lo, Hi,
          {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
    }
    LLVM_FALLTHROUGH;
  }
  case MVT::v32i8:
    assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
    LLVM_FALLTHROUGH;
  case MVT::v16i8: {
    assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");

    // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
    if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                               Subtarget, DAG))
      return Masked;

    if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
      MVT IntegerType =
          MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
      SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
      return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
    }

    // Scale the blend by the number of bytes per element.
    int Scale = VT.getScalarSizeInBits() / 8;

    // This form of blend is always done on bytes. Compute the byte vector
    // type.
    MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);

    // x86 allows load folding with blendvb from the 2nd source operand. But
    // we are still using LLVM select here (see comment below), so that's V1.
    // If V2 can be load-folded and V1 cannot be load-folded, then commute to
    // allow that load-folding possibility.
    if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
      ShuffleVectorSDNode::commuteMask(Mask);
      std::swap(V1, V2);
    }

    // Compute the VSELECT mask. Note that VSELECT is really confusing in the
    // mix of LLVM's code generator and the x86 backend. We tell the code
    // generator that boolean values in the elements of an x86 vector register
    // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
    // mapping a select to operand #1, and 'false' mapping to operand #2. The
    // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
    // of the element (the remaining are ignored) and 0 in that high bit would
    // mean operand #1 while 1 in the high bit would mean operand #2. So while
    // the LLVM model for boolean values in vector elements gets the relevant
    // bit set, it is set backwards and over constrained relative to x86's
    // actual model.
    SmallVector<SDValue, 32> VSELECTMask;
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
      for (int j = 0; j < Scale; ++j)
        VSELECTMask.push_back(
            Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
                        : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
                                          MVT::i8));

    V1 = DAG.getBitcast(BlendVT, V1);
    V2 = DAG.getBitcast(BlendVT, V2);
    return DAG.getBitcast(
        VT,
        DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
                      V1, V2));
  }
  case MVT::v16f32:
  case MVT::v8f64:
  case MVT::v8i64:
  case MVT::v16i32:
  case MVT::v32i16:
  case MVT::v64i8: {
    // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
    bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
    if (!OptForSize) {
      if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                                 Subtarget, DAG))
        return Masked;
    }

    // Otherwise load an immediate into a GPR, cast to k-register, and use a
    // masked move.
    MVT IntegerType =
        MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
    SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
    return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
  }
  default:
    llvm_unreachable("Not a supported integer vector type!");
  }
}

/// Try to lower as a blend of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             SelectionDAG &DAG,
                                             bool ImmBlends = false) {
  // We build up the blend mask while checking whether a blend is a viable way
  // to reduce the shuffle.
  SmallVector<int, 32> BlendMask(Mask.size(), -1);
  SmallVector<int, 32> PermuteMask(Mask.size(), -1);

  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");

    if (BlendMask[Mask[i] % Size] < 0)
      BlendMask[Mask[i] % Size] = Mask[i];
    else if (BlendMask[Mask[i] % Size] != Mask[i])
      return SDValue(); // Can't blend in the needed input!

    PermuteMask[i] = Mask[i] % Size;
  }

  // If only immediate blends, then bail if the blend mask can't be widened to
  // i16.
  unsigned EltSize = VT.getScalarSizeInBits();
  if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
    return SDValue();

  SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
  return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
}

/// Try to lower as an unpack of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can unpack elements from two inputs and
/// then reduce the shuffle to a single-input (wider) permutation.
static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {
  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = NumElts / NumLanes;
  int NumHalfLaneElts = NumLaneElts / 2;

  bool MatchLo = true, MatchHi = true;
  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};

  // Determine UNPCKL/UNPCKH type and operand order.
  for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
    for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
      int M = Mask[Lane + Elt];
      if (M < 0)
        continue;

      SDValue &Op = Ops[Elt & 1];
      if (M < NumElts && (Op.isUndef() || Op == V1))
        Op = V1;
      else if (NumElts <= M && (Op.isUndef() || Op == V2))
        Op = V2;
      else
        return SDValue();

      int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
      MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
                 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
      MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
                 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
      if (!MatchLo && !MatchHi)
        return SDValue();
    }
  }
  assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");

  // Now check that each pair of elts come from the same unpack pair
  // and set the permute mask based on each pair.
  // TODO - Investigate cases where we permute individual elements.
  SmallVector<int, 32> PermuteMask(NumElts, -1);
  for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
    for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
      int M0 = Mask[Lane + Elt + 0];
      int M1 = Mask[Lane + Elt + 1];
      if (0 <= M0 && 0 <= M1 &&
          (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
        return SDValue();
      if (0 <= M0)
        PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
      if (0 <= M1)
        PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
    }
  }

  unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
  SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
  return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
}

/// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
/// permuting the elements of the result in place.
static SDValue lowerShuffleAsByteRotateAndPermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
      (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
      (VT.is512BitVector() && !Subtarget.hasBWI()))
    return SDValue();

  // We don't currently support lane crossing permutes.
  if (is128BitLaneCrossingShuffleMask(VT, Mask))
    return SDValue();

  int Scale = VT.getScalarSizeInBits() / 8;
  int NumLanes = VT.getSizeInBits() / 128;
  int NumElts = VT.getVectorNumElements();
  int NumEltsPerLane = NumElts / NumLanes;

  // Determine range of mask elts.
  bool Blend1 = true;
  bool Blend2 = true;
  std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
  std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
  for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
    for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
      int M = Mask[Lane + Elt];
      if (M < 0)
        continue;
      if (M < NumElts) {
        Blend1 &= (M == (Lane + Elt));
        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
        M = M % NumEltsPerLane;
        Range1.first = std::min(Range1.first, M);
        Range1.second = std::max(Range1.second, M);
      } else {
        M -= NumElts;
        Blend2 &= (M == (Lane + Elt));
        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
        M = M % NumEltsPerLane;
        Range2.first = std::min(Range2.first, M);
        Range2.second = std::max(Range2.second, M);
      }
    }
  }

  // Bail if we don't need both elements.
  // TODO - it might be worth doing this for unary shuffles if the permute
  // can be widened.
  if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
      !(0 <= Range2.first && Range2.second < NumEltsPerLane))
    return SDValue();

  if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
    return SDValue();

  // Rotate the 2 ops so we can access both ranges, then permute the result.
  auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
    MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
    SDValue Rotate = DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
                        DAG.getBitcast(ByteVT, Lo),
                        DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
    SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
    for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
      for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
        int M = Mask[Lane + Elt];
        if (M < 0)
          continue;
        if (M < NumElts)
          PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
        else
          PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
      }
    }
    return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
  };

  // Check if the ranges are small enough to rotate from either direction.
  if (Range2.second < Range1.first)
    return RotateAndPermute(V1, V2, Range1.first, 0);
  if (Range1.second < Range2.first)
    return RotateAndPermute(V2, V1, Range2.first, NumElts);
  return SDValue();
}

/// Generic routine to decompose a shuffle and blend into independent
/// blends and permutes.
///
/// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and
/// blends.
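///
/// For example (illustrative), a v4i32 mask [2, 5, 0, 7] decomposes into
/// V1Mask [2, -1, 0, -1], V2Mask [-1, 1, -1, 3] and BlendMask [0, 5, 2, 7];
/// here V2Mask is a no-op, so only V1 needs a pre-shuffle before the final
/// blend.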
static SDValue lowerShuffleAsDecomposedShuffleBlend(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  // Shuffle the input elements into the desired positions in V1 and V2 and
  // blend them together.
  SmallVector<int, 32> V1Mask(Mask.size(), -1);
  SmallVector<int, 32> V2Mask(Mask.size(), -1);
  SmallVector<int, 32> BlendMask(Mask.size(), -1);
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (Mask[i] >= 0 && Mask[i] < Size) {
      V1Mask[i] = Mask[i];
      BlendMask[i] = i;
    } else if (Mask[i] >= Size) {
      V2Mask[i] = Mask[i] - Size;
      BlendMask[i] = i + Size;
    }

  // Try to lower with the simpler initial blend/unpack/rotate strategies unless
  // one of the input shuffles would be a no-op. We prefer to shuffle the inputs
  // separately in that case, as the remaining shuffle may be able to fold with
  // a load or provide some other benefit. However, when we would have to do
  // twice as many shuffles to achieve this, a 2-input pre-shuffle first is the
  // better strategy.
  if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
    // Only prefer immediate blends to unpack/rotate.
    if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
                                                          DAG, true))
      return BlendPerm;
    if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
                                                           DAG))
      return UnpackPerm;
    if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
            DL, VT, V1, V2, Mask, Subtarget, DAG))
      return RotatePerm;
    // Unpack/rotate failed - try again with variable blends.
    if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
                                                          DAG))
      return BlendPerm;
  }

  V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
  V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
  return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
}

/// Try to lower a vector shuffle as a rotation.
///
/// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
static int matchShuffleAsRotate(SDValue &V1, SDValue &V2, ArrayRef<int> Mask) {
  int NumElts = Mask.size();

  // We need to detect various ways of spelling a rotation:
  //   [11, 12, 13, 14, 15,  0,  1,  2]
  //   [-1, 12, 13, 14, -1, -1,  1, -1]
  //   [-1, -1, -1, -1, -1, -1,  1,  2]
  //   [ 3,  4,  5,  6,  7,  8,  9, 10]
  //   [-1,  4,  5,  6, -1, -1,  9, -1]
  //   [-1,  4,  5,  6, -1, -1, -1, -1]
  int Rotation = 0;
  SDValue Lo, Hi;
  for (int i = 0; i < NumElts; ++i) {
    int M = Mask[i];
    assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
           "Unexpected mask index.");
    if (M < 0)
      continue;

    // Determine where a rotated vector would have started.
    int StartIdx = i - (M % NumElts);
    if (StartIdx == 0)
      // The identity rotation isn't interesting, stop.
      return -1;

    // If we found the tail of a vector the rotation must be the missing
    // front. If we found the head of a vector, the rotation must be how much
    // of the head is present.
    int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;

    if (Rotation == 0)
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
      // The rotations don't match, so we can't match this mask.
      return -1;

    // Compute which value this mask is pointing at.
    SDValue MaskV = M < NumElts ? V1 : V2;

    // Compute which of the two target values this index should be assigned
    // to. This reflects whether the high elements are remaining or the low
    // elements are remaining.
    SDValue &TargetV = StartIdx < 0 ? Hi : Lo;

    // Either set up this value if we've not encountered it before, or check
    // that it remains consistent.
    if (!TargetV)
      TargetV = MaskV;
    else if (TargetV != MaskV)
      // This may be a rotation, but it pulls from the inputs in some
      // unsupported interleaving.
      return -1;
  }

  // Check that we successfully analyzed the mask, and normalize the results.
  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");
  if (!Lo)
    Lo = Hi;
  else if (!Hi)
    Hi = Lo;

  V1 = Lo;
  V2 = Hi;

  return Rotation;
}

/// Try to lower a vector shuffle as a byte rotation.
///
/// SSSE3 has a generic PALIGNR instruction that will do an arbitrary
/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
/// does not check for the profitability of lowering either as PALIGNR or
/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
/// This matches shuffle vectors that look like:
///
///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
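///
/// For the v8i16 example above the element rotation is 3, which scales by the
/// 2-byte element size to a PALIGNR byte rotation of 6.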
static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
                                    ArrayRef<int> Mask) {
  // Don't accept any shuffles with zero elements.
  if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
    return -1;

  // PALIGNR works on 128-bit lanes.
  SmallVector<int, 16> RepeatedMask;
  if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
    return -1;

  int Rotation = matchShuffleAsRotate(V1, V2, RepeatedMask);
  if (Rotation <= 0)
    return -1;

  // PALIGNR rotates bytes, so we need to scale the
  // rotation based on how many bytes are in the vector lane.
  int NumElts = RepeatedMask.size();
  int Scale = 16 / NumElts;
  return Rotation * Scale;
}

static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
                                        SDValue V2, ArrayRef<int> Mask,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");

  SDValue Lo = V1, Hi = V2;
  int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
  if (ByteRotation <= 0)
    return SDValue();

  // Cast the inputs to i8 vector of correct length to match PALIGNR or
  // PSLLDQ/PSRLDQ.
  MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
  Lo = DAG.getBitcast(ByteVT, Lo);
  Hi = DAG.getBitcast(ByteVT, Hi);

  // SSSE3 targets can use the palignr instruction.
  if (Subtarget.hasSSSE3()) {
    assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
           "512-bit PALIGNR requires BWI instructions");
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
                        DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
  }

  assert(VT.is128BitVector() &&
         "Rotate-based lowering only supports 128-bit lowering!");
  assert(Mask.size() <= 16 &&
         "Can shuffle at most 16 bytes in a 128-bit vector!");
  assert(ByteVT == MVT::v16i8 &&
         "SSE2 rotate lowering only needed for v16i8!");

  // Default SSE2 implementation
  int LoByteShift = 16 - ByteRotation;
  int HiByteShift = ByteRotation;

  SDValue LoShift =
      DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
                  DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
  SDValue HiShift =
      DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
                  DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
  return DAG.getBitcast(VT,
                        DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
}

/// Try to lower a vector shuffle as a dword/qword rotation.
///
/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
/// rotation of the concatenation of two vectors; this routine will
/// try to generically lower a vector shuffle through such a pattern.
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
static SDValue lowerShuffleAsRotate(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
         "Only 32-bit and 64-bit elements are supported!");

  // 128/256-bit vectors are only supported with VLX.
  assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
         && "VLX required for 128/256-bit vectors");

  SDValue Lo = V1, Hi = V2;
  int Rotation = matchShuffleAsRotate(Lo, Hi, Mask);
  if (Rotation <= 0)
    return SDValue();

  return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
                     DAG.getTargetConstant(Rotation, DL, MVT::i8));
}

/// Try to lower a vector shuffle as a byte shift sequence.
static SDValue lowerVectorShuffleAsByteShiftMask(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
  assert(VT.is128BitVector() && "Only 128-bit vectors supported");

  // We need a shuffle that has zeros at one/both ends and a sequential
  // shuffle from one source within.
  unsigned ZeroLo = Zeroable.countTrailingOnes();
  unsigned ZeroHi = Zeroable.countLeadingOnes();
  if (!ZeroLo && !ZeroHi)
    return SDValue();

  unsigned NumElts = Mask.size();
  unsigned Len = NumElts - (ZeroLo + ZeroHi);
  if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
    return SDValue();

  unsigned Scale = VT.getScalarSizeInBits() / 8;
  ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
  if (!isUndefOrInRange(StubMask, 0, NumElts) &&
      !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
    return SDValue();

  SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
  Res = DAG.getBitcast(MVT::v16i8, Res);

  // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
  // inner sequential set of elements, possibly offset:
  // 01234567 --> zzzzzz01 --> 1zzzzzzz
  // 01234567 --> 4567zzzz --> zzzzz456
  // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
  if (ZeroLo == 0) {
    unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
  } else if (ZeroHi == 0) {
    unsigned Shift = Mask[ZeroLo] % NumElts;
    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
  } else if (!Subtarget.hasSSSE3()) {
    // If we don't have PSHUFB then it's worth avoiding an AND constant mask
    // by performing 3 byte shifts. Shuffle combining can kick in above that.
    // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
    unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Shift += Mask[ZeroLo] % NumElts;
    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
  } else
    return SDValue();

  return DAG.getBitcast(VT, Res);
}

/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
/// matches elements from one of the input vectors shuffled to the left or
/// right with zeroable elements 'shifted in'. It handles both the strictly
/// bit-wise element shifts and the byte shift across an entire 128-bit double
/// quad word lane.
///
/// PSLL : (little-endian) left bit shift.
/// [ zz, 0, zz,  2 ]
/// [ -1, 4, zz, -1 ]
/// PSRL : (little-endian) right bit shift.
/// [  1, zz,  3, zz]
/// [ -1, -1,  7, zz]
/// PSLLDQ : (little-endian) left byte shift
/// [ zz,  0,  1,  2,  3,  4,  5,  6]
/// [ zz, zz, -1, -1,  2,  3,  4, -1]
/// [ zz, zz, zz, zz, zz, zz, -1,  1]
/// PSRLDQ : (little-endian) right byte shift
/// [  5, 6,  7, zz, zz, zz, zz, zz]
/// [ -1, 5,  6,  7, zz, zz, zz, zz]
/// [  1, 2, -1, -1, -1, -1, zz, zz]
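///
/// For example (illustrative), with 32-bit elements the first PSLL mask above
/// matches by widening to v2i64 and returning a VSHLI shift amount of 32 bits.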
static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
                               unsigned ScalarSizeInBits, ArrayRef<int> Mask,
                               int MaskOffset, const APInt &Zeroable,
                               const X86Subtarget &Subtarget) {
  int Size = Mask.size();
  unsigned SizeInBits = Size * ScalarSizeInBits;

  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i < Size; i += Scale)
      for (int j = 0; j < Shift; ++j)
        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
          return false;

    return true;
  };

  auto MatchShift = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = Left ? i + Shift : i;
      unsigned Low = Left ? i : i + Shift;
      unsigned Len = Scale - Shift;
      if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
        return -1;
    }

    int ShiftEltBits = ScalarSizeInBits * Scale;
    bool ByteShift = ShiftEltBits > 64;
    Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
                  : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);

    // Normalize the scale for byte shifts to still produce an i64 element
    // type.
    Scale = ByteShift ? Scale / 2 : Scale;

    // We need to round trip through the appropriate type for the shift.
    MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
    ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
                        : MVT::getVectorVT(ShiftSVT, Size / Scale);
    return (int)ShiftAmt;
  };

  // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
  // keep doubling the size of the integer elements up to that. We can
  // then shift the elements of the integer vector by whole multiples of
  // their width within the elements of the larger integer vector. Test each
  // multiple to see if we can find a match with the moved element indices
  // and that the shifted in elements are all zeroable.
  unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
    for (int Shift = 1; Shift != Scale; ++Shift)
      for (bool Left : {true, false})
        if (CheckZeros(Shift, Scale, Left)) {
          int ShiftAmt = MatchShift(Shift, Scale, Left);
          if (0 < ShiftAmt)
            return ShiftAmt;
        }

  // No match.
  return -1;
}

static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
                                   SDValue V2, ArrayRef<int> Mask,
                                   const APInt &Zeroable,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  int Size = Mask.size();
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  MVT ShiftVT;
  SDValue V = V1;
  unsigned Opcode;

  // Try to match shuffle against V1 shift.
  int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
                                     Mask, 0, Zeroable, Subtarget);

  // If V1 failed, try to match shuffle against V2 shift.
  if (ShiftAmt < 0) {
    ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
                                   Mask, Size, Zeroable, Subtarget);
    V = V2;
  }

  if (ShiftAmt < 0)
    return SDValue();

  assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
         "Illegal integer vector type");
  V = DAG.getBitcast(ShiftVT, V);
  V = DAG.getNode(Opcode, DL, ShiftVT, V,
                  DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
  return DAG.getBitcast(VT, V);
}

// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
// Remainder of lower half result is zero and upper half is all undef.
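// For example (illustrative), a v8i16 mask [2, 3, zz, zz, -1, -1, -1, -1]
// matches with Len == 2 and Idx == 2, giving BitLen == 32 and BitIdx == 32.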
static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
                                ArrayRef<int> Mask, uint64_t &BitLen,
                                uint64_t &BitIdx, const APInt &Zeroable) {
  int Size = Mask.size();
  int HalfSize = Size / 2;
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
  assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");

  // Upper half must be undefined.
  if (!isUndefUpperHalf(Mask))
    return false;

  // Determine the extraction length from the part of the
  // lower half that isn't zeroable.
  int Len = HalfSize;
  for (; Len > 0; --Len)
    if (!Zeroable[Len - 1])
      break;
  assert(Len > 0 && "Zeroable shuffle mask");

  // Attempt to match first Len sequential elements from the lower half.
  SDValue Src;
  int Idx = -1;
  for (int i = 0; i != Len; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef)
      continue;
    SDValue &V = (M < Size ? V1 : V2);
    M = M % Size;

    // The extracted elements must start at a valid index and all mask
    // elements must be in the lower half.
    if (i > M || M >= HalfSize)
      return false;

    if (Idx < 0 || (Src == V && Idx == (M - i))) {
      Src = V;
      Idx = M - i;
      continue;
    }
    return false;
  }

  if (!Src || Idx < 0)
    return false;

  assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
  BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
  BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
  V1 = Src;
  return true;
}

// INSERTQ: Extract lowest Len elements from lower half of second source and
// insert over first source, starting at Idx.
// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
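// For example (illustrative), a v8i16 mask [0, 8, 9, 3, -1, -1, -1, -1]
// matches with Idx == 1 and Len == 2 (BitIdx == 16, BitLen == 32), using V1 as
// the base and inserting the low two elements of V2.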
static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
                                  ArrayRef<int> Mask, uint64_t &BitLen,
                                  uint64_t &BitIdx) {
  int Size = Mask.size();
  int HalfSize = Size / 2;
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  // Upper half must be undefined.
  if (!isUndefUpperHalf(Mask))
    return false;

  for (int Idx = 0; Idx != HalfSize; ++Idx) {
    SDValue Base;

    // Attempt to match first source from mask before insertion point.
    if (isUndefInRange(Mask, 0, Idx)) {
      /* EMPTY */
    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
      Base = V1;
    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
      Base = V2;
    } else {
      continue;
    }

    // Extend the extraction length looking to match both the insertion of
    // the second source and the remaining elements of the first.
    for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
      SDValue Insert;
      int Len = Hi - Idx;

      // Match insertion.
      if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
        Insert = V1;
      } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
        Insert = V2;
      } else {
        continue;
      }

      // Match the remaining elements of the lower half.
      if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
        /* EMPTY */
      } else if ((!Base || (Base == V1)) &&
                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
        Base = V1;
      } else if ((!Base || (Base == V2)) &&
                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
                                            Size + Hi)) {
        Base = V2;
      } else {
        continue;
      }

      BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
      BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
      V1 = Base;
      V2 = Insert;
      return true;
    }
  }

  return false;
}

/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable, SelectionDAG &DAG) {
  uint64_t BitLen, BitIdx;
  if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
    return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));

  if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
    return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
                       V2 ? V2 : DAG.getUNDEF(VT),
                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));

  return SDValue();
}

/// Lower a vector shuffle as a zero or any extension.
///
/// Given a specific number of elements, element bit width, and extension
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget. The extended elements are consecutive and
/// can start at an offset element index in the input; to avoid excess
/// shuffling, the offset must either be in the bottom lane or at the start
/// of a higher lane. All extended elements must come from the same lane.
static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
    ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(Scale > 1 && "Need a scale to extend.");
  int EltBits = VT.getScalarSizeInBits();
  int NumElements = VT.getVectorNumElements();
  int NumEltsPerLane = 128 / EltBits;
  int OffsetLane = Offset / NumEltsPerLane;
  assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
         "Only 8, 16, and 32 bit elements can be extended.");
  assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
  assert(0 <= Offset && "Extension offset must be positive.");
  assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
         "Extension offset must be in the first lane or start an upper lane.");

  // Check that an index is in same lane as the base offset.
  auto SafeOffset = [&](int Idx) {
    return OffsetLane == (Idx / NumEltsPerLane);
  };

  // Shift along an input so that the offset base moves to the first element.
  auto ShuffleOffset = [&](SDValue V) {
    if (!Offset)
      return V;

    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
    for (int i = 0; i * Scale < NumElements; ++i) {
      int SrcIdx = i + Offset;
      ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
    }
    return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
  };

  // Found a valid a/zext mask! Try various lowering strategies based on the
  // input type and available ISA extensions.
  if (Subtarget.hasSSE41()) {
    // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
    // PUNPCK will catch this in a later shuffle match.
    if (Offset && Scale == 2 && VT.is128BitVector())
      return SDValue();
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
                                 NumElements / Scale);
    InputV = ShuffleOffset(InputV);
    InputV = getExtendInVec(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND, DL,
                            ExtVT, InputV, DAG);
    return DAG.getBitcast(VT, InputV);
  }

  assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");

  // For any extends we can cheat for larger element sizes and use shuffle
  // instructions that can fold with a load and/or copy.
  if (AnyExt && EltBits == 32) {
    int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
                         -1};
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                        DAG.getBitcast(MVT::v4i32, InputV),
                        getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
  }
  if (AnyExt && EltBits == 16 && Scale > 2) {
    int PSHUFDMask[4] = {Offset / 2, -1,
                         SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
    InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                         DAG.getBitcast(MVT::v4i32, InputV),
                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
    int PSHUFWMask[4] = {1, -1, -1, -1};
    unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
    return DAG.getBitcast(
        VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
                        DAG.getBitcast(MVT::v8i16, InputV),
                        getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
  }

  // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
  // to 64-bits.
  if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
    assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
    assert(VT.is128BitVector() && "Unexpected vector width!");

    int LoIdx = Offset * EltBits;
    SDValue Lo = DAG.getBitcast(
        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
                                DAG.getTargetConstant(LoIdx, DL, MVT::i8)));

    if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
      return DAG.getBitcast(VT, Lo);

    int HiIdx = (Offset + 1) * EltBits;
    SDValue Hi = DAG.getBitcast(
        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
                                DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
    return DAG.getBitcast(VT,
                          DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
  }

  // If this would require more than 2 unpack instructions to expand, use
  // pshufb when available. We can only use more than 2 unpack instructions
  // when zero extending i8 elements which also makes it easier to use pshufb.
  if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
    assert(NumElements == 16 && "Unexpected byte vector width!");
    SDValue PSHUFBMask[16];
    for (int i = 0; i < 16; ++i) {
      int Idx = Offset + (i / Scale);
      if ((i % Scale == 0 && SafeOffset(Idx))) {
        PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
        continue;
      }
      PSHUFBMask[i] =
          AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
    }
    InputV = DAG.getBitcast(MVT::v16i8, InputV);
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
                        DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
  }

  // If we are extending from an offset, ensure we start on a boundary that
  // we can unpack from.
  int AlignToUnpack = Offset % (NumElements / Scale);
  if (AlignToUnpack) {
    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
    for (int i = AlignToUnpack; i < NumElements; ++i)
      ShMask[i - AlignToUnpack] = i;
    InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
    Offset -= AlignToUnpack;
  }

  // Otherwise emit a sequence of unpacks.
  do {
    unsigned UnpackLoHi = X86ISD::UNPCKL;
    if (Offset >= (NumElements / 2)) {
      UnpackLoHi = X86ISD::UNPCKH;
      Offset -= (NumElements / 2);
    }

    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
    SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
                         : getZeroVector(InputVT, Subtarget, DAG, DL);
    InputV = DAG.getBitcast(InputVT, InputV);
    InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
  return DAG.getBitcast(VT, InputV);
}

/// Try to lower a vector shuffle as a zero extension on any microarch.
///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering; it tries to aggressively
/// match this pattern. It will use all of the micro-architectural details it
/// can to emit an efficient lowering. It handles both blends with all-zero
/// inputs (to explicitly zero-extend) and undef lanes (sometimes undef due
/// to later masking).
///
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
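///
/// For example (illustrative), a v16i8 mask
/// [0, zz, zz, zz, 1, zz, zz, zz, 2, zz, zz, zz, 3, zz, zz, zz] matches with
/// an extension scale of 4 and lowers as a zero extension of the low four
/// bytes to v4i32 (PMOVZXBD when SSE4.1 is available, otherwise a sequence of
/// unpacks against zero).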
static SDValue lowerShuffleAsZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
  int Bits = VT.getSizeInBits();
  int NumLanes = Bits / 128;
  int NumElements = VT.getVectorNumElements();
  int NumEltsPerLane = NumElements / NumLanes;
  assert(VT.getScalarSizeInBits() <= 32 &&
         "Exceeds 32-bit integer zero extension limit");
  assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");

  // Define a helper function to check a particular ext-scale and lower to it if
  // valid.
  auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;
    int Offset = 0;
    int Matches = 0;
    for (int i = 0; i < NumElements; ++i) {
      int M = Mask[i];
      if (M < 0)
        continue; // Valid anywhere but doesn't tell us anything.
      if (i % Scale != 0) {
        // Each of the extended elements need to be zeroable.
        if (!Zeroable[i])
          return SDValue();

        // We no longer are in the anyext case.
        AnyExt = false;
        continue;
      }

      // Each of the base elements needs to be consecutive indices into the
      // same input vector.
      SDValue V = M < NumElements ? V1 : V2;
      M = M % NumElements;
      if (!InputV) {
        InputV = V;
        Offset = M - (i / Scale);
      } else if (InputV != V)
        return SDValue(); // Flip-flopping inputs.

      // Offset must start in the lowest 128-bit lane or at the start of an
      // upper lane.
      // FIXME: Is it ever worth allowing a negative base offset?
      if (!((0 <= Offset && Offset < NumEltsPerLane) ||
            (Offset % NumEltsPerLane) == 0))
        return SDValue();

      // If we are offsetting, all referenced entries must come from the same
      // lane.
      if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
        return SDValue();

      if ((M % NumElements) != (Offset + (i / Scale)))
        return SDValue(); // Non-consecutive strided elements.
      Matches++;
    }

    // If we fail to find an input, we have a zero-shuffle which should always
    // have already been handled.
    // FIXME: Maybe handle this here in case during blending we end up with one?
    if (!InputV)
      return SDValue();

    // If we are offsetting, don't extend if we only match a single input, we
    // can always do better by using a basic PSHUF or PUNPCK.
    if (Offset != 0 && Matches < 2)
      return SDValue();

    return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
                                                 InputV, Mask, Subtarget, DAG);
  };

  // The widest scale possible for extending is to a 64-bit integer.
  assert(Bits % 64 == 0 &&
         "The number of bits in a vector must be divisible by 64 on x86!");
  int NumExtElements = Bits / 64;

  // Each iteration, try extending the elements half as much, but into twice as
  // many elements.
  for (; NumExtElements < NumElements; NumExtElements *= 2) {
    assert(NumElements % NumExtElements == 0 &&
           "The input vector size must be divisible by the extended size.");
    if (SDValue V = Lower(NumElements / NumExtElements))
      return V;
  }

  // General extends failed, but 128-bit vectors may be able to use MOVQ.
  if (Bits != 128)
    return SDValue();

  // Returns one of the source operands if the shuffle can be reduced to a
  // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
  auto CanZExtLowHalf = [&]() {
    for (int i = NumElements / 2; i != NumElements; ++i)
      if (!Zeroable[i])
        return SDValue();
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
      return V1;
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
      return V2;
    return SDValue();
  };

  if (SDValue V = CanZExtLowHalf()) {
    V = DAG.getBitcast(MVT::v2i64, V);
    V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
    return DAG.getBitcast(VT, V);
  }

  // No viable ext lowering found.
  return SDValue();
}

/// Try to get a scalar value for a specific element of a vector.
///
/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
                                              SelectionDAG &DAG) {
  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  V = peekThroughBitcasts(V);

  // If the bitcasts shift the element size, we can't extract an equivalent
  // element from it.
  MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
    // Ensure the scalar operand is the same size as the destination.
    // FIXME: Add support for scalar truncation where possible.
    SDValue S = V.getOperand(Idx);
    if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
      return DAG.getBitcast(EltVT, S);
  }

  return SDValue();
}

/// Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
  V = peekThroughBitcasts(V);
  return ISD::isNON_EXTLoad(V.getNode());
}

/// Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern that we have especially efficient patterns to lower
/// across all subtarget feature sets.
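///
/// For example (illustrative), a v4i32 shuffle [4, zz, zz, zz] (V2's low
/// element inserted into an otherwise zeroable vector) lowers to a VZEXT_MOVL
/// of V2: the low element is kept and the upper elements are zeroed.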
static SDValue lowerShuffleAsElementInsertion(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();

  int V2Index =
      find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
      Mask.begin();
  bool IsV1Zeroable = true;
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (i != V2Index && !Zeroable[i]) {
      IsV1Zeroable = false;
      break;
    }

  // Check for a single input from a SCALAR_TO_VECTOR node.
  // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
  // all the smarts here sunk into that routine. However, the current
  // lowering of BUILD_VECTOR makes that nearly impossible until the old
  // vector shuffle lowering is dead.
  SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
                                               DAG);
  if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
    // We need to zext the scalar if it is smaller than an i32.
    V2S = DAG.getBitcast(EltVT, V2S);
    if (EltVT == MVT::i8 || EltVT == MVT::i16) {
      // Using zext to expand a narrow element won't work for non-zero
      // insertions.
      if (!IsV1Zeroable)
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
    }
    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
  } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
             EltVT == MVT::i16) {
    // Either not inserting from the low element of the input or the input
    // element size is too small to use VZEXT_MOVL to clear the high bits.
    return SDValue();
  }

  if (!IsV1Zeroable) {
    // If V1 can't be treated as a zero vector we have fewer options to lower
    // this. We can't support integer vectors or non-zero targets cheaply, and
    // the V1 elements can't be permuted in any way.
    assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
    if (!VT.isFloatingPoint() || V2Index != 0)
      return SDValue();
    SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
    V1Mask[V2Index] = -1;
    if (!isNoopShuffleMask(V1Mask))
      return SDValue();
    if (!VT.is128BitVector())
      return SDValue();

    // Otherwise, use MOVSD or MOVSS.
    assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
           "Only two types of floating point element types to handle!");
    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
                       ExtVT, V1, V2);
  }

  // This lowering only works for the low element with floating point vectors.
  if (VT.isFloatingPoint() && V2Index != 0)
    return SDValue();

  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
  if (ExtVT != VT)
    V2 = DAG.getBitcast(VT, V2);

  if (V2Index != 0) {
    // If we have 4 or fewer lanes we can cheaply shuffle the element into
    // the desired position. Otherwise it is more efficient to do a vector
    // shift left. We know that we can do a vector shift left because all
    // the inputs are zero.
    if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
      SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
      V2Shuffle[V2Index] = 0;
      V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
    } else {
      V2 = DAG.getBitcast(MVT::v16i8, V2);
      V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
                       DAG.getTargetConstant(
                           V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
      V2 = DAG.getBitcast(VT, V2);
    }
  }
  return V2;
}

/// Try to lower a broadcast of a single, truncated integer element coming
/// from a scalar_to_vector/build_vector node \p V0 with larger elements.
///
/// This assumes we have AVX2.
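///
/// For example (illustrative), broadcasting i16 element 3 of a v4i32
/// build_vector uses scalar operand 1 of \p V0, shifted right by 16 bits and
/// truncated to i16 before the VBROADCAST.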
static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
                                            int BroadcastIdx,
                                            const X86Subtarget &Subtarget,
                                            SelectionDAG &DAG) {
  assert(Subtarget.hasAVX2() &&
         "We can only lower integer broadcasts with AVX2!");

  EVT EltVT = VT.getVectorElementType();
  EVT V0VT = V0.getValueType();

  assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
  assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");

  EVT V0EltVT = V0VT.getVectorElementType();
  if (!V0EltVT.isInteger())
    return SDValue();

  const unsigned EltSize = EltVT.getSizeInBits();
  const unsigned V0EltSize = V0EltVT.getSizeInBits();

  // This is only a truncation if the original element type is larger.
  if (V0EltSize <= EltSize)
    return SDValue();

  assert(((V0EltSize % EltSize) == 0) &&
         "Scalar type sizes must all be powers of 2 on x86!");

  const unsigned V0Opc = V0.getOpcode();
  const unsigned Scale = V0EltSize / EltSize;
  const unsigned V0BroadcastIdx = BroadcastIdx / Scale;

  if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
      V0Opc != ISD::BUILD_VECTOR)
    return SDValue();

  SDValue Scalar = V0.getOperand(V0BroadcastIdx);

  // If we're extracting non-least-significant bits, shift so we can truncate.
  // Hopefully, we can fold away the trunc/srl/load into the broadcast.
  // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
  // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
  if (const int OffsetIdx = BroadcastIdx % Scale)
    Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
                         DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));

  return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                     DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
}

/// Test whether this can be lowered with a single SHUFPS instruction.
///
/// This is used to disable more specialized lowerings when the shufps lowering
/// will happen to be efficient.
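///
/// For example, [0, 2, 4, 6] can be done with a single SHUFPS (low half from
/// V1, high half from V2), while [0, 5, 2, 7] cannot, since each half would
/// need elements from both inputs.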
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
  // This routine only handles 128-bit shufps.
  assert(Mask.size() == 4 && "Unsupported mask size!");
  assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
  assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
  assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
  assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");

  // To lower with a single SHUFPS we need to have the low half and high half
  // each requiring a single input.
  if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
    return false;
  if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
    return false;

  return true;
}

/// If we are extracting two 128-bit halves of a vector and shuffling the
/// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
/// multi-shuffle lowering.
static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
                                             SDValue N1, ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {
  EVT VT = N0.getValueType();
  assert((VT.is128BitVector() &&
          (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
         "VPERM* family of shuffles requires 32-bit or 64-bit elements");

  // Check that both sources are extracts of the same source vector.
  if (!N0.hasOneUse() || !N1.hasOneUse() ||
      N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
      N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
      N0.getOperand(0) != N1.getOperand(0))
    return SDValue();

  SDValue WideVec = N0.getOperand(0);
  EVT WideVT = WideVec.getValueType();
  if (!WideVT.is256BitVector() || !isa<ConstantSDNode>(N0.getOperand(1)) ||
      !isa<ConstantSDNode>(N1.getOperand(1)))
    return SDValue();

  // Match extracts of each half of the wide source vector. Commute the shuffle
  // if the extract of the low half is N1.
  unsigned NumElts = VT.getVectorNumElements();
  SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
  const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
  const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
  if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
    ShuffleVectorSDNode::commuteMask(NewMask);
  else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
    return SDValue();

  // Final bailout: if the mask is simple, we are better off using an extract
  // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
  // because that avoids a constant load from memory.
  if (NumElts == 4 &&
      (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
    return SDValue();

  // Extend the shuffle mask with undef elements.
  NewMask.append(NumElts, -1);

  // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
  SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
                                      NewMask);
  // This is free: ymm -> xmm.
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
                     DAG.getIntPtrConstant(0, DL));
}

/// Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
                                       SDValue V2, ArrayRef<int> Mask,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
        (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
        (Subtarget.hasAVX2() && VT.isInteger())))
    return SDValue();

  // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
  // we can only broadcast from a register with AVX2.
  unsigned NumElts = Mask.size();
  unsigned NumEltBits = VT.getScalarSizeInBits();
  unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
                        ? X86ISD::MOVDDUP
                        : X86ISD::VBROADCAST;
  bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();

  // Check that the mask is a broadcast.
  int BroadcastIdx = -1;
  for (int i = 0; i != (int)NumElts; ++i) {
    SmallVector<int, 8> BroadcastMask(NumElts, i);
    if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
      BroadcastIdx = i;
      break;
    }
  }

  if (BroadcastIdx < 0)
    return SDValue();
  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "comes from V1.");

  // Go up the chain of (vector) values to find a scalar load that we can
  // combine with the broadcast.
  int BitOffset = BroadcastIdx * NumEltBits;
  SDValue V = V1;
  for (;;) {
    switch (V.getOpcode()) {
    case ISD::BITCAST: {
      V = V.getOperand(0);
      continue;
    }
    case ISD::CONCAT_VECTORS: {
      int OpBitWidth = V.getOperand(0).getValueSizeInBits();
      int OpIdx = BitOffset / OpBitWidth;
      V = V.getOperand(OpIdx);
      BitOffset %= OpBitWidth;
      continue;
    }
    case ISD::INSERT_SUBVECTOR: {
      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
      if (!ConstantIdx)
        break;

      int EltBitWidth = VOuter.getScalarValueSizeInBits();
      int Idx = (int)ConstantIdx->getZExtValue();
      int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
      int BeginOffset = Idx * EltBitWidth;
      int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
      if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
        BitOffset -= BeginOffset;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }
  assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
  BroadcastIdx = BitOffset / NumEltBits;

  // Do we need to bitcast the source to retrieve the original broadcast index?
  bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;

  // Check if this is a broadcast of a scalar. We special case lowering
  // for scalars so that we can more effectively fold with loads.
  // If the original value has a larger element type than the shuffle, the
  // broadcast element is in essence truncated. Make that explicit to ease
  // folding.
  if (BitCastSrc && VT.isInteger())
    if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
            DL, VT, V, BroadcastIdx, Subtarget, DAG))
      return TruncBroadcast;

  MVT BroadcastVT = VT;

  // Also check the simpler case, where we can directly reuse the scalar.
  if (!BitCastSrc &&
      ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
       (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
    V = V.getOperand(BroadcastIdx);

    // If we can't broadcast from a register, check that the input is a load.
    if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
      return SDValue();
  } else if (MayFoldLoad(V) && cast<LoadSDNode>(V)->isSimple()) {
    // 32-bit targets need to load i64 as a f64 and then bitcast the result.
    if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
      BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
      Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
                   ? X86ISD::MOVDDUP
                   : Opcode;
    }

    // If we are broadcasting a load that is only used by the shuffle
    // then we can reduce the vector load to the broadcasted scalar load.
    LoadSDNode *Ld = cast<LoadSDNode>(V);
    SDValue BaseAddr = Ld->getOperand(1);
    EVT SVT = BroadcastVT.getScalarType();
    unsigned Offset = BroadcastIdx * SVT.getStoreSize();
    assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
    SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
    V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                    DAG.getMachineFunction().getMachineMemOperand(
                        Ld->getMemOperand(), Offset, SVT.getStoreSize()));
    DAG.makeEquivalentMemoryOrdering(Ld, V);
  } else if (!BroadcastFromReg) {
    // We can't broadcast from a vector register.
    return SDValue();
  } else if (BitOffset != 0) {
    // We can only broadcast from the zero-element of a vector register,
    // but it can be advantageous to broadcast from the zero-element of a
    // subvector.
    if (!VT.is256BitVector() && !VT.is512BitVector())
      return SDValue();

    // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
    if (VT == MVT::v4f64 || VT == MVT::v4i64)
      return SDValue();

    // Only broadcast the zero-element of a 128-bit subvector.
    if ((BitOffset % 128) != 0)
      return SDValue();

    assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
           "Unexpected bit-offset");
    assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
           "Unexpected vector size");
    unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
    V = extract128BitVector(V, ExtractIdx, DAG, DL);
  }

  if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
    V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                    DAG.getBitcast(MVT::f64, V));

  // Bitcast back to the same scalar type as BroadcastVT.
  if (V.getValueType().getScalarType() != BroadcastVT.getScalarType()) {
    assert(NumEltBits == BroadcastVT.getScalarSizeInBits() &&
           "Unexpected vector element size");
    MVT ExtVT;
    if (V.getValueType().isVector()) {
      unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
      ExtVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
    } else {
      ExtVT = BroadcastVT.getScalarType();
    }
    V = DAG.getBitcast(ExtVT, V);
  }

  // 32-bit targets need to load i64 as a f64 and then bitcast the result.
  if (!Subtarget.is64Bit() && V.getValueType() == MVT::i64) {
    V = DAG.getBitcast(MVT::f64, V);
    unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
    BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
  }

  // We only support broadcasting from 128-bit vectors to minimize the
  // number of patterns we need to deal with in isel. So extract down to
  // 128-bits, removing as many bitcasts as possible.
  if (V.getValueSizeInBits() > 128) {
    MVT ExtVT = V.getSimpleValueType().getScalarType();
    ExtVT = MVT::getVectorVT(ExtVT, 128 / ExtVT.getScalarSizeInBits());
    V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
    V = DAG.getBitcast(ExtVT, V);
  }

  return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
}

// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
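// The INSERTPS immediate encodes the source element in bits [7:6], the
// destination element in bits [5:4] and the zero mask in bits [3:0]; e.g.
// inserting V2[2] into lane 1 while zeroing lane 3 encodes as 0x98.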
static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
                                   unsigned &InsertPSMask,
                                   const APInt &Zeroable,
                                   ArrayRef<int> Mask, SelectionDAG &DAG) {
  assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Attempt to match INSERTPS with one element from VA or VB being
  // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
  // are updated.
  auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
                             ArrayRef<int> CandidateMask) {
    unsigned ZMask = 0;
    int VADstIndex = -1;
    int VBDstIndex = -1;
    bool VAUsedInPlace = false;

    for (int i = 0; i < 4; ++i) {
      // Synthesize a zero mask from the zeroable elements (includes undefs).
      if (Zeroable[i]) {
        ZMask |= 1 << i;
        continue;
      }

      // Flag if we use any VA inputs in place.
      if (i == CandidateMask[i]) {
        VAUsedInPlace = true;
        continue;
      }

      // We can only insert a single non-zeroable element.
      if (VADstIndex >= 0 || VBDstIndex >= 0)
        return false;

      if (CandidateMask[i] < 4) {
        // VA input out of place for insertion.
        VADstIndex = i;
      } else {
        // VB input for insertion.
        VBDstIndex = i;
      }
    }

    // Don't bother if we have no (non-zeroable) element for insertion.
    if (VADstIndex < 0 && VBDstIndex < 0)
      return false;

    // Determine element insertion src/dst indices. The src index is from the
    // start of the inserted vector, not the start of the concatenated vector.
    unsigned VBSrcIndex = 0;
    if (VADstIndex >= 0) {
      // If we have a VA input out of place, we use VA as the V2 element
      // insertion and don't use the original V2 at all.
      VBSrcIndex = CandidateMask[VADstIndex];
      VBDstIndex = VADstIndex;
      VB = VA;
    } else {
      VBSrcIndex = CandidateMask[VBDstIndex] - 4;
    }

    // If no V1 inputs are used in place, then the result is created only from
    // the zero mask and the V2 insertion - so remove V1 dependency.
    if (!VAUsedInPlace)
      VA = DAG.getUNDEF(MVT::v4f32);

    // Update V1, V2 and InsertPSMask accordingly.
    V1 = VA;
    V2 = VB;

    // Insert the V2 element into the desired position.
    InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
    assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
    return true;
  };

  if (matchAsInsertPS(V1, V2, Mask))
    return true;

  // Commute and try again.
  SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
  ShuffleVectorSDNode::commuteMask(CommutedMask);
  if (matchAsInsertPS(V2, V1, CommutedMask))
    return true;

  return false;
}

static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
                                      ArrayRef<int> Mask, const APInt &Zeroable,
                                      SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");

  // Attempt to match the insertps pattern.
  unsigned InsertPSMask;
  if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
    return SDValue();

  // Insert the V2 element into the desired position.
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
}

/// Try to lower a shuffle as a permute of the inputs followed by an
/// UNPCK instruction.
///
/// This specifically targets cases where we end up with alternating between
/// the two inputs, and so can permute them into something that feeds a single
/// UNPCK instruction. Note that this routine only targets integer vectors
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
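///
/// For example, the v4i32 mask <1, 4, 0, 5> alternates between the inputs
/// but is not a direct unpack; swapping elements 0 and 1 of V1 first turns
/// it into the UNPCKLDQ pattern <0, 4, 1, 5>.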
static SDValue lowerShuffleAsPermuteAndUnpack(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(!VT.isFloatingPoint() &&
         "This routine only supports integer vectors.");
  assert(VT.is128BitVector() &&
         "This routine only works on 128-bit vectors.");
  assert(!V2.isUndef() &&
         "This routine should only be used when blending two inputs.");
  assert(Mask.size() >= 2 && "Single element masks are invalid.");

  int Size = Mask.size();

  int NumLoInputs =
      count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
  int NumHiInputs =
      count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });

  bool UnpackLo = NumLoInputs >= NumHiInputs;

  auto TryUnpack = [&](int ScalarSize, int Scale) {
    SmallVector<int, 16> V1Mask((unsigned)Size, -1);
    SmallVector<int, 16> V2Mask((unsigned)Size, -1);

    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      // Each element of the unpack contains Scale elements from this mask.
      int UnpackIdx = i / Scale;

      // We only handle the case where V1 feeds the first slots of the unpack.
      // We rely on canonicalization to ensure this is the case.
      if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
        return SDValue();

      // Setup the mask for this input. The indexing is tricky as we have to
      // handle the unpack stride.
      SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
      VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
          Mask[i] % Size;
    }

    // If we will have to shuffle both inputs to use the unpack, check whether
    // we can just unpack first and shuffle the result. If so, skip this unpack.
    if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
        !isNoopShuffleMask(V2Mask))
      return SDValue();

    // Shuffle the inputs into place.
    V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
    V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);

    // Cast the inputs to the type we will use to unpack them.
    MVT UnpackVT =
        MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
    V1 = DAG.getBitcast(UnpackVT, V1);
    V2 = DAG.getBitcast(UnpackVT, V2);

    // Unpack the inputs and cast the result back to the desired type.
    return DAG.getBitcast(
        VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                        UnpackVT, V1, V2));
  };

  // We try each unpack from the largest to the smallest to try and find one
  // that fits this mask.
  int OrigScalarSize = VT.getScalarSizeInBits();
  for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
    if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
      return Unpack;

  // If we're shuffling with a zero vector then we're better off not doing
  // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
  if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
      ISD::isBuildVectorAllZeros(V2.getNode()))
    return SDValue();

  // If none of the unpack-rooted lowerings worked (or were profitable) try an
  // initial unpack.
  if (NumLoInputs == 0 || NumHiInputs == 0) {
    assert((NumLoInputs > 0 || NumHiInputs > 0) &&
           "We have to have *some* inputs!");
    int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;

    // FIXME: We could consider the total complexity of the permute of each
    // possible unpacking. Or at the least we should consider how many
    // half-crossings are created.
    // FIXME: We could consider commuting the unpacks.

    SmallVector<int, 32> PermMask((unsigned)Size, -1);
    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");

      PermMask[i] =
          2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
    }
    return DAG.getVectorShuffle(
        VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
                            DL, VT, V1, V2),
        DAG.getUNDEF(VT), PermMask);
  }

  return SDValue();
}

/// Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
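    // Bit 0 of the immediate selects the element for lane 0 and bit 1 the
    // element for lane 1, so e.g. the splat mask <1, 1> yields an immediate
    // of 0b11.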
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
                         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
    }

    return DAG.getNode(
        X86ISD::SHUFP, DL, MVT::v2f64,
        Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
  }
  assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                        Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // Try to use one of the special instruction patterns to handle two common
  // blend patterns if a zero-blend above didn't work.
  if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
      isShuffleEquivalent(V1, V2, Mask, {1, 3}))
    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
      // We can either use a special instruction to load over the low double or
      // to move just the low double.
      return DAG.getNode(
          X86ISD::MOVSD, DL, MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));

  if (Subtarget.hasSSE41())
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
    return V;

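  // Bit 0 of the SHUFPD immediate selects V1's element for lane 0 and bit 1
  // selects V2's element for lane 1; e.g. the mask <1, 2> yields an
  // immediate of 0b01.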
  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
  return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
                     DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
}

/// Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We have to map the mask as it is actually a v4i32 shuffle instruction.
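    // E.g. the v2i64 mask <1, 1> widens to the v4i32 mask <2, 3, 2, 3>.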
    V1 = DAG.getBitcast(MVT::v4i32, V1);
    int WidenedMask[4] = {
        std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
        std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
    return DAG.getBitcast(
        MVT::v2i64,
        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
  }
  assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v2i64, V1, V2, Mask,
                                                Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;
  }

  // If we have direct support for blends, we should lower by decomposing into
  // a permute. That will be faster than the domain cross.
  if (IsBlendSupported)
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2, Mask,
                                                Subtarget, DAG);

  // We implement this with SHUFPD which is pretty lame because it will likely
  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
  // However, all the alternatives are still more cycles and newer chips don't
  // have this problem. It would be really nice if x86 had better shuffles here.
  V1 = DAG.getBitcast(MVT::v2f64, V1);
  V2 = DAG.getBitcast(MVT::v2f64, V2);
  return DAG.getBitcast(MVT::v2i64,
                        DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}

/// Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
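///
/// SHUFPS selects result lanes 0-1 from its first operand and lanes 2-3 from
/// its second, each lane via a 2-bit field of the immediate. For example, the
/// mask <0, 1, 4, 5> lowers to SHUFPS(V1, V2) with immediate 0x44.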
static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
                                      ArrayRef<int> Mask, SDValue V1,
                                      SDValue V2, SelectionDAG &DAG) {
  SDValue LowV = V1, HighV = V2;
  int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 1) {
    int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();

    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
    int V2AdjIndex = V2Index ^ 1;

    if (Mask[V2AdjIndex] < 0) {
      // Handles all the cases where we have a single V2 element and an undef.
      // This will only ever happen in the high lanes because we commute the
      // vector otherwise.
      if (V2Index < 2)
        std::swap(LowV, HighV);
      NewMask[V2Index] -= 4;
    } else {
      // Handle the case where the V2 element ends up adjacent to a V1 element.
      // To make this work, blend them together as the first step.
      int V1Index = V2AdjIndex;
      int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
      V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));

      // Now proceed to reconstruct the final blend as we have the necessary
      // high or low half formed.
      if (V2Index < 2) {
        LowV = V2;
        HighV = V1;
      } else {
        HighV = V2;
      }
      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
    }
  } else if (NumV2Elements == 2) {
    if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
    } else if (Mask[2] < 4 && Mask[3] < 4) {
      // We also handle the reversed case because this utility may get called
      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
      // arrange things in the right direction.
      NewMask[0] -= 4;
      NewMask[1] -= 4;
      HighV = V1;
      LowV = V2;
    } else {
      // We have a mixture of V1 and V2 in both low and high lanes. Rather than
      // trying to place elements directly, just blend them and set up the final
      // shuffle to place them.

      // The first two blend mask elements are for V1, the second two are for
      // V2.
      int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
                          Mask[2] < 4 ? Mask[2] : Mask[3],
                          (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
                          (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
      V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));

      // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // a SHUFPS.
      LowV = HighV = V1;
      NewMask[0] = Mask[0] < 4 ? 0 : 2;
      NewMask[1] = Mask[0] < 4 ? 2 : 0;
      NewMask[2] = Mask[2] < 4 ? 1 : 3;
      NewMask[3] = Mask[2] < 4 ? 3 : 1;
    }
  }
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
}

/// Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Use even/odd duplicate instructions for masks that match their pattern.
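    // (MOVSLDUP duplicates the even elements, MOVSHDUP the odd elements.)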
    if (Subtarget.hasSSE3()) {
      if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
        return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
      if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
        return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
    }

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
    }

    // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
    // in SSE1 because otherwise they are widened to v2f64 and never get here.
    if (!Subtarget.hasSSE2()) {
      if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
        return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
      if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
        return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
    }

    // Otherwise, use a straight shuffle of a single input vector. We pass the
    // input vector to both operands to simulate this with a SHUFPS.
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // There are special ways we can lower some single-element blends. However, we
  // have custom ways we can lower more complex single-element blends below that
  // we defer to if both this and BLENDPS fail to match, so restrict this to
  // when the V2 input is targeting element 0 of the mask -- that is the fast
  // case here.
  if (NumV2Elements == 1 && Mask[0] >= 4)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  if (Subtarget.hasSSE41()) {
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

    // Use INSERTPS if we can complete the shuffle efficiently.
    if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
      return V;

    if (!isSingleSHUFPSMask(Mask))
      if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
                                                            V2, Mask, DAG))
        return BlendPerm;
  }

  // Use low/high mov instructions. These are only valid in SSE1 because
  // otherwise they are widened to v2f64 and never get here.
  if (!Subtarget.hasSSE2()) {
    if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
      return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
    if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
      return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
    return V;

  // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}

/// Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Try to use broadcast unless the mask only has one non-undef element.
    if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
      if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
                                                      Mask, Subtarget, DAG))
        return Broadcast;
    }

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We coerce the shuffle pattern to be compatible with UNPCK instructions
    // but we aren't actually going to use the UNPCK instruction because doing
    // so prevents folding a load into this instruction or making a copy.
    const int UnpackLoMask[] = {0, 0, 1, 1};
    const int UnpackHiMask[] = {2, 2, 3, 3};
    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
      Mask = UnpackLoMask;
    else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
      Mask = UnpackHiMask;

    return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i32, V1, V2, Mask,
                                                Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;
  }

  // Assume that a single SHUFPS is faster than an alternative sequence of
  // multiple instructions (even if the CPU has a domain penalty).
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (!isSingleSHUFPSMask(Mask)) {
    // If we have direct support for blends, we should lower by decomposing into
    // a permute. That will be faster than the domain cross.
    if (IsBlendSupported)
      return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG);

    // Try to lower by permuting the inputs into an unpack instruction.
    if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
                                                        Mask, Subtarget, DAG))
      return Unpack;
  }

  // We implement this with SHUFPS because it can blend from two vectors.
  // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // relevant.
  SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
  SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
  SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
  return DAG.getBitcast(MVT::v4i32, ShufPS);
}

/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
/// shuffle lowering, and the most complex part.
///
/// The lowering strategy is to try to form pairs of input lanes which are
/// targeted at the same half of the final vector, and then use a dword shuffle
/// to place them onto the right half, and finally unpack the paired lanes into
/// their final position.
///
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
///
/// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
/// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
/// vector, form the analogous 128-bit 8-element Mask.
static SDValue lowerV8I16GeneralSingleInputShuffle(
    const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
  MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);

  assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
  MutableArrayRef<int> LoMask = Mask.slice(0, 4);
  MutableArrayRef<int> HiMask = Mask.slice(4, 4);

  // Attempt to directly match PSHUFLW or PSHUFHW.
  if (isUndefOrInRange(LoMask, 0, 4) &&
      isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
    return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
                       getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
  }
  if (isUndefOrInRange(HiMask, 4, 8) &&
      isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
    for (int i = 0; i != 4; ++i)
      HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
    return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
                       getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
  }

  SmallVector<int, 4> LoInputs;
  copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
  array_pod_sort(LoInputs.begin(), LoInputs.end());
  LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
  SmallVector<int, 4> HiInputs;
  copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
  array_pod_sort(HiInputs.begin(), HiInputs.end());
  HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
  int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
  int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
  int NumHToH = HiInputs.size() - NumLToH;
  MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
  MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
  MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
  MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);

  // If we are shuffling values from one half, check how many different DWORD
  // pairs we need to create. If only 1 or 2 then we can perform this as a
  // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
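  // For example, the low-half-only mask <1, 0, 3, 2, 1, 0, 3, 2> needs just
  // the dword pairs (1,0) and (3,2), so it can be lowered as PSHUFLW with
  // half-mask [1,0,3,2] followed by PSHUFD with [0,1,0,1].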
  auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
                               ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
    V = DAG.getNode(ShufWOp, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
    V = DAG.getBitcast(PSHUFDVT, V);
    V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
    return DAG.getBitcast(VT, V);
  };

  if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
    int PSHUFDMask[4] = { -1, -1, -1, -1 };
    SmallVector<std::pair<int, int>, 4> DWordPairs;
    int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);

    // Collect the different DWORD pairs.
    for (int DWord = 0; DWord != 4; ++DWord) {
      int M0 = Mask[2 * DWord + 0];
      int M1 = Mask[2 * DWord + 1];
      M0 = (M0 >= 0 ? M0 % 4 : M0);
      M1 = (M1 >= 0 ? M1 % 4 : M1);
      if (M0 < 0 && M1 < 0)
        continue;

      bool Match = false;
      for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
        auto &DWordPair = DWordPairs[j];
        if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
            (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
          DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
          DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
          PSHUFDMask[DWord] = DOffset + j;
          Match = true;
          break;
        }
      }
      if (!Match) {
        PSHUFDMask[DWord] = DOffset + DWordPairs.size();
        DWordPairs.push_back(std::make_pair(M0, M1));
      }
    }

    if (DWordPairs.size() <= 2) {
      DWordPairs.resize(2, std::make_pair(-1, -1));
      int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
                              DWordPairs[1].first, DWordPairs[1].second};
      if ((NumHToL + NumHToH) == 0)
        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
      if ((NumLToL + NumLToH) == 0)
        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
    }
  }

  // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
  // such inputs we can swap two of the dwords across the half mark and end up
  // with <=2 inputs to each half in each half. Once there, we can fall through
  // to the generic code below. For example:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
  //
  // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
  // and an existing 2-into-2 on the other half. In this case we may have to
  // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
  // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
  // Fortunately, we don't have to handle anything but a 2-into-2 pattern
  // because any other situation (including a 3-into-1 or 1-into-3 in the other
  // half than the one we target for fixing) will be fixed when we re-enter this
  // path. We will also combine away any sequence of PSHUFD instructions that
  // result into a single instruction. Here is an example of the tricky case:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
  //
  // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
  //
  // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
  //
  // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
  //
  // The result is fine to be handled by the generic logic.
  auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
                          ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
                          int AOffset, int BOffset) {
    assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
           "Must call this with A having 3 or 1 inputs from the A half.");
    assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
           "Must call this with B having 1 or 3 inputs from the B half.");
    assert(AToAInputs.size() + BToAInputs.size() == 4 &&
           "Must call this with either 3:1 or 1:3 inputs (summing to 4).");

    bool ThreeAInputs = AToAInputs.size() == 3;

    // Compute the index of dword with only one word among the three inputs in
    // a half by taking the sum of the half with three inputs and subtracting
    // the sum of the actual three inputs. The difference is the remaining
    // slot.
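    // For example, if the three inputs to the low half are {0, 1, 3}, the
    // full sum is 0+1+2+3 = 6, so the remaining slot is 6 - (0+1+3) = 2,
    // i.e. dword 1.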
    int ADWord = 0, BDWord = 0;
    int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
    int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
    int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
    ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
    int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
    int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
    int TripleNonInputIdx =
        TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
    TripleDWord = TripleNonInputIdx / 2;

    // We use xor with one to compute the adjacent DWord to whichever one the
    // OneInput is in.
    OneInputDWord = (OneInput / 2) ^ 1;

    // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
    // and BToA inputs. If there is also such a problem with the BToB and AToB
    // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
    // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
    // is essential that we don't *create* a 3<-1 as then we might oscillate.
    if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the
      // other half.
      int NumFlippedAToBInputs =
          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
      int NumFlippedBToBInputs =
          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
      if ((NumFlippedAToBInputs == 1 &&
           (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
          (NumFlippedBToBInputs == 1 &&
           (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
        // We choose whether to fix the A half or B half based on whether that
        // half has zero flipped inputs. At zero, we may not be able to fix it
        // with that half. We also bias towards fixing the B half because that
        // will more commonly be the high half, and we have to bias one way.
        auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
                                                       ArrayRef<int> Inputs) {
          int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
          bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
          // Determine whether the free index is in the flipped dword or the
          // unflipped dword based on where the pinned index is. We use this bit
          // in an xor to conditionally select the adjacent dword.
          int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
          bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
          if (IsFixIdxInput == IsFixFreeIdxInput)
            FixFreeIdx += 1;
          IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
          assert(IsFixIdxInput != IsFixFreeIdxInput &&
                 "We need to be changing the number of flipped inputs!");
          int PSHUFHalfMask[] = {0, 1, 2, 3};
          std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
          V = DAG.getNode(
              FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
              MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
              getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));

          for (int &M : Mask)
            if (M >= 0 && M == FixIdx)
              M = FixFreeIdx;
            else if (M >= 0 && M == FixFreeIdx)
              M = FixIdx;
        };
        if (NumFlippedBToBInputs != 0) {
          int BPinnedIdx =
              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
        } else {
          assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
          int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
        }
      }
    }

    int PSHUFDMask[] = {0, 1, 2, 3};
    PSHUFDMask[ADWord] = BDWord;
    PSHUFDMask[BDWord] = ADWord;
    V = DAG.getBitcast(
        VT,
        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));

    // Adjust the mask to match the new locations of A and B.
    for (int &M : Mask)
      if (M >= 0 && M/2 == ADWord)
        M = 2 * BDWord + M % 2;
      else if (M >= 0 && M/2 == BDWord)
        M = 2 * ADWord + M % 2;

    // Recurse back into this routine to re-compute state now that this isn't
    // a 3 and 1 problem.
    return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
  };
  if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
    return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
  if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
    return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);

  // At this point there are at most two inputs to the low and high halves from
  // each half. That means the inputs can always be grouped into dwords and
  // those dwords can then be moved to the correct half with a dword shuffle.
  // We use at most one low and one high word shuffle to collect these paired
  // inputs into dwords, and finally a dword shuffle to place them.
  int PSHUFLMask[4] = {-1, -1, -1, -1};
  int PSHUFHMask[4] = {-1, -1, -1, -1};
  int PSHUFDMask[4] = {-1, -1, -1, -1};

  // First fix the masks for all the inputs that are staying in their
  // original halves. This will then dictate the targets of the cross-half
  // shuffles.
  auto fixInPlaceInputs =
      [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
                    MutableArrayRef<int> SourceHalfMask,
                    MutableArrayRef<int> HalfMask, int HalfOffset) {
    if (InPlaceInputs.empty())
      return;
    if (InPlaceInputs.size() == 1) {
      SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
          InPlaceInputs[0] - HalfOffset;
      PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
      return;
    }
    if (IncomingInputs.empty()) {
      // Just fix all of the in place inputs.
      for (int Input : InPlaceInputs) {
        SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
        PSHUFDMask[Input / 2] = Input / 2;
      }
      return;
    }

    assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
    SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
        InPlaceInputs[0] - HalfOffset;
    // Put the second input next to the first so that they are packed into
    // a dword. We find the adjacent index by toggling the low bit.
    int AdjIndex = InPlaceInputs[0] ^ 1;
    SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
    std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
    PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
  };
  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
  fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);

  // Now gather the cross-half inputs and place them into a free dword of
  // their target half.
  // FIXME: This operation could almost certainly be simplified dramatically to
  // look more like the 3-1 fixing operation.
  auto moveInputsToRightHalf = [&PSHUFDMask](
      MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
      MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
      MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
      int DestOffset) {
    auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
      return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
    };
    auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
                                               int Word) {
      int LowWord = Word & ~1;
      int HighWord = Word | 1;
      return isWordClobbered(SourceHalfMask, LowWord) ||
             isWordClobbered(SourceHalfMask, HighWord);
    };

    if (IncomingInputs.empty())
      return;

    if (ExistingInputs.empty()) {
      // Map any dwords with inputs from them into the right half.
      for (int Input : IncomingInputs) {
        // If the source half mask maps over the inputs, turn those into
        // swaps and use the swapped lane.
        if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
          if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
            SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
                Input - SourceOffset;
            // We have to swap the uses in our half mask in one sweep.
            for (int &M : HalfMask)
              if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
                M = Input;
              else if (M == Input)
                M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
          } else {
            assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
                       Input - SourceOffset &&
                   "Previous placement doesn't match!");
          }
          // Note that this correctly re-maps both when we do a swap and when
          // we observe the other side of the swap above. We rely on that to
          // avoid swapping the members of the input list directly.
          Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
        }

        // Map the input's dword into the correct half.
        if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
          PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
        else
          assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
                     Input / 2 &&
                 "Previous placement doesn't match!");
      }

      // And just directly shift any other-half mask elements to be same-half
      // as we will have mirrored the dword containing the element into the
      // same position within that half.
      for (int &M : HalfMask)
        if (M >= SourceOffset && M < SourceOffset + 4) {
          M = M - SourceOffset + DestOffset;
          assert(M >= 0 && "This should never wrap below zero!");
        }
      return;
    }

    // Ensure we have the input in a viable dword of its current half. This
    // is particularly tricky because the original position may be clobbered
    // by inputs being moved and *staying* in that half.
    if (IncomingInputs.size() == 1) {
      if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
        int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
                         SourceOffset;
        SourceHalfMask[InputFixed - SourceOffset] =
            IncomingInputs[0] - SourceOffset;
        std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
                     InputFixed);
        IncomingInputs[0] = InputFixed;
      }
    } else if (IncomingInputs.size() == 2) {
      if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
          isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
        // We have two non-adjacent or clobbered inputs we need to extract from
        // the source half. To do this, we need to map them into some adjacent
        // dword slot in the source mask.
        int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
                              IncomingInputs[1] - SourceOffset};

        // If there is a free slot in the source half mask adjacent to one of
        // the inputs, place the other input in it. We use (Index XOR 1) to
        // compute an adjacent index.
        if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
            SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
          SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
          InputsFixed[1] = InputsFixed[0] ^ 1;
        } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
                   SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
          SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
          SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
          InputsFixed[0] = InputsFixed[1] ^ 1;
        } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
                   SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
          // The two inputs are in the same DWord but it is clobbered and the
          // adjacent DWord isn't used at all. Move both inputs to the free
          // slot.
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
          InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
          InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
        } else {
          // The only way we hit this point is if there is no clobbering
          // (because there are no off-half inputs to this half) and there is no
          // free slot adjacent to one of the inputs. In this case, we have to
          // swap an input with a non-input.
          for (int i = 0; i < 4; ++i)
            assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
                   "We can't handle any clobbers here!");
          assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
                 "Cannot have adjacent inputs here!");

          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
          SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;

          // We also have to update the final source mask in this case because
          // it may need to undo the above swap.
          for (int &M : FinalSourceHalfMask)
            if (M == (InputsFixed[0] ^ 1) + SourceOffset)
              M = InputsFixed[1] + SourceOffset;
            else if (M == InputsFixed[1] + SourceOffset)
              M = (InputsFixed[0] ^ 1) + SourceOffset;

          InputsFixed[1] = InputsFixed[0] ^ 1;
        }

        // Point everything at the fixed inputs.
        for (int &M : HalfMask)
          if (M == IncomingInputs[0])
            M = InputsFixed[0] + SourceOffset;
          else if (M == IncomingInputs[1])
            M = InputsFixed[1] + SourceOffset;

        IncomingInputs[0] = InputsFixed[0] + SourceOffset;
        IncomingInputs[1] = InputsFixed[1] + SourceOffset;
      }
    } else {
      llvm_unreachable("Unhandled input size!");
    }

    // Now hoist the DWord down to the right half.
    int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
    assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
    PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
    for (int &M : HalfMask)
      for (int Input : IncomingInputs)
        if (M == Input)
          M = FreeDWord * 2 + Input % 2;
  };
  moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
                        /*SourceOffset*/ 4, /*DestOffset*/ 0);
  moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
                        /*SourceOffset*/ 0, /*DestOffset*/ 4);

  // Now enact all the shuffles we've computed to move the inputs into their
  // target half.
  if (!isNoopShuffleMask(PSHUFLMask))
    V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
  if (!isNoopShuffleMask(PSHUFHMask))
    V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
  if (!isNoopShuffleMask(PSHUFDMask))
    V = DAG.getBitcast(
        VT,
        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));

  // At this point, each half should contain all its inputs, and we can then
  // just shuffle them into their final position.
  assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
         "Failed to lift all the high half inputs to the low mask!");
  assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
         "Failed to lift all the low half inputs to the high mask!");

  // Do a half shuffle for the low mask.
  if (!isNoopShuffleMask(LoMask))
    V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));

  // Do a half shuffle with the high mask after shifting its values down.
  for (int &M : HiMask)
    if (M >= 0)
      M -= 4;
  if (!isNoopShuffleMask(HiMask))
    V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));

  return V;
}

/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
/// blend if only one input is used.
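///
/// Bytes that should come from the other input are given the 0x80 index,
/// which PSHUFB treats as "write zero", so when both inputs are used the two
/// shuffled results can simply be ORed together to form the blend.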
static SDValue lowerShuffleAsBlendOfPSHUFBs(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
  assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
         "Lane crossing shuffle masks not supported");

  int NumBytes = VT.getSizeInBits() / 8;
  int Size = Mask.size();
  int Scale = NumBytes / Size;
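  // Each mask element covers Scale bytes; e.g. for a 128-bit v8i16 shuffle,
  // Scale is 2 and mask element M expands to byte indices 2*M and 2*M+1.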

  SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
  SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
  V1InUse = false;
  V2InUse = false;

  for (int i = 0; i < NumBytes; ++i) {
    int M = Mask[i / Scale];
    if (M < 0)
      continue;

    const int ZeroMask = 0x80;
    int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
    int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
    if (Zeroable[i / Scale])
      V1Idx = V2Idx = ZeroMask;

    V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
    V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
    V1InUse |= (ZeroMask != V1Idx);
    V2InUse |= (ZeroMask != V2Idx);
  }

  MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
  if (V1InUse)
    V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
                     DAG.getBuildVector(ShufVT, DL, V1Mask));
  if (V2InUse)
    V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
                     DAG.getBuildVector(ShufVT, DL, V2Mask));

  // If we need shuffled inputs from both, blend the two.
  SDValue V;
  if (V1InUse && V2InUse)
    V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
  else
    V = V1InUse ? V1 : V2;

  // Cast the result back to the correct type.
  return DAG.getBitcast(VT, V);
}

/// Generic lowering of 8-lane i16 shuffles.
///
/// This handles both single-input shuffles and combined shuffle/blends with
/// two inputs. The single input shuffles are immediately delegated to
/// a dedicated lowering routine.
///
/// The blends are lowered in one of three fundamental ways. If there are few
/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
/// of the input is significantly cheaper when lowered as an interleaving of
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });

  if (NumV2Inputs == 0) {
    // Try to use shift instructions.
    if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
                                            Zeroable, Subtarget, DAG))
      return Shift;

    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
      return V;

    // Use dedicated pack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
                                         Subtarget))
      return V;

    // Try to use byte rotation instructions.
    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
                                                  Subtarget, DAG))
      return Rotate;

    // Make a copy of the mask so it can be modified.
    SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
    return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
                                               Subtarget, DAG);
  }

  assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
         "All single-input shuffles should be canonicalized to be V1-input "
         "shuffles.");

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // See if we can use SSE4A Extraction / Insertion.
  if (Subtarget.hasSSE4A())
    if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
                                          Zeroable, DAG))
      return V;

  // There are special ways we can lower some single-element blends.
  if (NumV2Inputs == 1)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  if (SDValue BitBlend =
          lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
    return BitBlend;

  // Try to use byte shift instructions to mask.
  if (SDValue V = lowerVectorShuffleAsByteShiftMask(
          DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return V;

  // Try to lower by permuting the inputs into an unpack instruction.
  if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
                                                      Mask, Subtarget, DAG))
    return Unpack;

  // If we can't directly blend but can use PSHUFB, that will be better as it
  // can both shuffle and set up the blend that would otherwise be inefficient.
  if (!IsBlendSupported && Subtarget.hasSSSE3()) {
    bool V1InUse, V2InUse;
    return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
                                        Zeroable, DAG, V1InUse, V2InUse);
  }

  // We can always bit-blend if we have to so the fallback strategy is to
  // decompose into single-input permutes and blends.
  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
                                              Mask, Subtarget, DAG);
}

/// Check whether a compaction lowering can be done by dropping even
/// elements and compute how many times even elements must be dropped.
///
/// This handles shuffles which take every (2^N)-th element for N = 1, 2, or 3,
/// i.e. strides that are a power of two. Example shuffle masks:
///
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
///
/// Any of these lanes can of course be undef.
///
/// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 has any opportunities here
/// for larger N.
///
/// \returns N above, or the number of times even elements must be dropped if
/// there is such a number. Otherwise returns zero.
static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
                                          bool IsSingleInput) {
  // The modulus for the shuffle vector entries is based on whether this is
  // a single input or not.
  int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
  assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
         "We should only be called with masks with a power-of-2 size!");

  uint64_t ModMask = (uint64_t)ShuffleModulus - 1;

  // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
  // and 2^3 simultaneously. This is because we may have ambiguity with
  // partially undef inputs.
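  // For example, with a 16-element two-input mask (ShuffleModulus == 32), the
  // N == 2 candidate requires each defined entry to equal (i << 2) & 31, i.e.
  // 0, 4, 8, ..., 28, 0, 4, ..., 28.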
  bool ViableForN[3] = {true, true, true};

  for (int i = 0, e = Mask.size(); i < e; ++i) {
    // Ignore undef lanes; we'll optimistically collapse them to the pattern we
    // want.
    if (Mask[i] < 0)
      continue;

    bool IsAnyViable = false;
    for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
      if (ViableForN[j]) {
        uint64_t N = j + 1;

        // The shuffle mask must be equal to (i * 2^N) % M.
        if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
          IsAnyViable = true;
        else
          ViableForN[j] = false;
      }
    // Early exit if we exhaust the possible powers of two.
    if (!IsAnyViable)
      break;
  }

  for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
    if (ViableForN[j])
      return j + 1;

  // Return 0 as there is no viable power of two.
  return 0;
}

static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
                                     ArrayRef<int> Mask, SDValue V1,
                                     SDValue V2, SelectionDAG &DAG) {
  MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
  MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());

  SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
  if (V2.isUndef())
    return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);

  return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
}

/// Generic lowering of v16i8 shuffles.
///
/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
/// detect any complexity reducing interleaving. If that doesn't help, it uses
/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use a zext lowering.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // See if we can use SSE4A Extraction / Insertion.
  if (Subtarget.hasSSE4A())
    if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
                                          Zeroable, DAG))
      return V;

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });

  // For single-input shuffles, there are some nicer lowering tricks we can use.
  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
      return V;

    // Check whether we can widen this to an i16 shuffle by duplicating bytes.
    // Notably, this handles splat and partial-splat shuffles more efficiently.
    // However, it only makes sense if the pre-duplication shuffle simplifies
    // things significantly. Currently, this means we need to be able to
    // express the pre-duplication shuffle as an i16 shuffle.
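    // For example, a byte splat such as <3,3,3,...> or a pairwise-duplicated
    // mask such as <0,0,1,1,2,2,3,3, u,u,u,u, 12,12,13,13> passes this check,
    // since every adjacent pair of bytes either matches or is undef.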
    //
    // FIXME: We should check for other patterns which can be widened into an
    // i16 shuffle as well.
    auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
      for (int i = 0; i < 16; i += 2)
        if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
          return false;

      return true;
    };
    auto tryToWidenViaDuplication = [&]() -> SDValue {
      if (!canWidenViaDuplication(Mask))
        return SDValue();
      SmallVector<int, 4> LoInputs;
      copy_if(Mask, std::back_inserter(LoInputs),
              [](int M) { return M >= 0 && M < 8; });
      array_pod_sort(LoInputs.begin(), LoInputs.end());
      LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
                     LoInputs.end());
      SmallVector<int, 4> HiInputs;
      copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
      array_pod_sort(HiInputs.begin(), HiInputs.end());
      HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
                     HiInputs.end());

      bool TargetLo = LoInputs.size() >= HiInputs.size();
      ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
      ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;

      int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
      SmallDenseMap<int, int, 8> LaneMap;
      for (int I : InPlaceInputs) {
        PreDupI16Shuffle[I / 2] = I / 2;
        LaneMap[I] = I;
      }
      int j = TargetLo ? 0 : 4, je = j + 4;
      for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
        // Check if j is already a shuffle of this input. This happens when
        // there are two adjacent bytes after we move the low one.
        if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
          // If we haven't yet mapped the input, search for a slot into which
          // we can map it.
          while (j < je && PreDupI16Shuffle[j] >= 0)
            ++j;

          if (j == je)
            // We can't place the inputs into a single half with a simple i16
            // shuffle, so bail.
            return SDValue();

          // Map this input with the i16 shuffle.
          PreDupI16Shuffle[j] = MovingInputs[i] / 2;
        }

        // Update the lane map based on the mapping we ended up with.
        LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
      }
      V1 = DAG.getBitcast(
          MVT::v16i8,
          DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));

      // Unpack the bytes to form the i16s that will be shuffled into place.
      bool EvenInUse = false, OddInUse = false;
      for (int i = 0; i < 16; i += 2) {
        EvenInUse |= (Mask[i + 0] >= 0);
        OddInUse |= (Mask[i + 1] >= 0);
        if (EvenInUse && OddInUse)
          break;
      }
      V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                       MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
                       OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));

      int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
      for (int i = 0; i < 16; ++i)
        if (Mask[i] >= 0) {
          int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
          assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
          if (PostDupI16Shuffle[i / 2] < 0)
            PostDupI16Shuffle[i / 2] = MappedMask;
          else
            assert(PostDupI16Shuffle[i / 2] == MappedMask &&
                   "Conflicting entries in the original shuffle!");
        }
      return DAG.getBitcast(
          MVT::v16i8,
          DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
    };
    if (SDValue V = tryToWidenViaDuplication())
      return V;
  }

  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
    return V;

  // Try to use byte shift instructions to mask.
  if (SDValue V = lowerVectorShuffleAsByteShiftMask(
          DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return V;

  // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
  // with PSHUFB. It is important to do this before we attempt to generate any
  // blends but after all of the single-input lowerings. If the single input
  // lowerings can find an instruction sequence that is faster than a PSHUFB, we
  // want to preserve that and we can DAG combine any longer sequences into
  // a PSHUFB in the end. But once we start blending from multiple inputs,
  // the complexity of DAG combining bad patterns back into PSHUFB is too high,
  // and there are *very* few patterns that would actually be faster than the
  // PSHUFB approach because of its ability to zero lanes.
  //
  // FIXME: The only exceptions to the above are blends which are exact
  // interleavings with direct instructions supporting them. We currently don't
  // handle those well here.
  if (Subtarget.hasSSSE3()) {
    bool V1InUse = false;
    bool V2InUse = false;

    SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
        DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);

    // If both V1 and V2 are in use and we can use a direct blend or an unpack,
    // do so. This avoids using them to handle blends-with-zero which is
    // important as a single pshufb is significantly faster for that.
    if (V1InUse && V2InUse) {
      if (Subtarget.hasSSE41())
        if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
                                                Zeroable, Subtarget, DAG))
          return Blend;

      // We can use an unpack to do the blending rather than an or in some
      // cases. Even though the or may be (very slightly) more efficient, we
      // prefer this lowering because there are common cases where part of
      // the complexity of the shuffles goes away when we do the final blend as
      // an unpack.
      // FIXME: It might be worth trying to detect if the unpack-feeding
      // shuffles will both be pshufb, in which case we shouldn't bother with
      // this.
      if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
        return Unpack;

      // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
      if (Subtarget.hasVBMI() && Subtarget.hasVLX())
        return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);

      // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
      // PALIGNR will be cheaper than the second PSHUFB+OR.
      if (SDValue V = lowerShuffleAsByteRotateAndPermute(
              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
        return V;
    }

    return PSHUFB;
  }

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
    return Blend;

  // Check whether a compaction lowering can be done. This handles shuffles
  // which take every Nth element for some power-of-two N (2, 4, or 8). See the
  // helper function for details.
  //
  // We special case these as they can be particularly efficiently handled with
  // the PACKUSWB instruction on x86 and they show up in common patterns of
  // rearranging bytes to truncate wide elements.
  bool IsSingleInput = V2.isUndef();
  if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
    // NumEvenDrops is the log2 of the stride between kept elements. Another
    // way of thinking about it is that we need to drop the even elements this
    // many times to get the original input.
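    // For example, the two-input mask <0, 2, 4, ..., 14, 16, 18, ..., 30> has
    // NumEvenDrops == 1: masking off the odd bytes of each input and doing a
    // single PACKUSWB produces the result.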

    // First we need to zero all the dropped bytes.
    assert(NumEvenDrops <= 3 &&
           "No support for dropping even elements more than 3 times.");
    SmallVector<SDValue, 16> ByteClearOps(16, DAG.getConstant(0, DL, MVT::i8));
    for (unsigned i = 0; i != 16; i += 1 << NumEvenDrops)
      ByteClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i8);
    SDValue ByteClearMask = DAG.getBuildVector(MVT::v16i8, DL, ByteClearOps);
    V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
    if (!IsSingleInput)
      V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);

    // Now pack things back together.
    V1 = DAG.getBitcast(MVT::v8i16, V1);
    V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
    SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
    for (int i = 1; i < NumEvenDrops; ++i) {
      Result = DAG.getBitcast(MVT::v8i16, Result);
      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
    }

    return Result;
  }

  // Handle multi-input cases by blending single-input shuffles.
  if (NumV2Elements > 0)
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, Mask,
                                                Subtarget, DAG);

  // The fallback path for single-input shuffles widens this into two v8i16
  // vectors with unpacks, shuffles those, and then pulls them back together
  // with a pack.
  SDValue V = V1;

  std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
  std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
  for (int i = 0; i < 16; ++i)
    if (Mask[i] >= 0)
      (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];

  SDValue VLoHalf, VHiHalf;
  // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
  // them out and avoid using UNPCK{L,H} to extract the elements of V as
  // i16s.
  if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
      none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
    // Use a mask to drop the high bytes.
    VLoHalf = DAG.getBitcast(MVT::v8i16, V);
    VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
                          DAG.getConstant(0x00FF, DL, MVT::v8i16));

    // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
    VHiHalf = DAG.getUNDEF(MVT::v8i16);

    // Squash the masks to point directly into VLoHalf.
    for (int &M : LoBlendMask)
      if (M >= 0)
        M /= 2;
    for (int &M : HiBlendMask)
      if (M >= 0)
        M /= 2;
  } else {
    // Otherwise just unpack the low half of V into VLoHalf and the high half
    // into VHiHalf so that we can blend them as i16s.
    SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);

    VLoHalf = DAG.getBitcast(
        MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
    VHiHalf = DAG.getBitcast(
        MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
  }

  SDValue LoV =
      DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
  SDValue HiV =
      DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);

  return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
}

/// Dispatching routine to lower various 128-bit x86 vector shuffles.
///
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  MVT VT, SDValue V1, SDValue V2,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  switch (VT.SimpleTy) {
  case MVT::v2i64:
    return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v2f64:
    return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v4i32:
    return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v4f32:
    return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8i16:
    return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16i8:
    return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Unimplemented!");
  }
}

/// Generic routine to split vector shuffle into half-sized shuffles.
///
/// This routine just extracts two subvectors, shuffles them independently, and
/// then concatenates them back together. This should work effectively with all
/// AVX vector shuffle types.
static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    SelectionDAG &DAG) {
  assert(VT.getSizeInBits() >= 256 &&
         "Only for 256-bit or wider vector shuffles!");
  assert(V1.getSimpleValueType() == VT && "Bad operand type!");
  assert(V2.getSimpleValueType() == VT && "Bad operand type!");

  ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
  ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);

  int NumElements = VT.getVectorNumElements();
  int SplitNumElements = NumElements / 2;
  MVT ScalarVT = VT.getVectorElementType();
  MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);

  // Rather than splitting build-vectors, just build two narrower build
  // vectors. This helps shuffling with splats and zeros.
  auto SplitVector = [&](SDValue V) {
    V = peekThroughBitcasts(V);

    MVT OrigVT = V.getSimpleValueType();
    int OrigNumElements = OrigVT.getVectorNumElements();
    int OrigSplitNumElements = OrigNumElements / 2;
    MVT OrigScalarVT = OrigVT.getVectorElementType();
    MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);

    SDValue LoV, HiV;

    auto *BV = dyn_cast<BuildVectorSDNode>(V);
    if (!BV) {
      LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
                        DAG.getIntPtrConstant(0, DL));
      HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
                        DAG.getIntPtrConstant(OrigSplitNumElements, DL));
    } else {
      SmallVector<SDValue, 16> LoOps, HiOps;
      for (int i = 0; i < OrigSplitNumElements; ++i) {
        LoOps.push_back(BV->getOperand(i));
        HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
      }
      LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
      HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
    }
    return std::make_pair(DAG.getBitcast(SplitVT, LoV),
                          DAG.getBitcast(SplitVT, HiV));
  };

  SDValue LoV1, HiV1, LoV2, HiV2;
  std::tie(LoV1, HiV1) = SplitVector(V1);
  std::tie(LoV2, HiV2) = SplitVector(V2);

  // Now create two 4-way blends of these half-width vectors.
  auto HalfBlend = [&](ArrayRef<int> HalfMask) {
    bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
    SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
    SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
    SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
    for (int i = 0; i < SplitNumElements; ++i) {
      int M = HalfMask[i];
      if (M >= NumElements) {
        if (M >= NumElements + SplitNumElements)
          UseHiV2 = true;
        else
          UseLoV2 = true;
        V2BlendMask[i] = M - NumElements;
        BlendMask[i] = SplitNumElements + i;
      } else if (M >= 0) {
        if (M >= SplitNumElements)
          UseHiV1 = true;
        else
          UseLoV1 = true;
        V1BlendMask[i] = M;
        BlendMask[i] = i;
      }
    }

    // Because the lowering happens after all combining takes place, we need to
    // manually combine these blend masks as much as possible so that we create
    // a minimal number of high-level vector shuffle nodes.

    // First try just blending the halves of V1 or V2.
    if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
      return DAG.getUNDEF(SplitVT);
    if (!UseLoV2 && !UseHiV2)
      return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    if (!UseLoV1 && !UseHiV1)
      return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);

    SDValue V1Blend, V2Blend;
    if (UseLoV1 && UseHiV1) {
      V1Blend =
        DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    } else {
      // We only use half of V1 so map the usage down into the final blend mask.
      V1Blend = UseLoV1 ? LoV1 : HiV1;
      for (int i = 0; i < SplitNumElements; ++i)
        if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
          BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
    }
    if (UseLoV2 && UseHiV2) {
      V2Blend =
        DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
    } else {
      // We only use half of V2 so map the usage down into the final blend mask.
      V2Blend = UseLoV2 ? LoV2 : HiV2;
      for (int i = 0; i < SplitNumElements; ++i)
        if (BlendMask[i] >= SplitNumElements)
          BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
    }
    return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
  };
  SDValue Lo = HalfBlend(LoMask);
  SDValue Hi = HalfBlend(HiMask);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}

/// Either split a vector in halves or decompose the shuffles and the
/// blend.
///
/// This is provided as a good fallback for many lowerings of non-single-input
/// shuffles with more than one 128-bit lane. In those cases, we want to select
/// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                          SDValue V2, ArrayRef<int> Mask,
                                          const X86Subtarget &Subtarget,
                                          SelectionDAG &DAG) {
  assert(!V2.isUndef() && "This routine must not be used to lower single-input "
         "shuffles as it could then recurse on itself.");
  int Size = Mask.size();

  // If this can be modeled as a broadcast of two elements followed by a blend,
  // prefer that lowering. This is especially important because broadcasts can
  // often fold with memory operands.
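  // For example, the v8f32 mask <0, 8, 0, 8, 0, 0, 8, 8> broadcasts element 0
  // of each input and decomposes into two broadcasts followed by a blend.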
  auto DoBothBroadcast = [&] {
    int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
    for (int M : Mask)
      if (M >= Size) {
        if (V2BroadcastIdx < 0)
          V2BroadcastIdx = M - Size;
        else if (M - Size != V2BroadcastIdx)
          return false;
      } else if (M >= 0) {
        if (V1BroadcastIdx < 0)
          V1BroadcastIdx = M;
        else if (M != V1BroadcastIdx)
          return false;
      }
    return true;
  };
  if (DoBothBroadcast())
    return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
                                                Subtarget, DAG);

  // If the inputs all stem from a single 128-bit lane of each input, then we
  // split them rather than blending because the split will decompose to
  // unusually few instructions.
  int LaneCount = VT.getSizeInBits() / 128;
  int LaneSize = Size / LaneCount;
  SmallBitVector LaneInputs[2];
  LaneInputs[0].resize(LaneCount, false);
  LaneInputs[1].resize(LaneCount, false);
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0)
      LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
  if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
    return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);

  // Otherwise, just fall back to decomposed shuffles and a blend. This requires
  // that the decomposed single-input shuffles don't end up here.
  return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, Subtarget,
                                              DAG);
}

/// Lower a vector shuffle crossing multiple 128-bit lanes as
/// a lane permutation followed by a per-lane permutation.
///
/// This is mainly for cases where we can have non-repeating permutes
/// in each lane.
///
/// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
/// we should investigate merging them.
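///
/// For example, this routine lowers the v8f32 mask <5, 4, 7, 6, 1, 0, 3, 2> as
/// a lane permute selecting source lanes <1, 0> followed by the in-lane
/// permute <1, 0, 3, 2, 5, 4, 7, 6>.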
static SDValue lowerShuffleAsLanePermuteAndPermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  int NumElts = VT.getVectorNumElements();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumEltsPerLane = NumElts / NumLanes;

  SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
  SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);

  for (int i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;

    // Ensure that each lane comes from a single source lane.
    int SrcLane = M / NumEltsPerLane;
    int DstLane = i / NumEltsPerLane;
    if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
      return SDValue();
    SrcLaneMask[DstLane] = SrcLane;

    PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
  }

  // Make sure we set all elements of the lane mask, to avoid undef propagation.
  SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
  for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
    int SrcLane = SrcLaneMask[DstLane];
    if (0 <= SrcLane)
      for (int j = 0; j != NumEltsPerLane; ++j) {
        LaneMask[(DstLane * NumEltsPerLane) + j] =
            (SrcLane * NumEltsPerLane) + j;
      }
  }

  // If we're only shuffling a single lowest lane and the rest are identity
  // then don't bother.
  // TODO - isShuffleMaskInputInPlace could be extended to something like this.
  int NumIdentityLanes = 0;
  bool OnlyShuffleLowestLane = true;
  for (int i = 0; i != NumLanes; ++i) {
    if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
                                   i * NumEltsPerLane))
      NumIdentityLanes++;
    else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
      OnlyShuffleLowestLane = false;
  }
  if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
    return SDValue();

  SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
  return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
}

/// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
/// source with a lane permutation.
///
/// This lowering strategy results in four instructions in the worst case for a
/// single-input cross lane shuffle which is lower than any other fully general
/// cross-lane shuffle strategy I'm aware of. Special cases for each particular
/// shuffle pattern should be handled prior to trying this lowering.
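///
/// For example, the single-input v4f64 mask <1, 3, 0, 2> is lowered by
/// flipping the 128-bit lanes of V1 (as <2, 3, 0, 1>) and then shuffling V1
/// against the flipped copy with the now in-lane mask <1, 5, 6, 2>.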
static SDValue lowerShuffleAsLanePermuteAndShuffle(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // FIXME: This should probably be generalized for 512-bit vectors as well.
  assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
  int Size = Mask.size();
  int LaneSize = Size / 2;

  // If there are only inputs from one 128-bit lane, splitting will in fact be
  // less expensive. The flags track whether the given lane contains an element
  // that crosses to another lane.
  if (!Subtarget.hasAVX2()) {
    bool LaneCrossing[2] = {false, false};
    for (int i = 0; i < Size; ++i)
      if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
        LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
    if (!LaneCrossing[0] || !LaneCrossing[1])
      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
  } else {
    bool LaneUsed[2] = {false, false};
    for (int i = 0; i < Size; ++i)
      if (Mask[i] >= 0)
        LaneUsed[(Mask[i] / LaneSize)] = true;
    if (!LaneUsed[0] || !LaneUsed[1])
      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
  }

  // TODO - we could support shuffling V2 in the Flipped input.
  assert(V2.isUndef() &&
         "The last part of this routine only works on single-input shuffles");

  SmallVector<int, 32> InLaneMask(Mask.begin(), Mask.end());
  for (int i = 0; i < Size; ++i) {
    int &M = InLaneMask[i];
    if (M < 0)
      continue;
    if (((M % Size) / LaneSize) != (i / LaneSize))
      M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
  }
  assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
         "In-lane shuffle mask expected");

  // Flip the lanes, and shuffle the results which should now be in-lane.
  MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
  SDValue Flipped = DAG.getBitcast(PVT, V1);
  Flipped =
      DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
  Flipped = DAG.getBitcast(VT, Flipped);
  return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
}

/// Handle lowering 2-lane 128-bit shuffles.
static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
                                  SDValue V2, ArrayRef<int> Mask,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
  if (Subtarget.hasAVX2() && V2.isUndef())
    return SDValue();

  SmallVector<int, 4> WidenedMask;
  if (!canWidenShuffleElements(Mask, Zeroable, WidenedMask))
    return SDValue();

  bool IsLowZero = (Zeroable & 0x3) == 0x3;
  bool IsHighZero = (Zeroable & 0xc) == 0xc;

  // Try to use an insert into a zero vector.
  if (WidenedMask[0] == 0 && IsHighZero) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
                       DAG.getIntPtrConstant(0, DL));
  }

  // TODO: If minimizing size and one of the inputs is a zero vector and the
  // zero vector has only one use, we could use a VPERM2X128 to save the
  // instruction bytes needed to explicitly generate the zero vector.

  // Blends are faster and handle all the non-lane-crossing cases.
  if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
                                          Subtarget, DAG))
    return Blend;

  // If either input operand is a zero vector, use VPERM2X128 because its mask
  // allows us to replace the zero input with an implicit zero.
  if (!IsLowZero && !IsHighZero) {
    // Check for patterns which can be matched with a single insert of a 128-bit
    // subvector.
    bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
    if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {

      // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
      // this will likely become vinsertf128 which can't fold a 256-bit memop.
      if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
        MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
        SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
                                     OnlyUsesV1 ? V1 : V2,
                                     DAG.getIntPtrConstant(0, DL));
        return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
                           DAG.getIntPtrConstant(2, DL));
      }
    }

    // Try to use SHUF128 if possible.
    if (Subtarget.hasVLX()) {
      if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
        unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
                            ((WidenedMask[1] % 2) << 1);
        return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
                           DAG.getTargetConstant(PermMask, DL, MVT::i8));
      }
    }
  }

  // Otherwise form a 128-bit permutation. After accounting for undefs, the
  // widened mask entries already select 128-bit halves (0-3), so we just shift
  // them into the positions defined by a vperm2*128 instruction's immediate
  // control byte.

  // The immediate permute control byte looks like this:
  //    [1:0] - select 128 bits from sources for low half of destination
  //    [2]   - ignore
  //    [3]   - zero low half of destination
  //    [5:4] - select 128 bits from sources for high half of destination
  //    [6]   - ignore
  //    [7]   - zero high half of destination
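  //
  // For example, a widened mask of <1, 2> (high half of V1, low half of V2)
  // yields an immediate of 0x21.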

  assert((WidenedMask[0] >= 0 || IsLowZero) &&
         (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");

  unsigned PermMask = 0;
  PermMask |= IsLowZero  ? 0x08 : (WidenedMask[0] << 0);
  PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);

  // Check the immediate mask and replace unused sources with undef.
  if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
    V1 = DAG.getUNDEF(VT);
  if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
    V2 = DAG.getUNDEF(VT);

  return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
}

/// Lower a vector shuffle by first fixing the 128-bit lanes and then
/// shuffling each lane.
///
/// This attempts to create a repeated lane shuffle where each lane uses one
/// or two of the lanes of the inputs. The lanes of the input vectors are
/// shuffled in one or two independent shuffles to get the lanes into the
/// position needed by the final shuffle.
static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(!V2.isUndef() && "This is only useful with multiple inputs.");

  if (is128BitLaneRepeatedShuffleMask(VT, Mask))
    return SDValue();

  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = 128 / VT.getScalarSizeInBits();
  SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
  SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});

  // First pass will try to fill in the RepeatMask from lanes that need two
  // sources.
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    int Srcs[2] = {-1, -1};
    SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
    for (int i = 0; i != NumLaneElts; ++i) {
      int M = Mask[(Lane * NumLaneElts) + i];
      if (M < 0)
        continue;
      // Determine which of the possible input lanes (NumLanes from each
      // source) this element comes from. Assign that as one of the sources for
      // this lane. We can assign up to 2 sources for this lane. If we run out
      // of sources we can't do anything.
      int LaneSrc = M / NumLaneElts;
      int Src;
      if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
        Src = 0;
      else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
        Src = 1;
      else
        return SDValue();

      Srcs[Src] = LaneSrc;
      InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
    }

    // If this lane has two sources, see if it fits with the repeat mask so far.
    if (Srcs[1] < 0)
      continue;

    LaneSrcs[Lane][0] = Srcs[0];
    LaneSrcs[Lane][1] = Srcs[1];

    auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
      assert(M1.size() == M2.size() && "Unexpected mask size");
      for (int i = 0, e = M1.size(); i != e; ++i)
        if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
          return false;
      return true;
    };

    auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
      assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
      for (int i = 0, e = MergedMask.size(); i != e; ++i) {
        int M = Mask[i];
        if (M < 0)
          continue;
        assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
               "Unexpected mask element");
        MergedMask[i] = M;
      }
    };

    if (MatchMasks(InLaneMask, RepeatMask)) {
      // Merge this lane mask into the final repeat mask.
      MergeMasks(InLaneMask, RepeatMask);
      continue;
    }

    // Didn't find a match. Swap the operands and try again.
    std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
    ShuffleVectorSDNode::commuteMask(InLaneMask);

    if (MatchMasks(InLaneMask, RepeatMask)) {
      // Merge this lane mask into the final repeat mask.
      MergeMasks(InLaneMask, RepeatMask);
      continue;
    }

    // Couldn't find a match with the operands in either order.
    return SDValue();
  }

  // Now handle any lanes with only one source.
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    // If this lane has already been processed, skip it.
    if (LaneSrcs[Lane][0] >= 0)
      continue;

    for (int i = 0; i != NumLaneElts; ++i) {
      int M = Mask[(Lane * NumLaneElts) + i];
      if (M < 0)
        continue;

      // If RepeatMask isn't defined yet we can define it ourselves.
      if (RepeatMask[i] < 0)
        RepeatMask[i] = M % NumLaneElts;

      if (RepeatMask[i] < NumElts) {
        if (RepeatMask[i] != M % NumLaneElts)
          return SDValue();
        LaneSrcs[Lane][0] = M / NumLaneElts;
      } else {
        if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
          return SDValue();
        LaneSrcs[Lane][1] = M / NumLaneElts;
      }
    }

    if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
      return SDValue();
  }

  SmallVector<int, 16> NewMask(NumElts, -1);
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    int Src = LaneSrcs[Lane][0];
    for (int i = 0; i != NumLaneElts; ++i) {
      int M = -1;
      if (Src >= 0)
        M = Src * NumLaneElts + i;
      NewMask[Lane * NumLaneElts + i] = M;
    }
  }
  SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
  // Ensure we didn't get back the shuffle we started with.
  // FIXME: This is a hack to make up for some splat handling code in
  // getVectorShuffle.
  if (isa<ShuffleVectorSDNode>(NewV1) &&
      cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
    return SDValue();

  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    int Src = LaneSrcs[Lane][1];
    for (int i = 0; i != NumLaneElts; ++i) {
      int M = -1;
      if (Src >= 0)
        M = Src * NumLaneElts + i;
      NewMask[Lane * NumLaneElts + i] = M;
    }
  }
  SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
  // Ensure we didn't get back the shuffle we started with.
  // FIXME: This is a hack to make up for some splat handling code in
  // getVectorShuffle.
  if (isa<ShuffleVectorSDNode>(NewV2) &&
      cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
    return SDValue();

  for (int i = 0; i != NumElts; ++i) {
    NewMask[i] = RepeatMask[i % NumLaneElts];
    if (NewMask[i] < 0)
      continue;

    NewMask[i] += (i / NumLaneElts) * NumLaneElts;
  }
  return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
}

/// If the input shuffle mask results in a vector that is undefined in all upper
/// or lower half elements and that mask accesses only 2 halves of the
/// shuffle's operands, return true. A mask of half the width with mask indexes
/// adjusted to access the extracted halves of the original shuffle operands is
/// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
/// lower half of each input operand is accessed.
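///
/// For example, the v8 mask <u, u, u, u, 0, 1, 12, 13> has an undef lower half
/// and accesses only the lower half of V1 (HalfIdx 0) and the upper half of V2
/// (HalfIdx 3); HalfMask becomes <0, 1, 4, 5>.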
static bool
getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
                   int &HalfIdx1, int &HalfIdx2) {
  assert((Mask.size() == HalfMask.size() * 2) &&
         "Expected input mask to be twice as long as output");

  // Exactly one half of the result must be undef to allow narrowing.
  bool UndefLower = isUndefLowerHalf(Mask);
  bool UndefUpper = isUndefUpperHalf(Mask);
  if (UndefLower == UndefUpper)
    return false;

  unsigned HalfNumElts = HalfMask.size();
  unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
  HalfIdx1 = -1;
  HalfIdx2 = -1;
  for (unsigned i = 0; i != HalfNumElts; ++i) {
    int M = Mask[i + MaskIndexOffset];
    if (M < 0) {
      HalfMask[i] = M;
      continue;
    }

    // Determine which of the 4 half vectors this element is from.
    // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
    int HalfIdx = M / HalfNumElts;

    // Determine the element index into its half vector source.
    int HalfElt = M % HalfNumElts;

    // We can shuffle with up to 2 half vectors, set the new 'half'
    // shuffle mask accordingly.
    if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
      HalfMask[i] = HalfElt;
      HalfIdx1 = HalfIdx;
      continue;
    }
    if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
      HalfMask[i] = HalfElt + HalfNumElts;
      HalfIdx2 = HalfIdx;
      continue;
    }

    // Too many half vectors referenced.
    return false;
  }

  return true;
}

/// Given the output values from getHalfShuffleMask(), create a half width
/// shuffle of extracted vectors followed by an insert back to full width.
static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
                                     ArrayRef<int> HalfMask, int HalfIdx1,
                                     int HalfIdx2, bool UndefLower,
                                     SelectionDAG &DAG, bool UseConcat = false) {
  assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
  assert(V1.getValueType().isSimple() && "Expecting only simple types");

  MVT VT = V1.getSimpleValueType();
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  unsigned HalfNumElts = HalfVT.getVectorNumElements();

  auto getHalfVector = [&](int HalfIdx) {
    if (HalfIdx < 0)
      return DAG.getUNDEF(HalfVT);
    SDValue V = (HalfIdx < 2 ? V1 : V2);
    HalfIdx = (HalfIdx % 2) * HalfNumElts;
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
                       DAG.getIntPtrConstant(HalfIdx, DL));
  };

  // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
  SDValue Half1 = getHalfVector(HalfIdx1);
  SDValue Half2 = getHalfVector(HalfIdx2);
  SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
  if (UseConcat) {
    SDValue Op0 = V;
    SDValue Op1 = DAG.getUNDEF(HalfVT);
    if (UndefLower)
      std::swap(Op0, Op1);
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
  }

  unsigned Offset = UndefLower ? HalfNumElts : 0;
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
                     DAG.getIntPtrConstant(Offset, DL));
}

/// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
/// This allows for fast cases such as subvector extraction/insertion
/// or shuffling smaller vector types which can lower more efficiently.
static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
                                         SDValue V2, ArrayRef<int> Mask,
                                         const X86Subtarget &Subtarget,
                                         SelectionDAG &DAG) {
  assert((VT.is256BitVector() || VT.is512BitVector()) &&
         "Expected 256-bit or 512-bit vector");

  bool UndefLower = isUndefLowerHalf(Mask);
  if (!UndefLower && !isUndefUpperHalf(Mask))
    return SDValue();

  assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
         "Completely undef shuffle mask should have been simplified already");

  // Upper half is undef and lower half is whole upper subvector.
  // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  unsigned HalfNumElts = HalfVT.getVectorNumElements();
  if (!UndefLower &&
      isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
    SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
                             DAG.getIntPtrConstant(HalfNumElts, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
                       DAG.getIntPtrConstant(0, DL));
  }

  // Lower half is undef and upper half is whole lower subvector.
  // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
  if (UndefLower &&
      isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
    SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
                             DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
                       DAG.getIntPtrConstant(HalfNumElts, DL));
  }

  int HalfIdx1, HalfIdx2;
  SmallVector<int, 8> HalfMask(HalfNumElts);
  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
    return SDValue();

  assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");

  // Only shuffle the halves of the inputs when useful.
  unsigned NumLowerHalves =
      (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
  unsigned NumUpperHalves =
      (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
  assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");

  // Determine the larger pattern of undef/halves, then decide if it's worth
  // splitting the shuffle based on subtarget capabilities and types.
  unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
  if (!UndefLower) {
    // XXXXuuuu: no insert is needed.
    // Always extract lowers when setting lower - these are all free subreg ops.
    if (NumUpperHalves == 0)
      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                   UndefLower, DAG);

    if (NumUpperHalves == 1) {
      // AVX2 has efficient 32/64-bit element cross-lane shuffles.
      if (Subtarget.hasAVX2()) {
        // extract128 + vunpckhps/vshufps is better than vblend + vpermps.
        if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
            !is128BitUnpackShuffleMask(HalfMask) &&
            (!isSingleSHUFPSMask(HalfMask) ||
             Subtarget.hasFastVariableShuffle()))
          return SDValue();
        // If this is a unary shuffle (assume that the 2nd operand is
        // canonicalized to undef), then we can use vpermpd. Otherwise, we
        // are better off extracting the upper half of 1 operand and using a
        // narrow shuffle.
        if (EltWidth == 64 && V2.isUndef())
          return SDValue();
      }
      // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
      if (Subtarget.hasAVX512() && VT.is512BitVector())
        return SDValue();
      // Extract + narrow shuffle is better than the wide alternative.
      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                   UndefLower, DAG);
    }

    // Don't extract both uppers, instead shuffle and then extract.
    assert(NumUpperHalves == 2 && "Half vector count went wrong");
    return SDValue();
  }

  // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
  if (NumUpperHalves == 0) {
    // AVX2 has efficient 64-bit element cross-lane shuffles.
    // TODO: Refine to account for unary shuffle, splat, and other masks?
    if (Subtarget.hasAVX2() && EltWidth == 64)
      return SDValue();
    // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
    if (Subtarget.hasAVX512() && VT.is512BitVector())
      return SDValue();
    // Narrow shuffle + insert is better than the wide alternative.
    return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                 UndefLower, DAG);
  }

  // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
  return SDValue();
}

/// Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
///
/// This returns true if the elements from a particular input are already in the
/// slot required by the given mask and require no permutation.
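///
/// For example, input 1 is in place for the v4 mask <0, 5, 2, 7> since both of
/// its referenced elements (5 and 7) already sit in slots 1 and 3.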
static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
  assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
      return false;

  return true;
}

/// Handle case where shuffle sources are coming from the same 128-bit lane and
/// every lane can be represented as the same repeating mask - allowing us to
/// shuffle the sources with the repeating shuffle and then permute the result
/// to the destination lanes.
static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  int NumElts = VT.getVectorNumElements();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = NumElts / NumLanes;

  // On AVX2 we may be able to just shuffle the lowest elements and then
  // broadcast the result.
  if (Subtarget.hasAVX2()) {
    for (unsigned BroadcastSize : {16, 32, 64}) {
      if (BroadcastSize <= VT.getScalarSizeInBits())
        continue;
      int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();

      // Attempt to match a repeating pattern every NumBroadcastElts,
      // accounting for UNDEFs but only referencing the lowest 128-bit
      // lane of the inputs.
      auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
        for (int i = 0; i != NumElts; i += NumBroadcastElts)
          for (int j = 0; j != NumBroadcastElts; ++j) {
            int M = Mask[i + j];
            if (M < 0)
              continue;
            int &R = RepeatMask[j];
            if (0 != ((M % NumElts) / NumLaneElts))
              return false;
            if (0 <= R && R != M)
              return false;
            R = M;
          }
        return true;
      };

      SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
      if (!FindRepeatingBroadcastMask(RepeatMask))
        continue;

      // Shuffle the (lowest) repeated elements in place for broadcast.
      SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);

      // Shuffle the actual broadcast.
      SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
      for (int i = 0; i != NumElts; i += NumBroadcastElts)
        for (int j = 0; j != NumBroadcastElts; ++j)
          BroadcastMask[i + j] = j;
      return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
                                  BroadcastMask);
    }
  }

  // Bail if the shuffle mask doesn't cross 128-bit lanes.
  if (!is128BitLaneCrossingShuffleMask(VT, Mask))
    return SDValue();

  // Bail if we already have a repeated lane shuffle mask.
  SmallVector<int, 8> RepeatedShuffleMask;
  if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
    return SDValue();

  // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
  // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
  int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
  int NumSubLanes = NumLanes * SubLaneScale;
  int NumSubLaneElts = NumLaneElts / SubLaneScale;

  // Check that all the sources are coming from the same lane and see if we can
  // form a repeating shuffle mask (local to each sub-lane). At the same time,
  // determine the source sub-lane for each destination sub-lane.
  int TopSrcSubLane = -1;
  SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
  SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
      SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
      SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};

  for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
    // Extract the sub-lane mask, check that it all comes from the same lane
    // and normalize the mask entries to come from the first lane.
    int SrcLane = -1;
    SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
    for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
      int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
      if (M < 0)
        continue;
      int Lane = (M % NumElts) / NumLaneElts;
      if ((0 <= SrcLane) && (SrcLane != Lane))
        return SDValue();
      SrcLane = Lane;
      int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
      SubLaneMask[Elt] = LocalM;
    }

    // Whole sub-lane is UNDEF.
    if (SrcLane < 0)
      continue;

    // Attempt to match against the candidate repeated sub-lane masks.
    for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
      auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
        for (int i = 0; i != NumSubLaneElts; ++i) {
          if (M1[i] < 0 || M2[i] < 0)
            continue;
          if (M1[i] != M2[i])
            return false;
        }
        return true;
      };

      auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
      if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
        continue;

      // Merge the sub-lane mask into the matching repeated sub-lane mask.
      for (int i = 0; i != NumSubLaneElts; ++i) {
        int M = SubLaneMask[i];
        if (M < 0)
          continue;
        assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
               "Unexpected mask element");
        RepeatedSubLaneMask[i] = M;
      }

      // Track the topmost source sub-lane - by setting the remaining to UNDEF
      // we can greatly simplify shuffle matching.
      int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
      TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
      Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
      break;
    }

    // Bail if we failed to find a matching repeated sub-lane mask.
    if (Dst2SrcSubLanes[DstSubLane] < 0)
      return SDValue();
  }
  assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
         "Unexpected source lane");

  // Create a repeating shuffle mask for the entire vector.
  SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
  for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
    int Lane = SubLane / SubLaneScale;
    auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
    for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
      int M = RepeatedSubLaneMask[Elt];
      if (M < 0)
        continue;
      int Idx = (SubLane * NumSubLaneElts) + Elt;
      RepeatedMask[Idx] = M + (Lane * NumLaneElts);
    }
  }
  SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);

  // Shuffle each source sub-lane to its destination.
  SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
  for (int i = 0; i != NumElts; i += NumSubLaneElts) {
    int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
    if (SrcSubLane < 0)
      continue;
    for (int j = 0; j != NumSubLaneElts; ++j)
      SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
  }

  return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
                              SubLaneMask);
}

static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
                                   bool &ForceV1Zero, bool &ForceV2Zero,
                                   unsigned &ShuffleImm, ArrayRef<int> Mask,
                                   const APInt &Zeroable) {
  int NumElts = VT.getVectorNumElements();
  assert(VT.getScalarSizeInBits() == 64 &&
         (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
         "Unexpected data type for VSHUFPD");
  assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
         "Illegal shuffle mask");

  bool ZeroLane[2] = { true, true };
  for (int i = 0; i < NumElts; ++i)
    ZeroLane[i & 1] &= Zeroable[i];

  // Mask for V8F64: 0/1,  8/9,  2/3,  10/11, 4/5, ..
  // Mask for V4F64: 0/1,  4/5,  2/3,  6/7..
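  // E.g. for v4f64 the mask {1, 5, 2, 7} matches the SHUFPD pattern directly
  // and produces ShuffleImm = 0b1011.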
  ShuffleImm = 0;
  bool ShufpdMask = true;
  bool CommutableMask = true;
  for (int i = 0; i < NumElts; ++i) {
    if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
      continue;
    if (Mask[i] < 0)
      return false;
    int Val = (i & 6) + NumElts * (i & 1);
    int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
    if (Mask[i] < Val || Mask[i] > Val + 1)
      ShufpdMask = false;
    if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
      CommutableMask = false;
    ShuffleImm |= (Mask[i] % 2) << i;
  }

  if (!ShufpdMask && !CommutableMask)
    return false;

  if (!ShufpdMask && CommutableMask)
    std::swap(V1, V2);

  ForceV1Zero = ZeroLane[0];
  ForceV2Zero = ZeroLane[1];
  return true;
}

static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
                                      SDValue V2, ArrayRef<int> Mask,
                                      const APInt &Zeroable,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
         "Unexpected data type for VSHUFPD");

  unsigned Immediate = 0;
  bool ForceV1Zero = false, ForceV2Zero = false;
  if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
                              Mask, Zeroable))
    return SDValue();

  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
  if (ForceV1Zero)
    V1 = getZeroVector(VT, Subtarget, DAG, DL);
  if (ForceV2Zero)
    V2 = getZeroVector(VT, Subtarget, DAG, DL);

  return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                     DAG.getTargetConstant(Immediate, DL, MVT::i8));
}

// Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
// by zeroable elements in the remaining 24 elements. Turn this into two
// vmovqb instructions shuffled together.
static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             const APInt &Zeroable,
                                             SelectionDAG &DAG) {
  assert(VT == MVT::v32i8 && "Unexpected type!");

  // The first 8 indices should be every 8th element.
  if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
    return SDValue();

  // Remaining elements need to be zeroable.
  if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
    return SDValue();

  V1 = DAG.getBitcast(MVT::v4i64, V1);
  V2 = DAG.getBitcast(MVT::v4i64, V2);

  V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
  V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);

  // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
  // the upper bits of the result using an unpckldq.
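  // The interleave below yields {V1[0..3], V2[0..3], V1[4..7], V2[4..7]} as
  // 32-bit chunks - the low dword of each truncated input holds the values
  // and the next dword is already zero.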
  SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
                                        { 0, 1, 2, 3, 16, 17, 18, 19,
                                          4, 5, 6, 7, 20, 21, 22, 23 });
  // Insert the unpckldq into a zero vector to widen to v32i8.
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
                     DAG.getConstant(0, DL, MVT::v32i8), Unpack,
                     DAG.getIntPtrConstant(0, DL));
}


/// Handle lowering of 4-lane 64-bit floating point shuffles.
///
/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
                                     Subtarget, DAG))
    return V;

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Use low duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);

    if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
      // interleaved permutation.
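      // E.g. the mask {1, 0, 3, 2} yields VPERMILPMask = 0b0101, swapping the
      // two elements within each 128-bit lane.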
      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
    }

    // With AVX2 we have direct support for this permutation.
    if (Subtarget.hasAVX2())
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));

    // Try to create an in-lane repeating shuffle mask and then shuffle the
    // results into the target lanes.
    if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
      return V;

    // Try to permute the lanes and then use a per-lane permute.
    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
                                                        Mask, DAG, Subtarget))
      return V;

    // Otherwise, fall back.
    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
                                               DAG, Subtarget);
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check if the blend happens to exactly fit that of SHUFPD.
  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Op;

  // If we have one input in place, then we can permute the other input and
  // blend the result.
  if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                Subtarget, DAG);

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
    return V;

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle. However, if we have AVX2 and either input is already in place,
  // we will be able to shuffle the other input across lanes in a single
  // instruction, so skip this pattern.
  if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
                                isShuffleMaskInputInPlace(1, Mask))))
    if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
      return V;

  // If we have VLX support, we can use VEXPAND.
  if (Subtarget.hasVLX())
    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;

  // If we have AVX2 then we always want to lower with a blend because at v4 we
  // can fully permute the elements.
  if (Subtarget.hasAVX2())
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                Subtarget, DAG);

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
                                    Subtarget, DAG);
}

/// Handle lowering of 4-lane 64-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");

  if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
                                     Subtarget, DAG))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (V2.isUndef()) {
    // When the shuffle is mirrored between the 128-bit lanes of the unit, we
    // can use lower latency instructions that will operate on both lanes.
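    // E.g. a repeated v2i64-style mask of {1, 0} scales to the v4i32 mask
    // {2, 3, 0, 1}, which PSHUFD then applies in both 128-bit lanes.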
    SmallVector<int, 2> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
      SmallVector<int, 4> PSHUFDMask;
      scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask);
      return DAG.getBitcast(
          MVT::v4i64,
          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
                      DAG.getBitcast(MVT::v8i32, V1),
                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
    }

    // AVX2 provides a direct instruction for permuting a single input across
    // lanes.
    return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // If we have VLX support, we can use VALIGN or VEXPAND.
  if (Subtarget.hasVLX()) {
    if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i64, V1, V2, Mask,
                                              Subtarget, DAG))
      return Rotate;

    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;
  }

  // Try to use PALIGNR.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
    return V;

  // If we have one input in place, then we can permute the other input and
  // blend the result.
  if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
                                                Subtarget, DAG);

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
    return V;

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle. However, if we have AVX2 and either input is already in place,
  // we will be able to shuffle the other input across lanes in a single
  // instruction, so skip this pattern.
  if (!isShuffleMaskInputInPlace(0, Mask) &&
      !isShuffleMaskInputInPlace(1, Mask))
    if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
            DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
      return Result;

  // Otherwise fall back on generic blend lowering.
  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
                                              Subtarget, DAG);
}

/// Handle lowering of 8-lane 32-bit floating point shuffles.
///
/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  // If the shuffle mask is repeated in each 128-bit lane, we have many more
  // options to efficiently lower the shuffle.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 &&
           "Repeated masks must be half the mask width!");

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
    if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);

    if (V2.isUndef())
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
      return V;

    // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
    // have already handled any direct blends.
    return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
  }

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
    return V;

  // If we have a single input shuffle with different shuffle patterns in the
  // two 128-bit lanes, use a variable mask with VPERMILPS.
  if (V2.isUndef()) {
    SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
    if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
      return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);

    if (Subtarget.hasAVX2())
      return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);

    // Otherwise, fall back.
    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
                                               DAG, Subtarget);
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // If we have VLX support, we can use VEXPAND.
  if (Subtarget.hasVLX())
    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;

  // For non-AVX512, if the mask matches a 16-bit in-lane unpack pattern, try
  // to split, since after splitting we get more efficient code using the
  // vpunpcklwd and vpunpckhwd instructions than with vblend.
  if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
    if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
                                               Subtarget, DAG))
      return V;

  // If we have AVX2 then we always want to lower with a blend because at v8 we
  // can fully permute the elements.
  if (Subtarget.hasAVX2())
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, Mask,
                                                Subtarget, DAG);

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
                                    Subtarget, DAG);
}

/// Handle lowering of 8-lane 32-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // For non-AVX512, if the mask matches a 16-bit in-lane unpack pattern, try
  // to split, since after splitting we get more efficient code than vblend by
  // using the vpunpcklwd and vpunpckhwd instructions.
  if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
      !Subtarget.hasAVX512())
    if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask,
                                               Subtarget, DAG))
      return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the two 128-bit
  // lanes.
  SmallVector<int, 4> RepeatedMask;
  bool Is128BitLaneRepeatedShuffle =
      is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
  if (Is128BitLaneRepeatedShuffle) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
    if (V2.isUndef())
      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
      return V;
  }

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // If we have VLX support, we can use VALIGN or EXPAND.
  if (Subtarget.hasVLX()) {
    if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i32, V1, V2, Mask,
                                              Subtarget, DAG))
      return Rotate;

    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;
  }

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
    return V;

  // If the shuffle patterns aren't repeated but it is a single-input shuffle,
  // directly generate a cross-lane VPERMD instruction.
  if (V2.isUndef()) {
    SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
    return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
  }

  // Assume that a single SHUFPS is faster than an alternative sequence of
  // multiple instructions (even if the CPU has a domain penalty).
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
    SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
    SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
                                            CastV1, CastV2, DAG);
    return DAG.getBitcast(MVT::v8i32, ShufPS);
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Otherwise fall back on generic blend lowering.
  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, Mask,
                                              Subtarget, DAG);
}

/// Handle lowering of 16-lane 16-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return V;

  if (V2.isUndef()) {
    // There are no generalized cross-lane shuffle operations available on i16
    // element types.
    if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
      if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
              DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
        return V;

      return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
                                                 DAG, Subtarget);
    }

    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
      // As this is a single-input shuffle, the repeated mask should be
      // a strictly valid v8i16 mask that we can pass through to the v8i16
      // lowering to handle even the v16 case.
      return lowerV8I16GeneralSingleInputShuffle(
          DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
    }
  }

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // AVX512BWVL can lower to VPERMW.
  if (Subtarget.hasBWI() && Subtarget.hasVLX())
    return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Try to permute the lanes and then use a per-lane permute.
  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
          DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
    return V;

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
                                    Subtarget, DAG);
}

/// Handle lowering of 32-lane 8-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return V;

  // There are no generalized cross-lane shuffle operations available on i8
  // element types.
  if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
            DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
      return V;

    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
                                               DAG, Subtarget);
  }

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // AVX512VBMIVL can lower to VPERMB.
  if (Subtarget.hasVBMI() && Subtarget.hasVLX())
    return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Try to permute the lanes and then use a per-lane permute.
  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
          DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
    return V;

  // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
  // by zeroable elements in the remaining 24 elements. Turn this into two
  // vmovqb instructions shuffled together.
  if (Subtarget.hasVLX())
    if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
                                                  Mask, Zeroable, DAG))
      return V;

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
                                    Subtarget, DAG);
}

/// High-level routine to lower various 256-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 256-bit x86 vector
/// shuffle or splits it into two 128-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
                                  SDValue V1, SDValue V2, const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  // If we have a single input to the zero element, insert that into V1 if we
  // can do so cheaply.
  int NumElts = VT.getVectorNumElements();
  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });

  if (NumV2Elements == 1 && Mask[0] >= NumElts)
    if (SDValue Insertion = lowerShuffleAsElementInsertion(
            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return Insertion;

  // Handle special cases where the lower or upper half is UNDEF.
  if (SDValue V =
          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
    return V;

  // There is a really nice hard cut-over between AVX1 and AVX2 that means we
  // can check for those subtargets here and avoid much of the subtarget
  // querying in the per-vector-type lowering routines. With AVX1 we have
  // essentially *zero* ability to manipulate a 256-bit vector with integer
  // types. Since we'll use floating point types there eventually, just
  // immediately cast everything to a float and operate entirely in that domain.
  if (VT.isInteger() && !Subtarget.hasAVX2()) {
    int ElementBits = VT.getScalarSizeInBits();
    if (ElementBits < 32) {
      // No floating point type available, if we can't use the bit operations
      // for masking/blending then decompose into 128-bit vectors.
      if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                            Subtarget, DAG))
        return V;
      if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
        return V;
      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
    }

    MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
                                VT.getVectorNumElements());
    V1 = DAG.getBitcast(FpVT, V1);
    V2 = DAG.getBitcast(FpVT, V2);
    return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
  }

  switch (VT.SimpleTy) {
  case MVT::v4f64:
    return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v4i64:
    return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8f32:
    return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8i32:
    return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16i16:
    return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v32i8:
    return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 256-bit x86 vector type!");
  }
}

/// Try to lower a vector shuffle as 128-bit shuffles.
static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(VT.getScalarSizeInBits() == 64 &&
         "Unexpected element type size for 128bit shuffle.");

  // Handling a 256-bit vector requires VLX, and most probably the
  // lowerV2X128VectorShuffle() function is the better solution for it.
  assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");

  // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
  SmallVector<int, 4> WidenedMask;
  if (!canWidenShuffleElements(Mask, WidenedMask))
    return SDValue();

  // Try to use an insert into a zero vector.
  if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
      (WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
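    // Keep only the low 128 bits of V1 when elements 2-3 are also zeroable,
    // otherwise keep the low 256 bits, and fill the rest with zeroes.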
    unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
                       DAG.getIntPtrConstant(0, DL));
  }

  // Check for patterns which can be matched with a single insert of a 256-bit
  // subvector.
  bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
                                        {0, 1, 2, 3, 0, 1, 2, 3});
  if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
                                        {0, 1, 2, 3, 8, 9, 10, 11})) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
    SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
                                 OnlyUsesV1 ? V1 : V2,
                                 DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
                       DAG.getIntPtrConstant(4, DL));
  }

  assert(WidenedMask.size() == 4);

  // See if this is an insertion of the lower 128-bits of V2 into V1.
  bool IsInsert = true;
  int V2Index = -1;
  for (int i = 0; i < 4; ++i) {
    assert(WidenedMask[i] >= -1);
    if (WidenedMask[i] < 0)
      continue;

    // Make sure all V1 subvectors are in place.
    if (WidenedMask[i] < 4) {
      if (WidenedMask[i] != i) {
        IsInsert = false;
        break;
      }
    } else {
      // Make sure we only have a single V2 index and it's the lowest 128 bits.
      if (V2Index >= 0 || WidenedMask[i] != 4) {
        IsInsert = false;
        break;
      }
      V2Index = i;
    }
  }
  if (IsInsert && V2Index >= 0) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
    SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
                                 DAG.getIntPtrConstant(0, DL));
    return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
  }

  // Try to lower to vshuf64x2/vshuf32x4.
  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
  unsigned PermMask = 0;
  // Ensure elements came from the same Op.
  for (int i = 0; i < 4; ++i) {
    assert(WidenedMask[i] >= -1);
    if (WidenedMask[i] < 0)
      continue;

    SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
    unsigned OpIndex = i / 2;
    if (Ops[OpIndex].isUndef())
      Ops[OpIndex] = Op;
    else if (Ops[OpIndex] != Op)
      return SDValue();

    // Convert the 128-bit shuffle mask selection values into 128-bit selection
    // bits defined by a vshuf64x2 instruction's immediate control byte.
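    // E.g. WidenedMask = {0, 2, 4, 6} takes 128-bit quads 0 and 2 from V1 and
    // quads 0 and 2 from V2, encoding as PermMask = 0b10001000 (0x88).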
    PermMask |= (WidenedMask[i] % 4) << (i * 2);
  }

  return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
}

/// Handle lowering of 8-lane 64-bit floating point shuffles.
static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (V2.isUndef()) {
    // Use low duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);

    if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
      // interleaved permutation.
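      // E.g. the mask {1, 0, 3, 2, 5, 4, 7, 6} yields
      // VPERMILPMask = 0b01010101, swapping the pair in every 128-bit lane.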
      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
                              ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
                              ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
    }

    SmallVector<int, 4> RepeatedMask;
    if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
  }

  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
                                           V2, Subtarget, DAG))
    return Shuf128;

  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
    return Unpck;

  // Check if the blend happens to exactly fit that of SHUFPD.
  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Op;

  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
}

/// Handle lowering of 16-lane 32-bit floating point shuffles.
static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // If the shuffle mask is repeated in each 128-bit lane, we have many more
  // options to efficiently lower the shuffle.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
    if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);

    if (V2.isUndef())
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
      return V;

    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

    // Otherwise, fall back to a SHUFPS sequence.
    return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
  }

  // If we have a single input shuffle with different shuffle patterns in the
  // 128-bit lanes that doesn't cross lanes, use a variable-mask VPERMILPS.
  if (V2.isUndef() &&
      !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
    SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
    return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
  }

  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
}

/// Handle lowering of 8-lane 64-bit integer shuffles.
static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (V2.isUndef()) {
    // When the shuffle is mirrored between the 128-bit lanes of the unit, we
    // can use lower latency instructions that will operate on all four
    // 128-bit lanes.
    SmallVector<int, 2> Repeated128Mask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
      SmallVector<int, 4> PSHUFDMask;
      scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask);
      return DAG.getBitcast(
          MVT::v8i64,
          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
                      DAG.getBitcast(MVT::v16i32, V1),
                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
    }

    SmallVector<int, 4> Repeated256Mask;
    if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
                         getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
  }

  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
                                           V2, Subtarget, DAG))
    return Shuf128;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use VALIGN.
  if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i64, V1, V2, Mask,
                                            Subtarget, DAG))
    return Rotate;

  // Try to use PALIGNR.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
    return Unpck;
  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
}

/// Handle lowering of 16-lane 32-bit integer shuffles.
static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the four 128-bit
  // lanes.
  SmallVector<int, 4> RepeatedMask;
  bool Is128BitLaneRepeatedShuffle =
      is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
  if (Is128BitLaneRepeatedShuffle) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
    if (V2.isUndef())
      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
      return V;
  }

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use VALIGN.
  if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v16i32, V1, V2, Mask,
                                            Subtarget, DAG))
    return Rotate;

  // Try to use byte rotation instructions.
  if (Subtarget.hasBWI())
    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;

  // Assume that a single SHUFPS is faster than using a permv shuffle.
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
    SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
    SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
                                            CastV1, CastV2, DAG);
    return DAG.getBitcast(MVT::v16i32, ShufPS);
  }
  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;
  return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
}

/// Handle lowering of 32-lane 16-bit integer shuffles.
static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  if (V2.isUndef()) {
    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
      // As this is a single-input shuffle, the repeated mask should be
      // a strictly valid v8i16 mask that we can pass through to the v8i16
      // lowering to handle even the v32 case.
      return lowerV8I16GeneralSingleInputShuffle(
          DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
    }
  }

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
}

/// Handle lowering of 64-lane 8-bit integer shuffles.
static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
  assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // VBMI can use VPERMV/VPERMV3 byte shuffles.
  if (Subtarget.hasVBMI())
    return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (!V2.isUndef())
    if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
            DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
      return Result;

  // FIXME: Implement direct support for this type!
  return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
}

/// High-level routine to lower various 512-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  MVT VT, SDValue V1, SDValue V2,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(Subtarget.hasAVX512() &&
         "Cannot lower 512-bit vectors w/ basic ISA!");

  // If we have a single input to the zero element, insert that into V1 if we
  // can do so cheaply.
  int NumElts = Mask.size();
  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });

  if (NumV2Elements == 1 && Mask[0] >= NumElts)
    if (SDValue Insertion = lowerShuffleAsElementInsertion(
            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return Insertion;

  // Handle special cases where the lower or upper half is UNDEF.
  if (SDValue V =
          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
    return V;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  // Dispatch to each element type for lowering. If we don't have support for
  // specific element type shuffles at 512 bits, immediately split them and
  // lower them. Each lowering routine of a given type is allowed to assume that
  // the requisite ISA extensions for that element type are available.
  switch (VT.SimpleTy) {
  case MVT::v8f64:
    return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16f32:
    return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8i64:
    return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16i32:
    return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v32i16:
    return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v64i8:
    return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 512-bit x86 vector type!");
  }
}

static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
                                         MVT VT, SDValue V1, SDValue V2,
                                         const X86Subtarget &Subtarget,
                                         SelectionDAG &DAG) {
  // Shuffle should be unary.
  if (!V2.isUndef())
    return SDValue();

  int ShiftAmt = -1;
  int NumElts = Mask.size();
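  // For example, the unary v8i1 mask <2,3,4,5,6,7,u,u> reads every defined
  // element from position i+2, so it matches a KSHIFTR by 2 (the high
  // elements are undef, so the zeros shifted in are fine).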
  for (int i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
           "Unexpected mask index.");
    if (M < 0)
      continue;

    // The first non-undef element determines our shift amount.
    if (ShiftAmt < 0) {
      ShiftAmt = M - i;
      // Need to be shifting right.
      if (ShiftAmt <= 0)
        return SDValue();
    }
    // All non-undef elements must shift by the same amount.
    if (ShiftAmt != M - i)
      return SDValue();
  }
  assert(ShiftAmt >= 0 && "All undef?");

  // Great, we found a shift right.
  MVT WideVT = VT;
  if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
    WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
  SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
                            DAG.getUNDEF(WideVT), V1,
                            DAG.getIntPtrConstant(0, DL));
  Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
                    DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                     DAG.getIntPtrConstant(0, DL));
}

// Determine if this shuffle can be implemented with a KSHIFT instruction.
// Returns the shift amount if possible or -1 if not. This is a simplified
// version of matchShuffleAsShift.
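// For example, with MaskOffset == 0 an 8-element mask whose first two elements
// are zeroable and whose remaining elements read 0,1,2,3,4,5 from the source
// matches KSHIFTL by 2; a mask reading 2,3,4,5,6,7 with the top two elements
// zeroable matches KSHIFTR by 2.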
static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
                                    int MaskOffset, const APInt &Zeroable) {
  int Size = Mask.size();

  auto CheckZeros = [&](int Shift, bool Left) {
    for (int j = 0; j < Shift; ++j)
      if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
        return false;

    return true;
  };

  auto MatchShift = [&](int Shift, bool Left) {
    unsigned Pos = Left ? Shift : 0;
    unsigned Low = Left ? 0 : Shift;
    unsigned Len = Size - Shift;
    return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
  };

  for (int Shift = 1; Shift != Size; ++Shift)
    for (bool Left : {true, false})
      if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
        Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
        return Shift;
      }

  return -1;
}


// Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles the masks.
// The only way to shuffle bits is to sign-extend the mask vector to a SIMD
// vector, shuffle, and then truncate it back.
static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                MVT VT, SDValue V1, SDValue V2,
                                const APInt &Zeroable,
                                const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  assert(Subtarget.hasAVX512() &&
         "Cannot lower 512-bit vectors w/o basic ISA!");

  int NumElts = Mask.size();

  // Try to recognize shuffles that are just padding a subvector with zeros.
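  // For example, a v8i1 shuffle whose low four elements are the identity
  // <0,1,2,3> and whose top four elements are zeroable can be lowered as an
  // extract of the low v4i1 subvector inserted into a zero vector.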
  int SubvecElts = 0;
  int Src = -1;
  for (int i = 0; i != NumElts; ++i) {
    if (Mask[i] >= 0) {
      // Grab the source from the first valid mask element. All subsequent
      // elements need to use this same source.
      if (Src < 0)
        Src = Mask[i] / NumElts;
      if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
        break;
    }

    ++SubvecElts;
  }
  assert(SubvecElts != NumElts && "Identity shuffle?");

  // Clip to a power of 2.
  SubvecElts = PowerOf2Floor(SubvecElts);

  // Make sure the number of zeroable bits in the top at least covers the bits
  // not covered by the subvector.
  if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
    assert(Src >= 0 && "Expected a source!");
    MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
    SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
                                  Src == 0 ? V1 : V2,
                                  DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       DAG.getConstant(0, DL, VT),
                       Extract, DAG.getIntPtrConstant(0, DL));
  }

  // Try a simple shift right with undef elements. Later we'll try with zeros.
  if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
                                                DAG))
    return Shift;

  // Try to match KSHIFTs.
  unsigned Offset = 0;
  for (SDValue V : { V1, V2 }) {
    unsigned Opcode;
    int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
    if (ShiftAmt >= 0) {
      MVT WideVT = VT;
      if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
        WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
      SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
                                DAG.getUNDEF(WideVT), V,
                                DAG.getIntPtrConstant(0, DL));
      // Widened right shifts need two shifts to ensure we shift in zeroes.
      if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
        int WideElts = WideVT.getVectorNumElements();
        // Shift left to put the original vector in the MSBs of the new size.
        Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
                          DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
        // Increase the shift amount to account for the left shift.
        ShiftAmt += WideElts - NumElts;
      }

      Res = DAG.getNode(Opcode, DL, WideVT, Res,
                        DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                         DAG.getIntPtrConstant(0, DL));
    }
    Offset += NumElts; // Increment for next iteration.
  }



  MVT ExtVT;
  switch (VT.SimpleTy) {
  default:
    llvm_unreachable("Expected a vector of i1 elements");
  case MVT::v2i1:
    ExtVT = MVT::v2i64;
    break;
  case MVT::v4i1:
    ExtVT = MVT::v4i32;
    break;
  case MVT::v8i1:
    // Use a 512-bit type to get more shuffle options on KNL. If we have VLX,
    // use a 256-bit shuffle instead.
    ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
    break;
  case MVT::v16i1:
    // Take 512-bit type, unless we are avoiding 512-bit types and have the
    // 256-bit operation available.
    ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
    break;
  case MVT::v32i1:
    // Take 512-bit type, unless we are avoiding 512-bit types and have the
    // 256-bit operation available.
    assert(Subtarget.hasBWI() && "Expected AVX512BW support");
    ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
    break;
  case MVT::v64i1:
    ExtVT = MVT::v64i8;
    break;
  }

  V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
  V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);

  SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
  // The i1 elements were sign-extended, so the mask can be recovered by
  // comparing the shuffled result against zero (true elements have their sign
  // bit set).
  int NumElems = VT.getVectorNumElements();
  if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
      (Subtarget.hasDQI() && (NumElems < 32)))
    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
                       Shuffle, ISD::SETGT);

  return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
}

/// Helper function that returns true if the shuffle mask should be
/// commuted to improve canonicalization.
static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
  int NumElements = Mask.size();

  int NumV1Elements = 0, NumV2Elements = 0;
  for (int M : Mask)
    if (M < 0)
      continue;
    else if (M < NumElements)
      ++NumV1Elements;
    else
      ++NumV2Elements;

  // Commute the shuffle as needed such that more elements come from V1 than
  // V2. This allows us to match the shuffle pattern strictly on how many
  // elements come from V1 without handling the symmetric cases.
  if (NumV2Elements > NumV1Elements)
    return true;

  assert(NumV1Elements > 0 && "No V1 indices");

  if (NumV2Elements == 0)
    return false;

  // When the number of V1 and V2 elements are the same, try to minimize the
  // number of uses of V2 in the low half of the vector. When that is tied,
  // ensure that the sum of indices for V1 is equal to or lower than the sum of
  // indices for V2. When those are equal, try to ensure that the number of odd
  // indices for V1 is lower than the number of odd indices for V2.
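  // For example, the v4i32 mask <4,5,0,1> uses two elements from each input,
  // but the low half uses only V2, so the shuffle is commuted to <0,1,4,5>.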
  if (NumV1Elements == NumV2Elements) {
    int LowV1Elements = 0, LowV2Elements = 0;
    for (int M : Mask.slice(0, NumElements / 2))
      if (M >= NumElements)
        ++LowV2Elements;
      else if (M >= 0)
        ++LowV1Elements;
    if (LowV2Elements > LowV1Elements)
      return true;
    if (LowV2Elements == LowV1Elements) {
      int SumV1Indices = 0, SumV2Indices = 0;
      for (int i = 0, Size = Mask.size(); i < Size; ++i)
        if (Mask[i] >= NumElements)
          SumV2Indices += i;
        else if (Mask[i] >= 0)
          SumV1Indices += i;
      if (SumV2Indices < SumV1Indices)
        return true;
      if (SumV2Indices == SumV1Indices) {
        int NumV1OddIndices = 0, NumV2OddIndices = 0;
        for (int i = 0, Size = Mask.size(); i < Size; ++i)
          if (Mask[i] >= NumElements)
            NumV2OddIndices += i % 2;
          else if (Mask[i] >= 0)
            NumV1OddIndices += i % 2;
        if (NumV2OddIndices < NumV1OddIndices)
          return true;
      }
    }
  }

  return false;
}

/// Top-level lowering for x86 vector shuffles.
///
/// This handles decomposition, canonicalization, and lowering of all x86
/// vector shuffles. Most of the specific lowering strategies are encapsulated
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> OrigMask = SVOp->getMask();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  int NumElements = VT.getVectorNumElements();
  SDLoc DL(Op);
  bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);

  assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
         "Can't lower MMX shuffles");

  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node as the second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);

  // Check for non-undef masks pointing at an undef vector and make the masks
  // undef as well. This makes it easier to match the shuffle based solely on
  // the mask.
  if (V2IsUndef &&
      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
    SmallVector<int, 8> NewMask(OrigMask.begin(), OrigMask.end());
    for (int &M : NewMask)
      if (M >= NumElements)
        M = -1;
    return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
  }

  // Check for illegal shuffle mask element index values.
  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
  (void)MaskUpperLimit;
  assert(llvm::all_of(OrigMask,
                      [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");

  // We actually see shuffles that are entirely re-arrangements of a set of
  // zero inputs. This mostly happens while decomposing complex shuffles into
  // simple ones. Directly lower these as a buildvector of zeros.
  APInt Zeroable = computeZeroableShuffleElements(OrigMask, V1, V2);
  if (Zeroable.isAllOnesValue())
    return getZeroVector(VT, Subtarget, DAG, DL);

  bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());

  // Create an alternative mask with info about zeroable elements.
  // Here we do not set undef elements as zeroable.
  SmallVector<int, 64> ZeroableMask(OrigMask.begin(), OrigMask.end());
  if (V2IsZero) {
    assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
    for (int i = 0; i != NumElements; ++i)
      if (OrigMask[i] != SM_SentinelUndef && Zeroable[i])
        ZeroableMask[i] = SM_SentinelZero;
  }

  // Try to collapse shuffles into using a vector type with fewer elements but
  // wider element types. We cap this to not form integers or floating point
  // elements wider than 64 bits, but it might be interesting to form i128
  // integers to handle flipping the low and high halves of AVX 256-bit vectors.
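  // For example, a v4i32 shuffle with mask <0,1,4,5> widens to a v2i64 shuffle
  // with mask <0,2>.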
  SmallVector<int, 16> WidenedMask;
  if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
      canWidenShuffleElements(ZeroableMask, WidenedMask)) {
    // Shuffle mask widening should not interfere with a broadcast opportunity
    // by obfuscating the operands with bitcasts.
    // TODO: Avoid lowering directly from this top-level function: make this
    // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
                                                    Subtarget, DAG))
      return Broadcast;

    MVT NewEltVT = VT.isFloatingPoint()
                       ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
                       : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
    int NewNumElts = NumElements / 2;
    MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
    // Make sure that the new vector type is legal. For example, v2f64 isn't
    // legal on SSE1.
    if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
      if (V2IsZero) {
        // Modify the new Mask to take all zeros from the all-zero vector.
        // Choose indices that are blend-friendly.
        bool UsedZeroVector = false;
        assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
               "V2's non-undef elements are used?!");
        for (int i = 0; i != NewNumElts; ++i)
          if (WidenedMask[i] == SM_SentinelZero) {
            WidenedMask[i] = i + NewNumElts;
            UsedZeroVector = true;
          }
        // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
        // some elements to be undef.
        if (UsedZeroVector)
          V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
      }
      V1 = DAG.getBitcast(NewVT, V1);
      V2 = DAG.getBitcast(NewVT, V2);
      return DAG.getBitcast(
          VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
    }
  }

  // Commute the shuffle if it will improve canonicalization.
  SmallVector<int, 64> Mask(OrigMask.begin(), OrigMask.end());
  if (canonicalizeShuffleMaskWithCommute(Mask)) {
    ShuffleVectorSDNode::commuteMask(Mask);
    std::swap(V1, V2);
  }

  if (SDValue V = lowerShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
    return V;

  // For each vector width, delegate to a specialized lowering routine.
  if (VT.is128BitVector())
    return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  if (VT.is256BitVector())
    return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  if (VT.is512BitVector())
    return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  if (Is1BitVector)
    return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  llvm_unreachable("Unimplemented!");
}

/// Try to lower a VSELECT instruction to a vector shuffle.
static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
  SDValue Cond = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();

  // Only non-legal VSELECTs reach this lowering; convert those into generic
  // shuffles and reuse the shuffle lowering path for blends.
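  // For example, a v4i32 VSELECT whose constant condition selects LHS for
  // elements 0 and 2 and RHS for elements 1 and 3 becomes a shuffle of
  // LHS and RHS with the mask <0,5,2,7>.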
  SmallVector<int, 32> Mask;
  if (createShuffleMaskFromVSELECT(Mask, Cond))
    return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);

  return SDValue();
}

SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);

  // A vselect where all conditions and data are constants can be optimized into
  // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
  if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
    return SDValue();

  // Try to lower this to a blend-style vector shuffle. This can handle all
  // constant condition cases.
  if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
    return BlendOp;

  // If this VSELECT has a vector of i1 as a mask, it will be directly matched
  // with patterns on the AVX-512 mask registers.
  MVT CondVT = Cond.getSimpleValueType();
  unsigned CondEltSize = Cond.getScalarValueSizeInBits();
  if (CondEltSize == 1)
    return Op;

  // Variable blends are only legal from SSE4.1 onward.
  if (!Subtarget.hasSSE41())
    return SDValue();

  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  unsigned EltSize = VT.getScalarSizeInBits();
  unsigned NumElts = VT.getVectorNumElements();

  // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
  // into an i1 condition so that we can use the mask-based 512-bit blend
  // instructions.
  if (VT.getSizeInBits() == 512) {
    // Build a mask by testing the condition against zero.
    MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
    SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
                                DAG.getConstant(0, dl, CondVT),
                                ISD::SETNE);
    // Now return a new VSELECT using the mask.
    return DAG.getSelect(dl, VT, Mask, LHS, RHS);
  }

  // SEXT/TRUNC cases where the mask doesn't match the destination size.
  if (CondEltSize != EltSize) {
    // If we don't have a sign splat, rely on the expansion.
    if (CondEltSize != DAG.ComputeNumSignBits(Cond))
      return SDValue();

    MVT NewCondSVT = MVT::getIntegerVT(EltSize);
    MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
    Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
    return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
  }

  // Only some types will be legal on some subtargets. If we can emit a legal
  // VSELECT-matching blend, return Op, but if we need to expand, return
  // a null value.
  switch (VT.SimpleTy) {
  default:
    // Most of the vector types have blends past SSE4.1.
    return Op;

  case MVT::v32i8:
    // The byte blends for AVX vectors were introduced only in AVX2.
    if (Subtarget.hasAVX2())
      return Op;

    return SDValue();

  case MVT::v8i16:
  case MVT::v16i16: {
    // Bitcast everything to the vXi8 type and use a vXi8 vselect.
    MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
    Cond = DAG.getBitcast(CastVT, Cond);
    LHS = DAG.getBitcast(CastVT, LHS);
    RHS = DAG.getBitcast(CastVT, RHS);
    SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
    return DAG.getBitcast(VT, Select);
  }
  }
}

static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
    return SDValue();

  if (VT.getSizeInBits() == 8) {
    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
  }

  if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to FR32 register. It's only worth matching if the
    // result has a single use which is a store or a bitcast to i32.  And in
    // the case of a store, it's not worth it if the index is a constant 0,
    // because a MOVSSmr can be used instead, which is smaller and faster.
    if (!Op.hasOneUse())
      return SDValue();
    SDNode *User = *Op.getNode()->use_begin();
    if ((User->getOpcode() != ISD::STORE ||
         isNullConstant(Op.getOperand(1))) &&
        (User->getOpcode() != ISD::BITCAST ||
         User->getValueType(0) != MVT::i32))
      return SDValue();
    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
                                  Op.getOperand(1));
    return DAG.getBitcast(MVT::f32, Extract);
  }

  if (VT == MVT::i32 || VT == MVT::i64) {
    // ExtractPS/pextrq works with constant index.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return Op;
  }

  return SDValue();
}

/// Extract one bit from a mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  SDValue Vec = Op.getOperand(0);
  SDLoc dl(Vec);
  MVT VecVT = Vec.getSimpleValueType();
  SDValue Idx = Op.getOperand(1);
  MVT EltVT = Op.getSimpleValueType();

  assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
         "Unexpected vector type in ExtractBitFromMaskVector");

  // A variable index can't be handled in mask registers,
  // so extend the vector to VR512/VR128.
  if (!isa<ConstantSDNode>(Idx)) {
    unsigned NumElts = VecVT.getVectorNumElements();
    // Extending v8i1/v16i1 to 512 bits gets better performance on KNL
    // than extending to 128/256 bits.
    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
  }

  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  if (IdxVal == 0) // the operation is legal
    return Op;

  // Extend to natively supported kshift.
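  // For example, extracting bit 3 of a v16i1 becomes a KSHIFTR by 3 followed
  // by an extract of element 0; v2i1/v4i1 (and v8i1 without DQI) are widened
  // first so that the kshift is legal.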
  unsigned NumElems = VecVT.getVectorNumElements();
  MVT WideVecVT = VecVT;
  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
    WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
                      DAG.getUNDEF(WideVecVT), Vec,
                      DAG.getIntPtrConstant(0, dl));
  }

  // Use kshiftr instruction to move to the lower element.
  Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
                    DAG.getTargetConstant(IdxVal, dl, MVT::i8));

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                     DAG.getIntPtrConstant(0, dl));
}

SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  MVT VecVT = Vec.getSimpleValueType();
  SDValue Idx = Op.getOperand(1);

  if (VecVT.getVectorElementType() == MVT::i1)
    return ExtractBitFromMaskVector(Op, DAG, Subtarget);

  if (!isa<ConstantSDNode>(Idx)) {
    // It's more profitable to go through memory (1 cycle throughput)
    // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
    // The IACA tool was used to get the performance estimate
    // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
    //
    // example : extractelement <16 x i8> %a, i32 %i
    //
    // Block Throughput: 3.00 Cycles
    // Throughput Bottleneck: Port5
    //
    // | Num Of |   Ports pressure in cycles  |    |
    // |  Uops  |  0  - DV  |  5  |  6  |  7  |    |
    // ---------------------------------------------
    // |   1    |           | 1.0 |     |     | CP | vmovd xmm1, edi
    // |   1    |           | 1.0 |     |     | CP | vpshufb xmm0, xmm0, xmm1
    // |   2    | 1.0       | 1.0 |     |     | CP | vpextrb eax, xmm0, 0x0
    // Total Num Of Uops: 4
    //
    //
    // Block Throughput: 1.00 Cycles
    // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
    //
    // |    |  Ports pressure in cycles   |  |
    // |Uops| 1 | 2 - D  |3 -  D  | 4 | 5 |  |
    // ---------------------------------------------------------
    // |2^  |   | 0.5    | 0.5    |1.0|   |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
    // |1   |0.5|        |        |   |0.5|  | lea rax, ptr [rsp-0x18]
    // |1   |   |0.5, 0.5|0.5, 0.5|   |   |CP| mov al, byte ptr [rdi+rax*1]
    // Total Num Of Uops: 4

    return SDValue();
  }

  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

  // If the source is a 256-bit or 512-bit vector, first extract the 128-bit
  // subvector and then extract the element from that.
  if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
    // Get the 128-bit vector.
    Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
    MVT EltVT = VecVT.getVectorElementType();

    unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
    assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");

    // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
    // this can be done with a mask.
    IdxVal &= ElemsPerChunk - 1;
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                       DAG.getIntPtrConstant(IdxVal, dl));
  }

  assert(VecVT.is128BitVector() && "Unexpected vector length");

  MVT VT = Op.getSimpleValueType();

  if (VT.getSizeInBits() == 16) {
    // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
    // we're going to zero extend the register or fold the store (SSE41 only).
    if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
        !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getBitcast(MVT::v4i32, Vec), Idx));

    // Transform it so it matches pextrw, which produces a 32-bit result.
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
  }

  if (Subtarget.hasSSE41())
    if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
      return Res;

  // TODO: We only extract a single element from v16i8, we can probably afford
  // to be more aggressive here before using the default approach of spilling to
  // stack.
  if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
    // Extract either the lowest i32 or any i16, and extract the sub-byte.
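    // For example, extracting byte 5 becomes an extract of i16 element 2
    // followed by a right shift by 8 bits and a truncate back to i8.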
    int DWordIdx = IdxVal / 4;
    if (DWordIdx == 0) {
      SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                DAG.getBitcast(MVT::v4i32, Vec),
                                DAG.getIntPtrConstant(DWordIdx, dl));
      int ShiftVal = (IdxVal % 4) * 8;
      if (ShiftVal != 0)
        Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
                          DAG.getConstant(ShiftVal, dl, MVT::i8));
      return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
    }

    int WordIdx = IdxVal / 2;
    SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                              DAG.getBitcast(MVT::v8i16, Vec),
                              DAG.getIntPtrConstant(WordIdx, dl));
    int ShiftVal = (IdxVal % 2) * 8;
    if (ShiftVal != 0)
      Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
                        DAG.getConstant(ShiftVal, dl, MVT::i8));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
  }

  if (VT.getSizeInBits() == 32) {
    if (IdxVal == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
    Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0, dl));
  }

  if (VT.getSizeInBits() == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    //        to match extract_elt for f64.
    if (IdxVal == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    int Mask[2] = { 1, -1 };
    Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0, dl));
  }

  return SDValue();
}

/// Insert one bit into a mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue Elt = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);
  MVT VecVT = Vec.getSimpleValueType();

  if (!isa<ConstantSDNode>(Idx)) {
    // Non constant index. Extend source and destination,
    // insert element and then truncate the result.
    unsigned NumElts = VecVT.getVectorNumElements();
    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
    SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
      DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
      DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
  }

  // Copy into a k-register, extract to v1i1 and insert_subvector.
  SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);

  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec,
                     Op.getOperand(2));
}

SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();

  if (EltVT == MVT::i1)
    return InsertBitToMaskVector(Op, DAG, Subtarget);

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  auto *N2C = dyn_cast<ConstantSDNode>(N2);
  if (!N2C || N2C->getAPIntValue().uge(NumElts))
    return SDValue();
  uint64_t IdxVal = N2C->getZExtValue();

  bool IsZeroElt = X86::isZeroNode(N1);
  bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);

  // If we are inserting an element, see if we can do this more efficiently with
  // a blend shuffle with a rematerializable vector than a costly integer
  // insertion.
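  // For example, inserting zero into element 2 of a v4i32 becomes a shuffle of
  // the source with a zero vector using the blend mask <0,1,6,3>.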
  if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
      16 <= EltVT.getSizeInBits()) {
    SmallVector<int, 8> BlendMask;
    for (unsigned i = 0; i != NumElts; ++i)
      BlendMask.push_back(i == IdxVal ? i + NumElts : i);
    SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
                                  : getOnesVector(VT, DAG, dl);
    return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
  }

  // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
  // into that, and then insert the subvector back into the result.
  if (VT.is256BitVector() || VT.is512BitVector()) {
    // With a 256-bit vector, we can insert into the zero element efficiently
    // using a blend if we have AVX or AVX2 and the right data type.
    if (VT.is256BitVector() && IdxVal == 0) {
      // TODO: It is worthwhile to cast integer to floating point and back
      // and incur a domain crossing penalty if that's what we'll end up
      // doing anyway after extracting to a 128-bit vector.
      if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
          (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
        SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
                           DAG.getTargetConstant(1, dl, MVT::i8));
      }
    }

    // Get the desired 128-bit vector chunk.
    SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);

    // Insert the element into the desired chunk.
    unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
    assert(isPowerOf2_32(NumEltsIn128));
    // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
    unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);

    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
                    DAG.getIntPtrConstant(IdxIn128, dl));

    // Insert the changed part back into the bigger vector
    return insert128BitVector(N0, V, IdxVal, DAG, dl);
  }
  assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");

  // This will be just movd/movq/movss/movsd.
  if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode()) &&
      (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
       EltVT == MVT::i64)) {
    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
    return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
  }

  // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
  // argument. SSE41 required for pinsrb.
  if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
    unsigned Opc;
    if (VT == MVT::v8i16) {
      assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
      Opc = X86ISD::PINSRW;
    } else {
      assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
      assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
      Opc = X86ISD::PINSRB;
    }

    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(IdxVal, dl);
    return DAG.getNode(Opc, dl, VT, N0, N1, N2);
  }

  if (Subtarget.hasSSE41()) {
    if (EltVT == MVT::f32) {
      // Bits [7:6] of the constant are the source select. This will always be
      //   zero here. The DAG Combiner may combine an extract_elt index into
      //   these bits. For example (insert (extract, 3), 2) could be matched by
      //   putting the '3' into bits [7:6] of X86ISD::INSERTPS.
      // Bits [5:4] of the constant are the destination select. This is the
      //   value of the incoming immediate.
      // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
      //   combine either bitwise AND or insert of float 0.0 to set these bits.

      bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
      if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
        // If this is an insertion of 32-bits into the low 32-bits of
        // a vector, we prefer to generate a blend with immediate rather
        // than an insertps. Blends are simpler operations in hardware and so
        // will always have equal or better performance than insertps.
        // But if optimizing for size and there's a load folding opportunity,
        // generate insertps because blendps does not have a 32-bit memory
        // operand form.
        N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
                           DAG.getTargetConstant(1, dl, MVT::i8));
      }
      // Create this as a scalar-to-vector.
      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
      return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
                         DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
    }

    // PINSR* works with constant index.
    if (EltVT == MVT::i32 || EltVT == MVT::i64)
      return Op;
  }

  return SDValue();
}

static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT OpVT = Op.getSimpleValueType();

  // It's always cheaper to replace an xor+movd with xorps, and it simplifies
  // further combines.
  if (X86::isZeroNode(Op.getOperand(0)))
    return getZeroVector(OpVT, Subtarget, DAG, dl);

  // If the result is wider than 128 bits, first insert into a 128-bit
  // vector and then insert that into the full-width vector.
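  // For example, a v8i32 result is built as a v4i32 SCALAR_TO_VECTOR and then
  // inserted into the low 128 bits of an undef v8i32.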
  if (!OpVT.is128BitVector()) {
    // Insert into a 128-bit vector.
    unsigned SizeFactor = OpVT.getSizeInBits() / 128;
    MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
                                 OpVT.getVectorNumElements() / SizeFactor);

    Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));

    // Insert the 128-bit vector.
    return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
  }
  assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
         "Expected an SSE type!");

  // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
  if (OpVT == MVT::v4i32)
    return Op;

  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
  return DAG.getBitcast(
      OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
}

// Lower a node with an INSERT_SUBVECTOR opcode.  This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);

  return insert1BitVector(Op, DAG, Subtarget);
}

static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
         "Only vXi1 extract_subvectors need custom lowering");

  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);

  if (!isa<ConstantSDNode>(Idx))
    return SDValue();

  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  if (IdxVal == 0) // the operation is legal
    return Op;

  MVT VecVT = Vec.getSimpleValueType();
  unsigned NumElems = VecVT.getVectorNumElements();

  // Extend to natively supported kshift.
  MVT WideVecVT = VecVT;
  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
    WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
                      DAG.getUNDEF(WideVecVT), Vec,
                      DAG.getIntPtrConstant(0, dl));
  }

  // Shift to the LSB.
  Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
                    DAG.getTargetConstant(IdxVal, dl, MVT::i8));

  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
                     DAG.getIntPtrConstant(0, dl));
}

// Returns the appropriate wrapper opcode for a global reference.
unsigned X86TargetLowering::getGlobalWrapperKind(
    const GlobalValue *GV, const unsigned char OpFlags) const {
  // References to absolute symbols are never PC-relative.
  if (GV && GV->isAbsoluteSymbolRef())
    return X86ISD::Wrapper;

  CodeModel::Model M = getTargetMachine().getCodeModel();
  if (Subtarget.isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    return X86ISD::WrapperRIP;

  // GOTPCREL references must always use RIP.
  if (OpFlags == X86II::MO_GOTPCREL)
    return X86ISD::WrapperRIP;

  return X86ISD::Wrapper;
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above-mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetConstantPool(
      CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
  SDLoc DL(CP);
  Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
  }

  return Result;
}

SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
  SDLoc DL(JT);
  Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (OpFlag)
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);

  return Result;
}

SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {
  return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
}

SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  // Create the TargetBlockAddress node.
  unsigned char OpFlags =
    Subtarget.classifyBlockAddressReference();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
  SDLoc dl(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
  Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, PtrVT,
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
  }

  return Result;
}

/// Creates target global address or external symbol nodes for calls or
/// other uses.
SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
                                                 bool ForCall) const {
  // Unpack the global address or external symbol.
  const SDLoc &dl = SDLoc(Op);
  const GlobalValue *GV = nullptr;
  int64_t Offset = 0;
  const char *ExternalSym = nullptr;
  if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
    GV = G->getGlobal();
    Offset = G->getOffset();
  } else {
    const auto *ES = cast<ExternalSymbolSDNode>(Op);
    ExternalSym = ES->getSymbol();
  }

  // Calculate some flags for address lowering.
  const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
  unsigned char OpFlags;
  if (ForCall)
    OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
  else
    OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
  bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
  bool NeedsLoad = isGlobalStubReference(OpFlags);

  CodeModel::Model M = DAG.getTarget().getCodeModel();
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result;

  if (GV) {
    // Create a target global address if this is a global. If possible, fold the
    // offset into the global address reference. Otherwise, ADD it on later.
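    // For example, with the small code model a reference to "GV + 8" keeps the
    // 8 inside the TargetGlobalAddress node; an offset that is not suitable
    // for the code model is added with an explicit ISD::ADD below.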
    int64_t GlobalOffset = 0;
    if (OpFlags == X86II::MO_NO_FLAG &&
        X86::isOffsetSuitableForCodeModel(Offset, M)) {
      std::swap(GlobalOffset, Offset);
    }
    Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
  } else {
    // If this is not a global address, this must be an external symbol.
    Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
  }

  // If this is a direct call, avoid the wrapper if we don't need to do any
  // loads or adds. This allows SDAG ISel to match direct calls.
  if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
    return Result;

  Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (HasPICReg) {
    Result = DAG.getNode(ISD::ADD, dl, PtrVT,
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (NeedsLoad)
    Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
                         DAG.getConstant(Offset, dl, PtrVT));

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
}

static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags, bool LocalDynamic = false) {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDLoc dl(GA);
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);

  X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
                                           : X86ISD::TLSADDR;

  if (InFlag) {
    SDValue Ops[] = { Chain,  TGA, *InFlag };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
  } else {
    SDValue Ops[]  = { Chain, TGA };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
  }

  // TLSADDR will be codegen'ed as a call. Inform MFI that the function has
  // calls.
  MFI.setAdjustsStack(true);
  MFI.setHasCalls(true);

  SDValue Flag = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  SDValue InFlag;
  SDLoc dl(GA);  // ? function entry point might be better
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               SDLoc(), PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}

static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
                                           SelectionDAG &DAG,
                                           const EVT PtrVT,
                                           bool is64Bit) {
  SDLoc dl(GA);

  // Get the start address of the TLS block for this module.
  X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
      .getInfo<X86MachineFunctionInfo>();
  MFI->incNumLocalDynamicTLSAccesses();

  SDValue Base;
  if (is64Bit) {
    Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
                      X86II::MO_TLSLD, /*LocalDynamic=*/true);
  } else {
    SDValue InFlag;
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
        DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
    InFlag = Chain.getValue(1);
    Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
                      X86II::MO_TLSLDM, /*LocalDynamic=*/true);
  }

  // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
  // of Base.

  // Build x@dtpoff.
  unsigned char OperandFlags = X86II::MO_DTPOFF;
  unsigned WrapperKind = X86ISD::Wrapper;
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  // Add x@dtpoff with the base.
  return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit, bool isPIC) {
  SDLoc dl(GA);

  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));

  SDValue ThreadPointer =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
                  MachinePointerInfo(Ptr));

  unsigned char OperandFlags = 0;
  // Most TLS accesses are not RIP relative, even on x86-64.  One exception is
  // initialexec.
  unsigned WrapperKind = X86ISD::Wrapper;
  if (model == TLSModel::LocalExec) {
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (model == TLSModel::InitialExec) {
    if (is64Bit) {
      OperandFlags = X86II::MO_GOTTPOFF;
      WrapperKind = X86ISD::WrapperRIP;
    } else {
      OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
    }
  } else {
    llvm_unreachable("Unexpected model");
  }

  // emit "addl x@ntpoff,%eax" (local exec)
  // or "addl x@indntpoff,%eax" (initial exec)
  // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
  SDValue TGA =
      DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
                                 GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec) {
    if (isPIC && !is64Bit) {
      Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);
    }

    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  const GlobalValue *GV = GA->getGlobal();
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  bool PositionIndependent = isPositionIndependent();

  if (Subtarget.isTargetELF()) {
    TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
    switch (model) {
      case TLSModel::GeneralDynamic:
        if (Subtarget.is64Bit())
          return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
        return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
      case TLSModel::LocalDynamic:
        return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
                                           Subtarget.is64Bit());
      case TLSModel::InitialExec:
      case TLSModel::LocalExec:
        return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
                                   PositionIndependent);
    }
    llvm_unreachable("Unknown TLS model.");
  }

  if (Subtarget.isTargetDarwin()) {
    // Darwin only has one model of TLS.  Lower to that.
    unsigned char OpFlag = 0;
    unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
                           X86ISD::WrapperRIP : X86ISD::Wrapper;

    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
    // global base reg.
    bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
    if (PIC32)
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
    SDLoc DL(Op);
    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                                GA->getValueType(0),
                                                GA->getOffset(), OpFlag);
    SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);

    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);

    // Lowering the machine ISD will make sure everything is in the right
    // location.
    SDValue Chain = DAG.getEntryNode();
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
    SDValue Args[] = { Chain, Offset };
    Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
                               DAG.getIntPtrConstant(0, DL, true),
                               Chain.getValue(1), DL);

    // TLSCALL will be codegen'ed as a call. Inform MFI that the function has
    // calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setAdjustsStack(true);

    // And our return value (tls address) is in the standard call return value
    // location.
    unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
    return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
  }

  if (Subtarget.isOSWindows()) {
    // Just use the implicit TLS architecture
    // Need to generate something similar to:
    //   mov     rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
    //                                  ; from TEB
    //   mov     ecx, dword [rel _tls_index]; Load index (from C runtime)
    //   mov     rcx, qword [rdx+rcx*8]
    //   mov     eax, .tls$:tlsvar
    //   [rax+rcx] contains the address
    // Windows 64bit: gs:0x58
    // Windows 32bit: fs:__tls_array

    SDLoc dl(GA);
    SDValue Chain = DAG.getEntryNode();

    // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
    // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
    // use its literal value of 0x2C.
    Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
                                        ? Type::getInt8PtrTy(*DAG.getContext(),
                                                             256)
                                        : Type::getInt32PtrTy(*DAG.getContext(),
                                                              257));

    SDValue TlsArray = Subtarget.is64Bit()
                           ? DAG.getIntPtrConstant(0x58, dl)
                           : (Subtarget.isTargetWindowsGNU()
                                  ? DAG.getIntPtrConstant(0x2C, dl)
                                  : DAG.getExternalSymbol("_tls_array", PtrVT));

    SDValue ThreadPointer =
        DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));

    SDValue res;
    if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
      res = ThreadPointer;
    } else {
      // Load the _tls_index variable
      SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
      if (Subtarget.is64Bit())
        IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
                             MachinePointerInfo(), MVT::i32);
      else
        IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());

      auto &DL = DAG.getDataLayout();
      SDValue Scale =
          DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
      IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);

      res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
    }

    res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());

    // Get the offset of start of .tls section
    SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                             GA->getValueType(0),
                                             GA->getOffset(), X86II::MO_SECREL);
    SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);

    // The address of the thread local variable is the add of the thread
    // pointer with the offset of the variable.
    return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
  }

  llvm_unreachable("TLS not implemented for this target.");
}

/// Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
/// TODO: Can this be moved to general expansion code?
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  MVT VT = Op.getSimpleValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  // ISD::FSHL and ISD::FSHR have defined behavior for out-of-range shift
  // amounts, but ISD::SHL and ISD::SRA/SRL do not. Insert an AND to be safe;
  // it is optimized away during isel.
  SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                  DAG.getConstant(VTBits - 1, dl, MVT::i8));
  SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
                                     DAG.getConstant(VTBits - 1, dl, MVT::i8))
                       : DAG.getConstant(0, dl, VT);

  SDValue Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
  } else {
    Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
  }

  // If the shift amount is larger than or equal to the width of a part, we
  // can't rely on the results of shld/shrd. Insert a test and select the
  // appropriate values for large shift amounts.
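  // For example, with 32-bit parts and a SHL_PARTS amount of 40:
  // SafeShAmt = 40 & 31 = 8, so Tmp3 = Lo << 8, and because bit 5 of the
  // amount is set the selects below produce Hi = Lo << 8 and Lo = 0, which is
  // the full 64-bit value shifted left by 40.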
  SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                DAG.getConstant(VTBits, dl, MVT::i8));
  SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
                             DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);

  SDValue Hi, Lo;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
    Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
  } else {
    Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
    Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
  }

  return DAG.getMergeValues({ Lo, Hi }, dl);
}

static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
         "Unexpected funnel shift opcode!");

  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);

  bool IsFSHR = Op.getOpcode() == ISD::FSHR;

  if (VT.isVector()) {
    assert(Subtarget.hasVBMI2() && "Expected VBMI2");

    if (IsFSHR)
      std::swap(Op0, Op1);

    APInt APIntShiftAmt;
    if (X86::isConstantSplat(Amt, APIntShiftAmt)) {
      uint64_t ShiftAmt = APIntShiftAmt.urem(VT.getScalarSizeInBits());
      return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT, Op0,
                         Op1, DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
    }

    return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
                       Op0, Op1, Amt);
  }

  assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
         "Unexpected funnel shift type!");

  // Expand slow SHLD/SHRD cases if we are not optimizing for size.
  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
  if (!OptForSize && Subtarget.isSHLDSlow())
    return SDValue();

  if (IsFSHR)
    std::swap(Op0, Op1);

  // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
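  // x86 shld/shrd masks the count to 5 bits and the ISA documents counts of
  // 16 or more on 16-bit operands as undefined, whereas ISD::FSHL/FSHR take
  // the amount modulo the bit width, so reduce the amount explicitly here.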
  if (VT == MVT::i16)
    Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
                      DAG.getConstant(15, DL, Amt.getValueType()));

  unsigned SHDOp = (IsFSHR ? X86ISD::SHRD : X86ISD::SHLD);
  return DAG.getNode(SHDOp, DL, VT, Op0, Op1, Amt);
}

// Try to use a packed vector operation to handle i64 on 32-bit targets when
// AVX512DQ is enabled.
static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  assert((Op.getOpcode() == ISD::SINT_TO_FP ||
          Op.getOpcode() == ISD::UINT_TO_FP) && "Unexpected opcode!");
  SDValue Src = Op.getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = Op.getSimpleValueType();

  if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
      (VT != MVT::f32 && VT != MVT::f64))
    return SDValue();

  // Pack the i64 into a vector, do the operation and extract.

  // Use a 256-bit input vector so the result is 128 bits for the f32 case.
  unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
  MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
  MVT VecVT = MVT::getVectorVT(VT, NumElts);

  SDLoc dl(Op);
  SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
  SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
                     DAG.getIntPtrConstant(0, dl));
}

static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
                          const X86Subtarget &Subtarget) {
  switch (Opcode) {
    case ISD::SINT_TO_FP:
      // TODO: Handle wider types with AVX/AVX512.
      if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
        return false;
      // CVTDQ2PS or (V)CVTDQ2PD
      return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);

    case ISD::UINT_TO_FP:
      // TODO: Handle wider types and i64 elements.
      if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
        return false;
      // VCVTUDQ2PS or VCVTUDQ2PD
      return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;

    default:
      return false;
  }
}

/// Given a scalar cast operation that is extracted from a vector, try to
/// vectorize the cast op followed by extraction. This will avoid an expensive
/// round-trip between XMM and GPR.
static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  // TODO: This could be enhanced to handle smaller integer types by peeking
  // through an extend.
  SDValue Extract = Cast.getOperand(0);
  MVT DestVT = Cast.getSimpleValueType();
  if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isa<ConstantSDNode>(Extract.getOperand(1)))
    return SDValue();

  // See if we have a 128-bit vector cast op for this type of cast.
  SDValue VecOp = Extract.getOperand(0);
  MVT FromVT = VecOp.getSimpleValueType();
  unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
  MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
  MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
  if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
    return SDValue();

  // If we are extracting from a non-zero element, first shuffle the source
  // vector to allow extracting from element zero.
  SDLoc DL(Cast);
  if (!isNullConstant(Extract.getOperand(1))) {
    SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
    Mask[0] = Extract.getConstantOperandVal(1);
    VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
  }
  // If the source vector is wider than 128-bits, extract the low part. Do not
  // create an unnecessarily wide vector cast op.
  if (FromVT != Vec128VT)
    VecOp = extract128BitVector(VecOp, 0, DAG, DL);

  // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
  // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
  SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
                     DAG.getIntPtrConstant(0, DL));
}

SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  if (VT == MVT::f128)
    return LowerF128Call(Op, DAG, RTLIB::getSINTTOFP(SrcVT, VT));

  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
    return Extract;

  if (SrcVT.isVector()) {
    if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
      return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
                         DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
                                     DAG.getUNDEF(SrcVT)));
    }
    return SDValue();
  }

  assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(VT))
    return Op;
  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) && Subtarget.is64Bit())
    return Op;

  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
    return V;

  SDValue ValueToStore = Op.getOperand(0);
  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) &&
      !Subtarget.is64Bit())
    // Bitcasting to f64 here allows us to do a single 64-bit store from
    // an SSE register, avoiding the store forwarding penalty that would come
    // with two 32-bit stores.
    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);

  unsigned Size = SrcVT.getSizeInBits()/8;
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = getPointerTy(MF.getDataLayout());
  int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Chain = DAG.getStore(
      DAG.getEntryNode(), dl, ValueToStore, StackSlot,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
  return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}

SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
                                     SDValue StackSlot,
                                     SelectionDAG &DAG) const {
  // Build the FILD
  SDLoc DL(Op);
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);

  unsigned ByteSize = SrcVT.getSizeInBits() / 8;

  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
  MachineMemOperand *LoadMMO;
  if (FI) {
    int SSFI = FI->getIndex();
    LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
        MachineMemOperand::MOLoad, ByteSize, ByteSize);
  } else {
    LoadMMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
    StackSlot = StackSlot.getOperand(1);
  }
  SDValue FILDOps[] = {Chain, StackSlot};
  SDValue Result =
      DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, DL,
                              Tys, FILDOps, SrcVT, LoadMMO);

  if (useSSE) {
    Chain = Result.getValue(1);
    SDValue InFlag = Result.getValue(2);

    // FIXME: Currently the FST is glued to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned SSFISize = Op.getValueSizeInBits() / 8;
    int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
    Tys = DAG.getVTList(MVT::Other);
    SDValue FSTOps[] = {Chain, Result, StackSlot, InFlag};
    MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
        MachineMemOperand::MOStore, SSFISize, SSFISize);

    Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps,
                                    Op.getValueType(), StoreMMO);
    Result = DAG.getLoad(
        Op.getValueType(), DL, Chain, StackSlot,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
  }

  return Result;
}

/// Horizontal vector math instructions may be slower than normal math with
/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
/// implementation, and likely shuffle complexity of the alternate sequence.
static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  bool IsOptimizingSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool HasFastHOps = Subtarget.hasFastHorizontalOps();
  return !IsSingleSource || IsOptimizingSize || HasFastHOps;
}

/// 64-bit unsigned integer to double expansion.
static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  // This algorithm is not obvious. Here is what we're trying to output:
  /*
     movq       %rax,  %xmm0
     punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
     subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
     #ifdef __SSE3__
       haddpd   %xmm0, %xmm0
     #else
       pshufd   $0x4e, %xmm0, %xmm1
       addpd    %xmm1, %xmm0
     #endif
  */
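  // Why the magic constants work: the u64 is split into its 32-bit halves and
  // each half becomes the low mantissa bits of a double with a fixed exponent.
  // 0x4330000000000000 is 2^52, so (0x43300000, lo) reads back as 2^52 + lo,
  // and 0x4530000000000000 is 2^84, so (0x45300000, hi) reads back as
  // 2^84 + hi * 2^32. Subtracting the c1 constants leaves (lo, hi * 2^32) as
  // exact doubles, and the final horizontal add yields lo + hi * 2^32, the
  // original value (with a single rounding).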

  SDLoc dl(Op);
  LLVMContext *Context = DAG.getContext();

  // Build some magic constants.
  static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
  Constant *C0 = ConstantDataVector::get(*Context, CV0);
  auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);

  SmallVector<Constant*,2> CV1;
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
                                      APInt(64, 0x4330000000000000ULL))));
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
                                      APInt(64, 0x4530000000000000ULL))));
  Constant *C1 = ConstantVector::get(CV1);
  SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);

  // Load the 64-bit value into an XMM register.
  SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                            Op.getOperand(0));
  SDValue CLod0 =
      DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
                  /* Alignment = */ 16);
  SDValue Unpck1 =
      getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);

  SDValue CLod1 =
      DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
                  /* Alignment = */ 16);
  SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
  SDValue Result;

  if (Subtarget.hasSSE3() && shouldUseHorizontalOp(true, DAG, Subtarget)) {
    Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
  } else {
    SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
    Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
  }

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
                     DAG.getIntPtrConstant(0, dl));
}

/// 32-bit unsigned integer to float expansion.
static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  // FP constant to bias correct the final result.
  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
                                   MVT::f64);
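  // 0x4330000000000000 is the bit pattern of 2^52; OR-ing a 32-bit integer
  // into the low mantissa bits of 2^52 (done below) gives exactly 2^52 + x,
  // so subtracting the bias afterwards recovers x without rounding.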

  // Load the 32-bit value into an XMM register.
  SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
                             Op.getOperand(0));

  // Zero out the upper parts of the register.
  Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);

  Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                     DAG.getBitcast(MVT::v2f64, Load),
                     DAG.getIntPtrConstant(0, dl));

  // Or the load with the bias.
  SDValue Or = DAG.getNode(
      ISD::OR, dl, MVT::v2i64,
      DAG.getBitcast(MVT::v2i64,
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)),
      DAG.getBitcast(MVT::v2i64,
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
  Or =
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                  DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));

  // Subtract the bias.
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);

  // Handle final rounding.
  return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
}

static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget,
                                     const SDLoc &DL) {
  if (Op.getSimpleValueType() != MVT::v2f64)
    return SDValue();

  SDValue N0 = Op.getOperand(0);
  assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");

  // Legalize to v4i32 type.
  N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
                   DAG.getUNDEF(MVT::v2i32));

  if (Subtarget.hasAVX512())
    return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);

  // Same implementation as VectorLegalizer::ExpandUINT_TO_FLOAT,
  // but using v2i32 to v2f64 with X86ISD::CVTSI2P.
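  // The conversion is done per 16-bit half:
  //   result = (double)(v >> 16) * 2^16 + (double)(v & 0xffff)
  // Each half is below 2^16, so it is exact and safe to convert with the
  // signed CVTSI2P.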
  SDValue HalfWord = DAG.getConstant(16, DL, MVT::v4i32);
  SDValue HalfWordMask = DAG.getConstant(0x0000FFFF, DL, MVT::v4i32);

  // Two to the power of half-word-size.
  SDValue TWOHW = DAG.getConstantFP((double)(1 << 16), DL, MVT::v2f64);

  // Clear upper part of LO, lower HI.
  SDValue HI = DAG.getNode(ISD::SRL, DL, MVT::v4i32, N0, HalfWord);
  SDValue LO = DAG.getNode(ISD::AND, DL, MVT::v4i32, N0, HalfWordMask);

  SDValue fHI = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, HI);
          fHI = DAG.getNode(ISD::FMUL, DL, MVT::v2f64, fHI, TWOHW);
  SDValue fLO = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, LO);

  // Add the two halves.
  return DAG.getNode(ISD::FADD, DL, MVT::v2f64, fHI, fLO);
}

static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  // The algorithm is the following:
  // #ifdef __SSE4_1__
  //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
  //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
  //                                 (uint4) 0x53000000, 0xaa);
  // #else
  //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
  //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
  // #endif
  //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  //     return (float4) lo + fhi;
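  //
  // Why this works: 0x4b000000 is the float 2^23 and 0x53000000 is 2^39, so
  // lo reads back as 2^23 + (v & 0xffff) and hi as 2^39 + (v >> 16) * 2^16.
  // Subtracting (2^39 + 2^23) from hi and then adding lo cancels both biases,
  // leaving exactly (v >> 16) * 2^16 + (v & 0xffff) == v.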

  // We shouldn't use it when unsafe-fp-math is enabled though: we might later
  // reassociate the two FADDs, and if we do that, the algorithm fails
  // spectacularly (PR24512).
  // FIXME: If we ever have some kind of Machine FMF, this should be marked
  // as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
  // there's also the MachineCombiner reassociations happening on Machine IR.
  if (DAG.getTarget().Options.UnsafeFPMath)
    return SDValue();

  SDLoc DL(Op);
  SDValue V = Op->getOperand(0);
  MVT VecIntVT = V.getSimpleValueType();
  bool Is128 = VecIntVT == MVT::v4i32;
  MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
  // If we convert to something other than the supported type, e.g., to v4f64,
  // abort early.
  if (VecFloatVT != Op->getSimpleValueType(0))
    return SDValue();

  assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
         "Unsupported custom type");

  // In the #ifdef/#else code, we have in common:
  // - The vector of constants:
  // -- 0x4b000000
  // -- 0x53000000
  // - A shift:
  // -- v >> 16

  // Create the splat vector for 0x4b000000.
  SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
  // Create the splat vector for 0x53000000.
  SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);

  // Create the right shift.
  SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
  SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);

  SDValue Low, High;
  if (Subtarget.hasSSE41()) {
    MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
    //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
    SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
    SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
    // Low will be bitcasted right away, so do not bother bitcasting back to its
    // original type.
    Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
                      VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
    //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
    //                                 (uint4) 0x53000000, 0xaa);
    SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
    SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
    // High will be bitcasted right away, so do not bother bitcasting back to
    // its original type.
    High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
                       VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
  } else {
    SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
    //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
    SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
    Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);

    //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
    High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
  }

  // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
  SDValue VecCstFAdd = DAG.getConstantFP(
      APFloat(APFloat::IEEEsingle(), APInt(32, 0xD3000080)), DL, VecFloatVT);

  //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue FHigh =
      DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
  //     return (float4) lo + fhi;
  SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
  return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
}

static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  SDValue N0 = Op.getOperand(0);
  MVT SrcVT = N0.getSimpleValueType();
  SDLoc dl(Op);

  switch (SrcVT.SimpleTy) {
  default:
    llvm_unreachable("Custom UINT_TO_FP is not supported!");
  case MVT::v2i32:
    return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
  case MVT::v4i32:
  case MVT::v8i32:
    assert(!Subtarget.hasAVX512());
    return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
  }
}

SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  MVT SrcVT = N0.getSimpleValueType();
  MVT DstVT = Op.getSimpleValueType();

  if (DstVT == MVT::f128)
    return LowerF128Call(Op, DAG, RTLIB::getUINTTOFP(SrcVT, DstVT));

  if (DstVT.isVector())
    return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);

  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
    return Extract;

  if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
      (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
    // Conversions from unsigned i32 to f32/f64 are legal,
    // using VCVTUSI2SS/SD.  Same for i64 in 64-bit mode.
    return Op;
  }

  // Promote i32 to i64 and use a signed conversion on 64-bit targets.
  if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, N0);
    return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, N0);
  }

  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
    return V;

  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
  if (SrcVT == MVT::i32 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
  if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
    return SDValue();

  // Make a 64-bit buffer, and use it to build an FILD.
  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
  if (SrcVT == MVT::i32) {
    SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
    SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                                  StackSlot, MachinePointerInfo());
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
                                  OffsetSlot, MachinePointerInfo());
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue ValueToStore = Op.getOperand(0);
  if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
    // Bitcasting to f64 here allows us to do a single 64-bit store from
    // an SSE register, avoiding the store forwarding penalty that would come
    // with two 32-bit stores.
    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
                               MachinePointerInfo());
  // For i64 source, we need to add the appropriate power of 2 if the input
  // was negative.  This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
  // we must be careful to do the computation in x87 extended precision, not
  // in SSE. (The generic code can't know it's OK to do this, or how to.)
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
      MachineMemOperand::MOLoad, 8, 8);

  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot };
  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
                                         MVT::i64, MMO);

  APInt FF(32, 0x5F800000ULL);
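  // 0x5F800000 is the single-precision encoding of 2^64. FILD converts the
  // stored i64 as a signed value, so if the sign bit was set the result is
  // 2^64 too small and the fudge factor below adds it back.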

  // Check whether the sign bit is set.
  SDValue SignSet = DAG.getSetCC(
      dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
      Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);

  // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
  SDValue FudgePtr = DAG.getConstantPool(
      ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);

  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
  SDValue Zero = DAG.getIntPtrConstant(0, dl);
  SDValue Four = DAG.getIntPtrConstant(4, dl);
  SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Zero, Four);
  FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);

  // Load the value out, extending it from f32 to f80.
  // FIXME: Avoid the extend by constructing the right constant pool?
  SDValue Fudge = DAG.getExtLoad(
      ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
      /* Alignment = */ 4);
  // Extend everything to 80 bits to force it to be done on x87.
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
                     DAG.getIntPtrConstant(0, dl));
}

// If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
// is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
// just return an SDValue().
// Otherwise it is assumed to be a conversion from one of f32, f64 or f80
// to i16, i32 or i64, and we lower it to a legal sequence and return the
// result.
SDValue
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                   bool IsSigned) const {
  SDLoc DL(Op);

  EVT DstTy = Op.getValueType();
  EVT TheVT = Op.getOperand(0).getValueType();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
    // f16 must be promoted before using the lowering in this routine.
    // fp128 does not use this lowering.
    return SDValue();
  }

  // If using FIST to compute an unsigned i64, we'll need some fixup
  // to handle values above the maximum signed i64.  A FIST is always
  // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
  bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;

  if (!IsSigned && DstTy != MVT::i64) {
    // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
    // The low 32 bits of the fist result will have the correct uint32 result.
    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
    DstTy = MVT::i64;
  }

  assert(DstTy.getSimpleVT() <= MVT::i64 &&
         DstTy.getSimpleVT() >= MVT::i16 &&
         "Unknown FP_TO_INT to lower!");

  // We lower FP->int64 into FISTP64 followed by a load from a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = DstTy.getStoreSize();
  int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);

  SDValue Chain = DAG.getEntryNode();
  SDValue Value = Op.getOperand(0);
  SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.

  if (UnsignedFixup) {
    //
    // Conversion to unsigned i64 is implemented with a select,
    // depending on whether the source value fits in the range
    // of a signed i64.  Let Thresh be the FP equivalent of
    // 0x8000000000000000ULL.
    //
    //  Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
    //  FistSrc    = (Value < Thresh) ? Value : (Value - Thresh);
    //  Fist-to-mem64 FistSrc
    //  Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
    //  to XOR'ing the high 32 bits with Adjust.
    //
    // Being a power of 2, Thresh is exactly representable in all FP formats.
    // For X87 we'd like to use the smallest FP type for this constant, but
    // for DAG type consistency we have to match the FP operand type.
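    //
    // For example, converting 2^63 + 5: Value >= Thresh, so Adjust is
    // 0x8000000000000000 and FistSrc = Value - 2^63 = 5. The FIST stores 5,
    // and XOR'ing that result with Adjust restores 0x8000000000000005.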

    APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
    LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
    bool LosesInfo = false;
    if (TheVT == MVT::f64)
      // The rounding mode is irrelevant as the conversion should be exact.
      Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
                              &LosesInfo);
    else if (TheVT == MVT::f80)
      Status = Thresh.convert(APFloat::x87DoubleExtended(),
                              APFloat::rmNearestTiesToEven, &LosesInfo);

    assert(Status == APFloat::opOK && !LosesInfo &&
           "FP conversion should have been exact");

    SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);

    SDValue Cmp = DAG.getSetCC(DL,
                               getSetCCResultType(DAG.getDataLayout(),
                                                  *DAG.getContext(), TheVT),
                               Value, ThreshVal, ISD::SETLT);
    Adjust = DAG.getSelect(DL, MVT::i64, Cmp,
                           DAG.getConstant(0, DL, MVT::i64),
                           DAG.getConstant(APInt::getSignMask(64),
                                           DL, MVT::i64));
    SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
    Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
                                              *DAG.getContext(), TheVT),
                       Value, ThreshVal, ISD::SETLT);
    Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
  }

  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);

  // FIXME This causes a redundant load/store if the SSE-class value is already
  // in memory, such as if it is on the callstack.
  if (isScalarFPTypeInSSEReg(TheVT)) {
    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
    SDVTList Tys = DAG.getVTList(TheVT, MVT::Other);
    SDValue Ops[] = { Chain, StackSlot };

    unsigned FLDSize = TheVT.getStoreSize();
    assert(FLDSize <= MemSize && "Stack slot not big enough");
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MPI, MachineMemOperand::MOLoad, FLDSize, FLDSize);
    Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
    Chain = Value.getValue(1);
  }

  // Build the FP_TO_INT*_IN_MEM
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MPI, MachineMemOperand::MOStore, MemSize, MemSize);
  SDValue Ops[] = { Chain, Value, StackSlot };
  SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
                                         DAG.getVTList(MVT::Other),
                                         Ops, DstTy, MMO);

  SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);

  // If we need an unsigned fixup, XOR the result with adjust.
  if (UnsignedFixup)
    Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);

  return Res;
}

static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();

  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
  assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
         "Unexpected extension opcode");
  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Expected same number of elements");
  assert((VT.getVectorElementType() == MVT::i16 ||
          VT.getVectorElementType() == MVT::i32 ||
          VT.getVectorElementType() == MVT::i64) &&
         "Unexpected element type");
  assert((InVT.getVectorElementType() == MVT::i8 ||
          InVT.getVectorElementType() == MVT::i16 ||
          InVT.getVectorElementType() == MVT::i32) &&
         "Unexpected element type");

  unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);

  // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
  if (InVT == MVT::v8i8) {
    if (VT != MVT::v8i64)
      return SDValue();

    In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
                     MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
    return DAG.getNode(ExtendInVecOpc, dl, VT, In);
  }

  if (Subtarget.hasInt256())
    return Op;

  // Optimize vectors in AVX mode:
  //
  //   v8i16 -> v8i32
  //   Use vpmovzwd for 4 lower elements  v8i16 -> v4i32.
  //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
  //   Concat upper and lower parts.
  //
  //   v4i32 -> v4i64
  //   Use vpmovzdq for 4 lower elements  v4i32 -> v2i64.
  //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
  //   Concat upper and lower parts.
  //
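  // The unpack-high trick works because interleaving the upper half of In
  // with zeros (or undef for ANY_EXTEND) and reinterpreting the result at
  // twice the element width yields exactly the extended upper elements.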
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);

  // Short-circuit if we can determine that each 128-bit half is the same value.
  // Otherwise, this is difficult to match and optimize.
  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
    if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);

  SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
  SDValue Undef = DAG.getUNDEF(InVT);
  bool NeedZero = Opc == ISD::ZERO_EXTEND;
  SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
  OpHi = DAG.getBitcast(HalfVT, OpHi);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}

// Helper to split and extend a v16i1 mask to v16i8 or v16i16.
static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
                                   const SDLoc &dl, SelectionDAG &DAG) {
  assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
                           DAG.getIntPtrConstant(0, dl));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
                           DAG.getIntPtrConstant(8, dl));
  Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
  Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}

static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
  SDLoc DL(Op);
  unsigned NumElts = VT.getVectorNumElements();

  // For all vectors except vXi8 we can just emit a sign_extend and a shift.
  // This avoids a constant pool load.
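  // A set i1 lane sign-extends to all ones, and the logical shift right by
  // (scalar size - 1) then leaves exactly 1, while a clear lane stays 0.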
  if (VT.getVectorElementType() != MVT::i8) {
    SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
    return DAG.getNode(ISD::SRL, DL, VT, Extend,
                       DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
  }

  // Extend VT if BWI is not supported.
  MVT ExtVT = VT;
  if (!Subtarget.hasBWI()) {
    // If v16i32 is to be avoided, we'll need to split and concatenate.
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
      return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);

    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
  }

  // Widen to 512-bits if VLX is not supported.
  MVT WideVT = ExtVT;
  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
    NumElts *= 512 / ExtVT.getSizeInBits();
    InVT = MVT::getVectorVT(MVT::i1, NumElts);
    In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
                     In, DAG.getIntPtrConstant(0, DL));
    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
                              NumElts);
  }

  SDValue One = DAG.getConstant(1, DL, WideVT);
  SDValue Zero = DAG.getConstant(0, DL, WideVT);

  SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);

  // Truncate if we had to extend above.
  if (VT != ExtVT) {
    WideVT = MVT::getVectorVT(MVT::i8, NumElts);
    SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
  }

  // Extract back to 128/256-bit if we widened.
  if (WideVT != VT)
    SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
                              DAG.getIntPtrConstant(0, DL));

  return SelectedVal;
}

static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  if (SVT.getVectorElementType() == MVT::i1)
    return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);

  assert(Subtarget.hasAVX() && "Expected AVX support");
  return LowerAVXExtend(Op, DAG, Subtarget);
}

/// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
/// It makes use of the fact that vectors with enough leading sign/zero bits
/// prevent the PACKSS/PACKUS from saturating the results.
/// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
/// within each 128-bit lane.
static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
                                      const SDLoc &DL, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
         "Unexpected PACK opcode");
  assert(DstVT.isVector() && "VT not a vector?");

  // Requires SSE2 but AVX512 has fast vector truncate.
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT SrcVT = In.getValueType();

  // No truncation required, we might get here due to recursive calls.
  if (SrcVT == DstVT)
    return In;

  // We only support vector truncation to 64bits or greater from a
  // 128bits or greater source.
  unsigned DstSizeInBits = DstVT.getSizeInBits();
  unsigned SrcSizeInBits = SrcVT.getSizeInBits();
  if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
    return SDValue();

  unsigned NumElems = SrcVT.getVectorNumElements();
  if (!isPowerOf2_32(NumElems))
    return SDValue();

  LLVMContext &Ctx = *DAG.getContext();
  assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
  assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");

  EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);

  // Pack to the largest type possible:
  // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
  EVT InVT = MVT::i16, OutVT = MVT::i8;
  if (SrcVT.getScalarSizeInBits() > 16 &&
      (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
    InVT = MVT::i32;
    OutVT = MVT::i16;
  }

  // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
  if (SrcVT.is128BitVector()) {
    InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
    OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
    In = DAG.getBitcast(InVT, In);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
    Res = extractSubVector(Res, 0, DAG, DL, 64);
    return DAG.getBitcast(DstVT, Res);
  }

  // Extract lower/upper subvectors.
  unsigned NumSubElts = NumElems / 2;
  SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
  SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);

  unsigned SubSizeInBits = SrcSizeInBits / 2;
  InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
  OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());

  // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
  if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
    return DAG.getBitcast(DstVT, Res);
  }

  // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
  // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
  if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);

    // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
    // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
    // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
    SmallVector<int, 64> Mask;
    int Scale = 64 / OutVT.getScalarSizeInBits();
    scaleShuffleMask<int>(Scale, ArrayRef<int>({ 0, 2, 1, 3 }), Mask);
    Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);

    if (DstVT.is256BitVector())
      return DAG.getBitcast(DstVT, Res);

    // If 512bit -> 128bit truncate another stage.
    EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
    Res = DAG.getBitcast(PackedVT, Res);
    return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
  }

  // Recursively pack lower/upper subvectors, concat result and pack again.
  assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
  EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
  Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
  Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);

  PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
  return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
}

static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {

  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();

  assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");

  // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
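  // Truncation to i1 keeps only the LSB of each element, so once the LSB is
  // in the sign position a signed "0 > x" compare is true exactly for the
  // lanes whose original LSB was set.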
  unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
  if (InVT.getScalarSizeInBits() <= 16) {
    if (Subtarget.hasBWI()) {
      // legal, will go to VPMOVB2M, VPMOVW2M
      if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
        // We need to shift to get the lsb into sign position.
        // Shifting packed bytes is not supported natively; bitcast to words.
        MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
        In = DAG.getNode(ISD::SHL, DL, ExtVT,
                         DAG.getBitcast(ExtVT, In),
                         DAG.getConstant(ShiftInx, DL, ExtVT));
        In = DAG.getBitcast(InVT, In);
      }
      return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
                          In, ISD::SETGT);
    }
    // Use TESTD/Q, extended vector to packed dword/qword.
    assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
           "Unexpected vector type.");
    unsigned NumElts = InVT.getVectorNumElements();
    assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
    // We need to change to a wider element type that we have support for.
    // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
    // For 16 element vectors we extend to v16i32 unless we are explicitly
    // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
    // we need to split into two 8 element vectors which we can extend to v8i32,
    // truncate and concat the results. There's an additional complication if
    // the original type is v16i8. In that case we can't split the v16i8 so
    // first we pre-extend it to v16i16 which we can split to v8i16, then extend
    // to v8i32, truncate that to v8i1 and concat the two halves.
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
      if (InVT == MVT::v16i8) {
        // First we need to sign extend up to 256-bits so we can split that.
        InVT = MVT::v16i16;
        In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
      }
      SDValue Lo = extract128BitVector(In, 0, DAG, DL);
      SDValue Hi = extract128BitVector(In, 8, DAG, DL);
      // We're split now, just emit two truncates and a concat. The two
      // truncates will trigger legalization to come back to this function.
      Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
      Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
    }
    // We either have 8 elements or we're allowed to use 512-bit vectors.
    // If we have VLX, we want to use the narrowest vector that can get the
    // job done so we use vXi32.
    MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
    MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
    In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
    InVT = ExtVT;
    ShiftInx = InVT.getScalarSizeInBits() - 1;
  }

  if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
    // We need to shift to get the lsb into sign position.
    In = DAG.getNode(ISD::SHL, DL, InVT, In,
                     DAG.getConstant(ShiftInx, DL, InVT));
  }
  // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
  if (Subtarget.hasDQI())
    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
  return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
}

SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();
  unsigned InNumEltBits = InVT.getScalarSizeInBits();

  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Invalid TRUNCATE operation");

  // If we're called by the type legalizer, handle a few cases.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(InVT)) {
    if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
        VT.is128BitVector()) {
      assert(Subtarget.hasVLX() && "Unexpected subtarget!");
      // The default behavior is to truncate one step, concatenate, and then
      // truncate the remainder. We'd rather produce two 64-bit results and
      // concatenate those.
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(In, DL);

      EVT LoVT, HiVT;
      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);

      Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
      Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
    }

    // Otherwise let default legalization handle it.
    return SDValue();
  }

  if (VT.getVectorElementType() == MVT::i1)
    return LowerTruncateVecI1(Op, DAG, Subtarget);

  // vpmovqb/w/d, vpmovdb/w, vpmovwb
  if (Subtarget.hasAVX512()) {
    // word to byte only under BWI. Otherwise we have to promote to v16i32
    // and then truncate that. But we should only do that if we haven't been
    // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
    // handled by isel patterns.
    if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
        Subtarget.canExtendTo512DQ())
      return Op;
  }

  unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;

  // Truncate with PACKUS if we are truncating a vector with leading zero bits
  // that extend all the way to the packed/truncated value.
  // Pre-SSE41 we can only use PACKUSWB.
  KnownBits Known = DAG.computeKnownBits(In);
  if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
    if (SDValue V =
            truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
      return V;

  // Truncate with PACKSS if we are truncating a vector with sign-bits that
  // extend all the way to the packed/truncated value.
  if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
    if (SDValue V =
            truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
      return V;

  // Handle truncation of V256 to V128 using shuffles.
  assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");

  if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
    // On AVX2, v4i64 -> v4i32 becomes VPERMD.
    if (Subtarget.hasInt256()) {
      static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
      In = DAG.getBitcast(MVT::v8i32, In);
      In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
                         DAG.getIntPtrConstant(0, DL));
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(0, DL));
    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(2, DL));
    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
    static const int ShufMask[] = {0, 2, 4, 6};
    return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
  }

  if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
    if (Subtarget.hasInt256()) {
      In = DAG.getBitcast(MVT::v32i8, In);

      // The PSHUFB mask:
      static const int ShufMask1[] = { 0,  1,  4,  5,  8,  9, 12, 13,
                                      -1, -1, -1, -1, -1, -1, -1, -1,
                                      16, 17, 20, 21, 24, 25, 28, 29,
                                      -1, -1, -1, -1, -1, -1, -1, -1 };
      In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
      In = DAG.getBitcast(MVT::v4i64, In);

      static const int ShufMask2[] = {0,  2,  -1,  -1};
      In = DAG.getVectorShuffle(MVT::v4i64, DL,  In, In, ShufMask2);
      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                       DAG.getIntPtrConstant(0, DL));
      return DAG.getBitcast(VT, In);
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(0, DL));

    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(4, DL));

    OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
    OpHi = DAG.getBitcast(MVT::v16i8, OpHi);

    // The PSHUFB mask:
    static const int ShufMask1[] = {0,  1,  4,  5,  8,  9, 12, 13,
                                   -1, -1, -1, -1, -1, -1, -1, -1};

    OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
    OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);

    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);

    // The MOVLHPS Mask:
    static const int ShufMask2[] = {0, 1, 4, 5};
    SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
    return DAG.getBitcast(MVT::v8i16, res);
  }

  if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
    // Use an AND to zero the upper bits for PACKUS.
    In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));

    SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
                               DAG.getIntPtrConstant(0, DL));
    SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
                               DAG.getIntPtrConstant(8, DL));
    return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
  }

  llvm_unreachable("All 256->128 cases should have been handled above!");
}

SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
  MVT VT = Op.getSimpleValueType();
  SDValue Src = Op.getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();
  SDLoc dl(Op);

  if (SrcVT == MVT::f128) {
    RTLIB::Libcall LC;
    if (Op.getOpcode() == ISD::FP_TO_SINT)
      LC = RTLIB::getFPTOSINT(SrcVT, VT);
    else
      LC = RTLIB::getFPTOUINT(SrcVT, VT);

    MakeLibCallOptions CallOptions;
    return makeLibCall(DAG, LC, VT, Src, CallOptions, SDLoc(Op)).first;
  }

  if (VT.isVector()) {
    if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
      MVT ResVT = MVT::v4i32;
      MVT TruncVT = MVT::v4i1;
      unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
      if (!IsSigned && !Subtarget.hasVLX()) {
        // Widen to 512-bits.
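        // Without AVX512VL the unsigned conversion is only available on
        // 512-bit vectors, so widen the v2f64 source to v8f64 and narrow the
        // mask result back down afterwards.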
        ResVT = MVT::v8i32;
        TruncVT = MVT::v8i1;
        Opc = ISD::FP_TO_UINT;
        Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64,
                          DAG.getUNDEF(MVT::v8f64),
                          Src, DAG.getIntPtrConstant(0, dl));
      }
      SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
      Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
                         DAG.getIntPtrConstant(0, dl));
    }

    assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
    if (VT == MVT::v2i64 && SrcVT  == MVT::v2f32) {
      return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
                         DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
                                     DAG.getUNDEF(MVT::v2f32)));
    }

    return SDValue();
  }

  assert(!VT.isVector());

  bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);

  if (!IsSigned && UseSSEReg) {
    // Conversions from f32/f64 with AVX512 should be legal.
    if (Subtarget.hasAVX512())
      return Op;

    // Use default expansion for i64.
    if (VT == MVT::i64)
      return SDValue();

    assert(VT == MVT::i32 && "Unexpected VT!");

    // Promote i32 to i64 and use a signed operation on 64-bit targets.
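    // Any input that is in range for an unsigned 32-bit result also fits in
    // the non-negative range of a signed 64-bit conversion, so the truncated
    // i64 result has the correct low 32 bits; out-of-range inputs are
    // undefined for FP_TO_UINT anyway.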
    if (Subtarget.is64Bit()) {
      SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
      return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
    }

    // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
    // use fisttp which will be handled later.
    if (!Subtarget.hasSSE3())
      return SDValue();
  }

  // Promote i16 to i32 if we can use a SSE operation.
  if (VT == MVT::i16 && UseSSEReg) {
    assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
    SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
  }

  // If this is an FP_TO_SINT using SSEReg we're done.
  if (UseSSEReg && IsSigned)
    return Op;

  // Fall back to X87.
  if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned))
    return V;

  llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
}

SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  if (VT == MVT::f128) {
    RTLIB::Libcall LC = RTLIB::getFPEXT(SVT, VT);
    return LowerF128Call(Op, DAG, LC);
  }

  assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");

  return DAG.getNode(X86ISD::VFPEXT, DL, VT,
                     DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
                                 In, DAG.getUNDEF(SVT)));
}

SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  // It's legal except when f128 is involved
  if (SVT != MVT::f128)
    return Op;

  RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, VT);

  // FP_ROUND node has a second operand indicating whether it is known to be
  // precise. That doesn't take part in the LibCall so we can't directly use
  // LowerF128Call.
  MakeLibCallOptions CallOptions;
  return makeLibCall(DAG, LC, VT, In, CallOptions, SDLoc(Op)).first;
}

// FIXME: This is a hack to allow FP_ROUND to be marked Custom without breaking
// the default expansion of STRICT_FP_ROUND.
static SDValue LowerSTRICT_FP_ROUND(SDValue Op, SelectionDAG &DAG) {
  // FIXME: Need to form a libcall with an input chain for f128.
  assert(Op.getOperand(0).getValueType() != MVT::f128 &&
         "Don't know how to handle f128 yet!");
  return Op;
}

/// Depending on uarch and/or optimizing for size, we might prefer to use a
/// vector operation in place of the typical scalar operation.
static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  // If both operands have other uses, this is probably not profitable.
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  if (!LHS.hasOneUse() && !RHS.hasOneUse())
    return Op;

  // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
  bool IsFP = Op.getSimpleValueType().isFloatingPoint();
  if (IsFP && !Subtarget.hasSSE3())
    return Op;
  if (!IsFP && !Subtarget.hasSSSE3())
    return Op;

  // Extract from a common vector.
  if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      LHS.getOperand(0) != RHS.getOperand(0) ||
      !isa<ConstantSDNode>(LHS.getOperand(1)) ||
      !isa<ConstantSDNode>(RHS.getOperand(1)) ||
      !shouldUseHorizontalOp(true, DAG, Subtarget))
    return Op;

  // Allow commuted 'hadd' ops.
  // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
  unsigned HOpcode;
  switch (Op.getOpcode()) {
    case ISD::ADD: HOpcode = X86ISD::HADD; break;
    case ISD::SUB: HOpcode = X86ISD::HSUB; break;
    case ISD::FADD: HOpcode = X86ISD::FHADD; break;
    case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
    default:
      llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
  }
  unsigned LExtIndex = LHS.getConstantOperandVal(1);
  unsigned RExtIndex = RHS.getConstantOperandVal(1);
  if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
      (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
    std::swap(LExtIndex, RExtIndex);

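  // Horizontal ops combine adjacent element pairs (2*i, 2*i+1), so the two
  // extracts must reference an even index and the odd index next to it.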
  if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
    return Op;

  SDValue X = LHS.getOperand(0);
  EVT VecVT = X.getValueType();
  unsigned BitWidth = VecVT.getSizeInBits();
  unsigned NumLanes = BitWidth / 128;
  unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
  assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
         "Not expecting illegal vector widths here");

  // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
  // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
  SDLoc DL(Op);
  if (BitWidth == 256 || BitWidth == 512) {
    unsigned LaneIdx = LExtIndex / NumEltsPerLane;
    X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
    LExtIndex %= NumEltsPerLane;
  }

  // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
  // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
  // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
  // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
  SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
                     DAG.getIntPtrConstant(LExtIndex / 2, DL));
}

/// Depending on uarch and/or optimizing for size, we might prefer to use a
/// vector operation in place of the typical scalar operation.
SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType() == MVT::f128) {
    RTLIB::Libcall LC = Op.getOpcode() == ISD::FADD ? RTLIB::ADD_F128
                                                    : RTLIB::SUB_F128;
    return LowerF128Call(Op, DAG, LC);
  }

  assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
         "Only expecting float/double");
  return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
}

/// The only differences between FABS and FNEG are the mask and the logic op.
/// FNEG also has a folding opportunity for FNEG(FABS(x)).
static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
  assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
         "Wrong opcode for lowering FABS or FNEG.");

  bool IsFABS = (Op.getOpcode() == ISD::FABS);

  // If this is a FABS and it has an FNEG user, bail out to fold the combination
  // into an FNABS. We'll lower the FABS after that if it is still in use.
  if (IsFABS)
    for (SDNode *User : Op->uses())
      if (User->getOpcode() == ISD::FNEG)
        return Op;

  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  bool IsF128 = (VT == MVT::f128);
  assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
          VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
          VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
         "Unexpected type in LowerFABSorFNEG");

  // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
  // decide if we should generate a 16-byte constant mask when we only need 4 or
  // 8 bytes for the scalar case.

  // There are no scalar bitwise logical SSE/AVX instructions, so we
  // generate a 16-byte vector constant and logic op even for the scalar case.
  // Using a 16-byte mask allows folding the load of the mask with
  // the logic op, so it can save (~4 bytes) on code size.
  bool IsFakeVector = !VT.isVector() && !IsF128;
  MVT LogicVT = VT;
  if (IsFakeVector)
    LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;

  unsigned EltBits = VT.getScalarSizeInBits();
  // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
  APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
                           APInt::getSignMask(EltBits);
  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
  SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);

  SDValue Op0 = Op.getOperand(0);
  bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
  unsigned LogicOp = IsFABS  ? X86ISD::FAND :
                     IsFNABS ? X86ISD::FOR  :
                               X86ISD::FXOR;
  SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;

  if (VT.isVector() || IsF128)
    return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);

  // For the scalar case extend to a 128-bit vector, perform the logic op,
  // and extract the scalar result back out.
  Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
  SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
                     DAG.getIntPtrConstant(0, dl));
}

static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
  SDValue Mag = Op.getOperand(0);
  SDValue Sign = Op.getOperand(1);
  SDLoc dl(Op);

  // If the sign operand is smaller, extend it first.
  MVT VT = Op.getSimpleValueType();
  if (Sign.getSimpleValueType().bitsLT(VT))
    Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);

  // And if it is bigger, shrink it first.
  if (Sign.getSimpleValueType().bitsGT(VT))
    Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));

  // At this point the operands and the result should have the same
  // type, and that won't be f80 since that is not custom lowered.
  bool IsF128 = (VT == MVT::f128);
  assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
          VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
          VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
         "Unexpected type in LowerFCOPYSIGN");

  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);

  // Perform all scalar logic operations as 16-byte vectors because there are no
  // scalar FP logic instructions in SSE.
  // TODO: This isn't necessary. If we used scalar types, we might avoid some
  // unnecessary splats, but we might miss load folding opportunities. Should
  // this decision be based on OptimizeForSize?
  bool IsFakeVector = !VT.isVector() && !IsF128;
  MVT LogicVT = VT;
  if (IsFakeVector)
    LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;

  // The mask constants are automatically splatted for vector types.
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  SDValue SignMask = DAG.getConstantFP(
      APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
  SDValue MagMask = DAG.getConstantFP(
      APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);

  // First, clear all bits but the sign bit from the second operand (sign).
  if (IsFakeVector)
    Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
  SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);

  // Next, clear the sign bit from the first operand (magnitude).
  // TODO: If we had general constant folding for FP logic ops, this check
  // wouldn't be necessary.
  SDValue MagBits;
  if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
    APFloat APF = Op0CN->getValueAPF();
    APF.clearSign();
    MagBits = DAG.getConstantFP(APF, dl, LogicVT);
  } else {
    // If the magnitude operand wasn't a constant, we need to AND out the sign.
    if (IsFakeVector)
      Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
    MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
  }

  // OR the magnitude value with the sign bit.
  SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
  return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
                                          DAG.getIntPtrConstant(0, dl));
}

static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  MVT OpVT = N0.getSimpleValueType();
  assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
         "Unexpected type for FGETSIGN");

  // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
  MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
  SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
  Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
  Res = DAG.getZExtOrTrunc(Res, dl, VT);
  Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
  return Res;
}

/// Helper for creating a X86ISD::SETCC node.
static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
                        SelectionDAG &DAG) {
  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                     DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
}

/// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
/// style scalarized (associative) reduction patterns.
static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
                                 SmallVectorImpl<SDValue> &SrcOps) {
  SmallVector<SDValue, 8> Opnds;
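  // Map each distinct source vector to a bitmask of which of its elements
  // have been consumed by an extract so far.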
  DenseMap<SDValue, APInt> SrcOpMap;
  EVT VT = MVT::Other;

  // Recognize a special case where a vector is cast into a wide integer to
  // test for all zeros.
  assert(Op.getOpcode() == unsigned(BinOp) &&
         "Unexpected bit reduction opcode");
  Opnds.push_back(Op.getOperand(0));
  Opnds.push_back(Op.getOperand(1));

  for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
    SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
    // BFS traverse all BinOp operands.
    if (I->getOpcode() == unsigned(BinOp)) {
      Opnds.push_back(I->getOperand(0));
      Opnds.push_back(I->getOperand(1));
      // Re-evaluate the number of nodes to be traversed.
      e += 2; // 2 more nodes (LHS and RHS) are pushed.
      continue;
    }

    // Quit if this is not an EXTRACT_VECTOR_ELT.
    if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return false;

    // Quit if the index is not a constant.
    SDValue Idx = I->getOperand(1);
    if (!isa<ConstantSDNode>(Idx))
      return false;

    SDValue Src = I->getOperand(0);
    DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
    if (M == SrcOpMap.end()) {
      VT = Src.getValueType();
      // Quit if not the same type.
      if (SrcOpMap.begin() != SrcOpMap.end() &&
          VT != SrcOpMap.begin()->first.getValueType())
        return false;
      unsigned NumElts = VT.getVectorNumElements();
      APInt EltCount = APInt::getNullValue(NumElts);
      M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
      SrcOps.push_back(Src);
    }
    // Quit if element already used.
    unsigned CIdx = cast<ConstantSDNode>(Idx)->getZExtValue();
    if (M->second[CIdx])
      return false;
    M->second.setBit(CIdx);
  }

  // Quit if not all elements are used.
  for (DenseMap<SDValue, APInt>::const_iterator I = SrcOpMap.begin(),
                                                E = SrcOpMap.end();
       I != E; ++I) {
    if (!I->second.isAllOnesValue())
      return false;
  }

  return true;
}

// Check whether an OR'd tree is PTEST-able.
static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG, SDValue &X86CC) {
  assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");

  if (!Subtarget.hasSSE41() || !Op->hasOneUse())
    return SDValue();

  SmallVector<SDValue, 8> VecIns;
  if (!matchScalarReduction(Op, ISD::OR, VecIns))
    return SDValue();

  // Quit if not 128/256-bit vector.
  EVT VT = VecIns[0].getValueType();
  if (!VT.is128BitVector() && !VT.is256BitVector())
    return SDValue();

  SDLoc DL(Op);
  MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;

  // Cast all vectors into TestVT for PTEST.
  for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
    VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);

  // If more than one full vector is evaluated, OR them first before PTEST.
  for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
    // Each iteration will OR 2 nodes and append the result until there is only
    // 1 node left, i.e. the final OR'd value of all vectors.
    SDValue LHS = VecIns[Slot];
    SDValue RHS = VecIns[Slot + 1];
    VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
  }

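  // PTEST sets ZF when the AND of its two operands is zero, so testing the
  // final OR'd value against itself checks whether every element is zero.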
  X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE,
                                DL, MVT::i8);
  return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, VecIns.back(), VecIns.back());
}

/// Return true if \c Op has a use that doesn't just read flags.
static bool hasNonFlagsUse(SDValue Op) {
  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
       ++UI) {
    SDNode *User = *UI;
    unsigned UOpNo = UI.getOperandNo();
    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
      UOpNo = User->use_begin().getOperandNo();
      User = *User->use_begin();
    }

    if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
      return true;
  }
  return false;
}

/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
                        SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
  bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
  case X86::COND_L: case X86::COND_LE:
  case X86::COND_O: case X86::COND_NO: {
    // Check if we really need to set the
    // Overflow flag. If NoSignedWrap is present
    // that is not actually needed.
    switch (Op->getOpcode()) {
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::SHL:
      if (Op.getNode()->getFlags().hasNoSignedWrap())
        break;
      LLVM_FALLTHROUGH;
    default:
      NeedOF = true;
      break;
    }
    break;
  }
  }
  // See if we can use the EFLAGS value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, Op.getValueType()));
  }
  unsigned Opcode = 0;
  unsigned NumOperands = 0;

  SDValue ArithOp = Op;

  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
  // which may be the result of a CAST.  We use the variable 'Op', which is the
  // non-casted variable when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::AND:
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
    // because a TEST instruction will be better.
    if (!hasNonFlagsUse(Op))
      break;

    LLVM_FALLTHROUGH;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    // Transform to an x86-specific ALU node with flags if there is a chance of
    // using an RMW op or only the flags are used. Otherwise, leave
    // the node alone and emit a 'test' instruction.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
           UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() != ISD::CopyToReg &&
          UI->getOpcode() != ISD::SETCC &&
          UI->getOpcode() != ISD::STORE)
        goto default_case;

    // Otherwise use a regular EFLAGS-setting instruction.
    switch (ArithOp.getOpcode()) {
    default: llvm_unreachable("unexpected operator!");
    case ISD::ADD: Opcode = X86ISD::ADD; break;
    case ISD::SUB: Opcode = X86ISD::SUB; break;
    case ISD::XOR: Opcode = X86ISD::XOR; break;
    case ISD::AND: Opcode = X86ISD::AND; break;
    case ISD::OR:  Opcode = X86ISD::OR;  break;
    }

    NumOperands = 2;
    break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    return SDValue(Op.getNode(), 1);
  case ISD::SSUBO:
  case ISD::USUBO: {
    // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
                       Op->getOperand(1)).getValue(1);
  }
  default:
  default_case:
    break;
  }

  if (Opcode == 0) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, Op.getValueType()));
  }
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);

  SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
  return SDValue(New.getNode(), 1);
}

/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                                   const SDLoc &dl, SelectionDAG &DAG) const {
  if (isNullConstant(Op1))
    return EmitTest(Op0, X86CC, dl, DAG, Subtarget);

  EVT CmpVT = Op0.getValueType();

  if (CmpVT.isFloatingPoint())
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);

  assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
          CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");

  // Only promote the compare up to i32 if it is a 16-bit operation
  // with an immediate. 16-bit immediates are to be avoided.
  if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
      !DAG.getMachineFunction().getFunction().hasMinSize()) {
    ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
    ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
    // Don't do this if the immediate can fit in 8-bits.
    if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
        (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
      unsigned ExtendOp =
          isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
      if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
        // For equality comparisons try to use SIGN_EXTEND if the input was
        // truncated from something with enough sign bits.
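        // If the pre-truncation value has at most 16 significant bits
        // (including the sign bit), sign-extending the truncated value
        // reproduces it exactly, so the equality result is unchanged.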
        if (Op0.getOpcode() == ISD::TRUNCATE) {
          SDValue In = Op0.getOperand(0);
          unsigned EffBits =
              In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
          if (EffBits <= 16)
            ExtendOp = ISD::SIGN_EXTEND;
        } else if (Op1.getOpcode() == ISD::TRUNCATE) {
          SDValue In = Op1.getOperand(0);
          unsigned EffBits =
              In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
          if (EffBits <= 16)
            ExtendOp = ISD::SIGN_EXTEND;
        }
      }

      CmpVT = MVT::i32;
      Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
      Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
    }
  }
  // Use SUB instead of CMP to enable CSE between SUB and CMP.
  SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
  SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
  return Sub.getValue(1);
}

/// Convert a comparison if required by the subtarget.
SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
                                                 SelectionDAG &DAG) const {
  // If the subtarget does not support the FUCOMI instruction, floating-point
  // comparisons have to be converted.
  if (Subtarget.hasCMov() ||
      Cmp.getOpcode() != X86ISD::CMP ||
      !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
      !Cmp.getOperand(1).getValueType().isFloatingPoint())
    return Cmp;

  // The instruction selector will select an FUCOM instruction instead of
  // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
  // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
  // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
  SDLoc dl(Cmp);
  SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
  SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
  SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
                            DAG.getConstant(8, dl, MVT::i8));
  SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);

  // Some 64-bit targets lack SAHF support, but they do support FCOMI.
  assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
  return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}

/// Check if replacement of SQRT with RSQRT should be disabled.
bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // We never want to use both SQRT and RSQRT instructions for the same input.
  if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
    return false;

  if (VT.isVector())
    return Subtarget.hasFastVectorFSQRT();
  return Subtarget.hasFastScalarFSQRT();
}

/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
                                           SelectionDAG &DAG, int Enabled,
                                           int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Op.getValueType();

  // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
  // It is likely not profitable to do this for f64 because a double-precision
  // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
  // instructions: convert to single, rsqrtss, convert back to double, refine
  // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
  // along with FMA, this could be a throughput win.
  // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
  // after legalize types.
  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 1;

    UseOneConstNR = false;
    // There is no 512-bit FRSQRT, but there is RSQRT14.
    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
    return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
  }
  return SDValue();
}

/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Op.getValueType();

  // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
  // It is likely not profitable to do this for f64 because a double-precision
  // reciprocal estimate with refinement on x86 prior to FMA requires
  // 15 instructions: convert to single, rcpss, convert back to double, refine
  // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
  // along with FMA, this could be a throughput win.

  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
    // Enable estimate codegen with 1 refinement step for vector division.
    // Scalar division estimates are disabled because they break too much
    // real-world code. These defaults are intended to match GCC behavior.
    if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
      return SDValue();

    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 1;

    // There is no 512-bit FRCP, but there is RCP14.
    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
    return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
  }
  return SDValue();
}

/// If we have at least two divisions that use the same divisor, convert to
/// multiplication by a reciprocal. This may need to be adjusted for a given
/// CPU if a division's cost is not at least twice the cost of a multiplication.
/// This is because we still need one division to calculate the reciprocal and
/// then we need two multiplies by that reciprocal as replacements for the
/// original divisions.
unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
  return 2;
}

SDValue
X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (isIntDivCheap(N->getValueType(0), Attr))
    return SDValue(N,0); // Lower SDIV as SDIV

  assert((Divisor.isPowerOf2() || (-Divisor).isPowerOf2()) &&
         "Unexpected divisor!");

  // Only perform this transform if CMOV is supported otherwise the select
  // below will become a branch.
  if (!Subtarget.hasCMov())
    return SDValue();

  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  // FIXME: Support i8.
  if (VT != MVT::i16 && VT != MVT::i32 &&
      !(Subtarget.is64Bit() && VT == MVT::i64))
    return SDValue();

  unsigned Lg2 = Divisor.countTrailingZeros();

  // If the divisor is 2 or -2, the default expansion is better.
  if (Lg2 == 1)
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
  SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);

  // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
  SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
  SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
  SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);

  Created.push_back(Cmp.getNode());
  Created.push_back(Add.getNode());
  Created.push_back(CMov.getNode());

  // Divide by pow2.
  SDValue SRA =
      DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i64));

  // If we're dividing by a positive value, we're done.  Otherwise, we must
  // negate the result.
  if (Divisor.isNonNegative())
    return SRA;

  Created.push_back(SRA.getNode());
  return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
}

/// Result of 'and' is compared against zero. Change to a BT node if possible.
/// Returns the BT node and the condition code needed to use it.
static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SDValue &X86CC) {
  assert(And.getOpcode() == ISD::AND && "Expected AND node!");
  SDValue Op0 = And.getOperand(0);
  SDValue Op1 = And.getOperand(1);
  if (Op0.getOpcode() == ISD::TRUNCATE)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::TRUNCATE)
    Op1 = Op1.getOperand(0);

  SDValue Src, BitNo;
  if (Op1.getOpcode() == ISD::SHL)
    std::swap(Op0, Op1);
  if (Op0.getOpcode() == ISD::SHL) {
    if (isOneConstant(Op0.getOperand(0))) {
      // If we looked past a truncate, check that it's only truncating away
      // known zeros.
      unsigned BitWidth = Op0.getValueSizeInBits();
      unsigned AndBitWidth = And.getValueSizeInBits();
      if (BitWidth > AndBitWidth) {
        KnownBits Known = DAG.computeKnownBits(Op0);
        if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
          return SDValue();
      }
      Src = Op1;
      BitNo = Op0.getOperand(1);
    }
  } else if (Op1.getOpcode() == ISD::Constant) {
    ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
    uint64_t AndRHSVal = AndRHS->getZExtValue();
    SDValue AndLHS = Op0;

    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
      Src = AndLHS.getOperand(0);
      BitNo = AndLHS.getOperand(1);
    } else {
      // Use BT if the immediate can't be encoded in a TEST instruction or we
      // are optimizing for size and the immediate won't fit in a byte.
      bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
      if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
          isPowerOf2_64(AndRHSVal)) {
        Src = AndLHS;
        BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
                                Src.getValueType());
      }
    }
  }

  // No patterns found, give up.
  if (!Src.getNode())
    return SDValue();

  // If Src is i8, promote it to i32 with any_extend.  There is no i8 BT
  // instruction.  Since the shift amount is in-range-or-undefined, we know
  // that doing a bittest on the i32 value is ok.  We extend to i32 because
  // the encoding for the i16 version is larger than the i32 version.
  // Also promote i16 to i32 for performance / code size reasons.
  if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
    Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);

  // See if we can use the 32-bit instruction instead of the 64-bit one for a
  // shorter encoding. Since the former takes the modulo 32 of BitNo and the
  // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
  // known to be zero.
  if (Src.getValueType() == MVT::i64 &&
      DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
    Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);

  // If the operand types disagree, extend the shift amount to match.  Since
  // BT ignores high bits (like shifts) we can use anyextend.
  if (Src.getValueType() != BitNo.getValueType())
    BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);

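  // BT copies the selected bit into CF, so 'bit == 0' maps to COND_AE (CF
  // clear) and 'bit != 0' maps to COND_B (CF set).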
  X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
                                dl, MVT::i8);
  return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
}

/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
/// CMPs.
static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
                                   SDValue &Op1) {
  unsigned SSECC;
  bool Swap = false;

  // SSE Condition code mapping:
  //  0 - EQ
  //  1 - LT
  //  2 - LE
  //  3 - UNORD
  //  4 - NEQ
  //  5 - NLT
  //  6 - NLE
  //  7 - ORD
  switch (SetCCOpcode) {
  default: llvm_unreachable("Unexpected SETCC condition");
  case ISD::SETOEQ:
  case ISD::SETEQ:  SSECC = 0; break;
  case ISD::SETOGT:
  case ISD::SETGT:  Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETLT:
  case ISD::SETOLT: SSECC = 1; break;
  case ISD::SETOGE:
  case ISD::SETGE:  Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETLE:
  case ISD::SETOLE: SSECC = 2; break;
  case ISD::SETUO:  SSECC = 3; break;
  case ISD::SETUNE:
  case ISD::SETNE:  SSECC = 4; break;
  case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETUGE: SSECC = 5; break;
  case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETUGT: SSECC = 6; break;
  case ISD::SETO:   SSECC = 7; break;
  case ISD::SETUEQ: SSECC = 8; break;
  case ISD::SETONE: SSECC = 12; break;
  }
  if (Swap)
    std::swap(Op0, Op1);

  return SSECC;
}

/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
/// concatenate the result back.
static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
         "Unsupported value type for operation");

  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);
  SDValue CC = Op.getOperand(2);

  // Extract the LHS vectors
  SDValue LHS = Op.getOperand(0);
  SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
  SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);

  // Extract the RHS vectors
  SDValue RHS = Op.getOperand(1);
  SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
  SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);

  // Issue the operation on the smaller types and concatenate the result back
  MVT EltVT = VT.getVectorElementType();
  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
}

static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  assert(VT.getVectorElementType() == MVT::i1 &&
         "Cannot set masked compare for this operation");

  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();

  // Prefer SETGT over SETLT.
  if (SetCCOpcode == ISD::SETLT) {
    SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
    std::swap(Op0, Op1);
  }

  return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
}

/// Given a buildvector constant, return a new vector constant with each element
/// incremented or decremented. If incrementing or decrementing would result in
/// unsigned overflow or underflow or this is not a simple vector constant,
/// return an empty value.
static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
  auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
  if (!BV)
    return SDValue();

  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> NewVecC;
  SDLoc DL(V);
  for (unsigned i = 0; i < NumElts; ++i) {
    auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
      return SDValue();

    // Avoid overflow/underflow.
    const APInt &EltC = Elt->getAPIntValue();
    if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
      return SDValue();

    NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
  }

  return DAG.getBuildVector(VT, DL, NewVecC);
}

/// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
/// Op0 u<= Op1:
///   t = psubus Op0, Op1
///   pcmpeq t, <0..0>
static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
                                    ISD::CondCode Cond, const SDLoc &dl,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  MVT VET = VT.getVectorElementType();
  if (VET != MVT::i8 && VET != MVT::i16)
    return SDValue();

  switch (Cond) {
  default:
    return SDValue();
  case ISD::SETULT: {
    // If the comparison is against a constant we can turn this into a
    // setule.  With psubus, setule does not require a swap.  This is
    // beneficial because the constant in the register is no longer
    // clobbered as the destination, so it can be hoisted out of a loop.
    // Only do this pre-AVX since vpcmp* is no longer destructive.
    if (Subtarget.hasAVX())
      return SDValue();
    SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
    if (!ULEOp1)
      return SDValue();
    Op1 = ULEOp1;
    break;
  }
  case ISD::SETUGT: {
    // If the comparison is against a constant, we can turn this into a setuge.
    // This is beneficial because materializing a constant 0 for the PCMPEQ is
    // probably cheaper than XOR+PCMPGT using 2 different vector constants:
    // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
    SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
    if (!UGEOp1)
      return SDValue();
    Op1 = Op0;
    Op0 = UGEOp1;
    break;
  }
  // Psubus is better than flip-sign because it requires no inversion.
  case ISD::SETUGE:
    std::swap(Op0, Op1);
    break;
  case ISD::SETULE:
    break;
  }

  SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
  return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
                     DAG.getConstant(0, dl, VT));
}

static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();
  ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
  bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
  SDLoc dl(Op);

  if (isFP) {
#ifndef NDEBUG
    MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
    assert(EltVT == MVT::f32 || EltVT == MVT::f64);
#endif

    unsigned Opc;
    if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
      assert(VT.getVectorNumElements() <= 16);
      Opc = X86ISD::CMPM;
    } else {
      Opc = X86ISD::CMPP;
      // The SSE/AVX packed FP comparison nodes are defined with a
      // floating-point vector result that matches the operand type. This allows
      // them to work with an SSE1 target (integer vector types are not legal).
      VT = Op0.getSimpleValueType();
    }

    // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
    // emit two comparisons and a logic op to tie them together.
    SDValue Cmp;
    unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1);
    if (SSECC >= 8 && !Subtarget.hasAVX()) {
      // LLVM predicate is SETUEQ or SETONE.
      unsigned CC0, CC1;
      unsigned CombineOpc;
      if (Cond == ISD::SETUEQ) {
        CC0 = 3; // UNORD
        CC1 = 0; // EQ
        CombineOpc = X86ISD::FOR;
      } else {
        assert(Cond == ISD::SETONE);
        CC0 = 7; // ORD
        CC1 = 4; // NEQ
        CombineOpc = X86ISD::FAND;
      }

      SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
                                 DAG.getTargetConstant(CC0, dl, MVT::i8));
      SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
                                 DAG.getTargetConstant(CC1, dl, MVT::i8));
      Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
    } else {
      // Handle all other FP comparisons here.
      Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
                        DAG.getTargetConstant(SSECC, dl, MVT::i8));
    }

    // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
    // result type of SETCC. The bitcast is expected to be optimized away
    // during combining/isel.
    if (Opc == X86ISD::CMPP)
      Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);

    return Cmp;
  }

  MVT VTOp0 = Op0.getSimpleValueType();
  (void)VTOp0;
  assert(VTOp0 == Op1.getSimpleValueType() &&
         "Expected operands with same type!");
  assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
         "Invalid number of packed elements for source and destination!");

  // The non-AVX512 code below works under the assumption that source and
  // destination types are the same.
  assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
         "Value types for source and destination must be the same!");

  // The result is boolean, but operands are int/float
  if (VT.getVectorElementType() == MVT::i1) {
    // In the AVX-512 architecture setcc returns a mask with i1 elements,
    // but there is no compare instruction for i8 and i16 elements in KNL.
    assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
           "Unexpected operand type");
    return LowerIntVSETCC_AVX512(Op, DAG);
  }

  // Lower using XOP integer comparisons.
  if (VT.is128BitVector() && Subtarget.hasXOP()) {
    // Translate compare code to XOP PCOM compare mode.
    unsigned CmpMode = 0;
    switch (Cond) {
    default: llvm_unreachable("Unexpected SETCC condition");
    case ISD::SETULT:
    case ISD::SETLT: CmpMode = 0x00; break;
    case ISD::SETULE:
    case ISD::SETLE: CmpMode = 0x01; break;
    case ISD::SETUGT:
    case ISD::SETGT: CmpMode = 0x02; break;
    case ISD::SETUGE:
    case ISD::SETGE: CmpMode = 0x03; break;
    case ISD::SETEQ: CmpMode = 0x04; break;
    case ISD::SETNE: CmpMode = 0x05; break;
    }

    // Are we comparing unsigned or signed integers?
    unsigned Opc =
        ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;

    return DAG.getNode(Opc, dl, VT, Op0, Op1,
                       DAG.getTargetConstant(CmpMode, dl, MVT::i8));
  }

  // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
  // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
  if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
    SDValue BC0 = peekThroughBitcasts(Op0);
    if (BC0.getOpcode() == ISD::AND) {
      APInt UndefElts;
      SmallVector<APInt, 64> EltBits;
      if (getTargetConstantBitsFromNode(BC0.getOperand(1),
                                        VT.getScalarSizeInBits(), UndefElts,
                                        EltBits, false, false)) {
        if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
          Cond = ISD::SETEQ;
          Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
        }
      }
    }
  }

  // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
  if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
      Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
    ConstantSDNode *C1 = isConstOrConstSplat(Op1);
    if (C1 && C1->getAPIntValue().isPowerOf2()) {
      unsigned BitWidth = VT.getScalarSizeInBits();
      unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;

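      // Shift the tested power-of-2 bit into the sign position, then
      // arithmetic-shift it back across the element so the result is all-ones
      // when the bit was set and all-zeros otherwise.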
      SDValue Result = Op0.getOperand(0);
      Result = DAG.getNode(ISD::SHL, dl, VT, Result,
                           DAG.getConstant(ShiftAmt, dl, VT));
      Result = DAG.getNode(ISD::SRA, dl, VT, Result,
                           DAG.getConstant(BitWidth - 1, dl, VT));
      return Result;
    }
  }

  // Break 256-bit integer vector compare into smaller ones.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return Lower256IntVSETCC(Op, DAG);

  // If this is a SETNE against the signed minimum value, change it to SETGT.
  // If this is a SETNE against the signed maximum value, change it to SETLT,
  // which will be swapped to SETGT.
  // Otherwise we use PCMPEQ+invert.
  APInt ConstValue;
  if (Cond == ISD::SETNE &&
      ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
    if (ConstValue.isMinSignedValue())
      Cond = ISD::SETGT;
    else if (ConstValue.isMaxSignedValue())
      Cond = ISD::SETLT;
  }

  // If both operands are known non-negative, then an unsigned compare is the
  // same as a signed compare and there's no need to flip signbits.
  // TODO: We could check for more general simplifications here since we're
  // computing known bits.
  bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
                   !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));

  // Special case: Use min/max operations for unsigned compares.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (ISD::isUnsignedIntSetCC(Cond) &&
      (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
      TLI.isOperationLegal(ISD::UMIN, VT)) {
    // If we have a constant operand, increment/decrement it and change the
    // condition to avoid an invert.
    if (Cond == ISD::SETUGT) {
      // X > C --> X >= (C+1) --> X == umax(X, C+1)
      if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
        Op1 = UGTOp1;
        Cond = ISD::SETUGE;
      }
    }
    if (Cond == ISD::SETULT) {
      // X < C --> X <= (C-1) --> X == umin(X, C-1)
      if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
        Op1 = ULTOp1;
        Cond = ISD::SETULE;
      }
    }
    bool Invert = false;
    unsigned Opc;
    switch (Cond) {
    default: llvm_unreachable("Unexpected condition code");
    case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETULE: Opc = ISD::UMIN; break;
    case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETUGE: Opc = ISD::UMAX; break;
    }

    SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);

    // If the logical-not of the result is required, perform that now.
    if (Invert)
      Result = DAG.getNOT(dl, Result, VT);

    return Result;
  }

  // Try to use SUBUS and PCMPEQ.
  if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
    return V;

  // We are handling one of the integer comparisons here. Since SSE only has
  // GT and EQ comparisons for integer, swapping operands and multiple
  // operations may be required for some comparisons.
  unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
                                                            : X86ISD::PCMPGT;
  bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
              Cond == ISD::SETGE || Cond == ISD::SETUGE;
  bool Invert = Cond == ISD::SETNE ||
                (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));

  if (Swap)
    std::swap(Op0, Op1);

  // Check that the operation in question is available (most are plain SSE2,
  // but PCMPGTQ and PCMPEQQ have different requirements).
  if (VT == MVT::v2i64) {
    if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
      assert(Subtarget.hasSSE2() && "Don't know how to lower!");

      // Since SSE has no unsigned integer comparisons, we need to flip the sign
      // bits of the inputs before performing those operations. The lower
      // compare is always unsigned.
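      // Flipping bit 31 of each dword makes the signed PCMPGTD act as an
      // unsigned compare on that half; the high halves are flipped as well
      // only when the original 64-bit compare is unsigned.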
      SDValue SB;
      if (FlipSigns) {
        SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
      } else {
        SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
      }
      Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
      Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);

      // Cast everything to the right type.
      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
      Op1 = DAG.getBitcast(MVT::v4i32, Op1);

      // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
      SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
      SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);

      // Create masks for only the low parts/high parts of the 64 bit integers.
      static const int MaskHi[] = { 1, 1, 3, 3 };
      static const int MaskLo[] = { 0, 0, 2, 2 };
      SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
      SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
      SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);

      SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
      Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);

      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);

      return DAG.getBitcast(VT, Result);
    }

    if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
      // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
      // pcmpeqd + pshufd + pand.
      assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");

      // First cast everything to the right type.
      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
      Op1 = DAG.getBitcast(MVT::v4i32, Op1);

      // Do the compare.
      SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);

      // Make sure the lower and upper halves are both all-ones.
      static const int Mask[] = { 1, 0, 3, 2 };
      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
      Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);

      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);

      return DAG.getBitcast(VT, Result);
    }
  }

  // Since SSE has no unsigned integer comparisons, we need to flip the sign
  // bits of the inputs before performing those operations.
  if (FlipSigns) {
    MVT EltVT = VT.getVectorElementType();
    SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
                                 VT);
    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
  }

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}

// Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
                              const SDLoc &dl, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget,
                              SDValue &X86CC) {
  // Only support equality comparisons.
  if (CC != ISD::SETEQ && CC != ISD::SETNE)
    return SDValue();

  // Must be a bitcast from vXi1.
  if (Op0.getOpcode() != ISD::BITCAST)
    return SDValue();

  Op0 = Op0.getOperand(0);
  MVT VT = Op0.getSimpleValueType();
  if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
      !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
      !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
    return SDValue();

  X86::CondCode X86Cond;
  if (isNullConstant(Op1)) {
    X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
  } else if (isAllOnesConstant(Op1)) {
    // C flag is set for all ones.
    X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
  } else
    return SDValue();

  // If the input is an AND, we can combine its operands into the KTEST.
  bool KTestable = false;
  if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
    KTestable = true;
  if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
    KTestable = true;
  if (!isNullConstant(Op1))
    KTestable = false;
  if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
    SDValue LHS = Op0.getOperand(0);
    SDValue RHS = Op0.getOperand(1);
    X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
    return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
  }

  // If the input is an OR, we can combine its operands into the KORTEST.
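  // Otherwise KORTEST the value with itself; OR-ing a mask with itself leaves
  // it unchanged and simply tests the whole mask.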
  SDValue LHS = Op0;
  SDValue RHS = Op0;
  if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
    LHS = Op0.getOperand(0);
    RHS = Op0.getOperand(1);
  }

  X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
  return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
}

/// Emit flags for the given setcc condition and operands. Also returns the
/// corresponding X86 condition code constant in X86CC.
SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
                                             ISD::CondCode CC, const SDLoc &dl,
                                             SelectionDAG &DAG,
                                             SDValue &X86CC) const {
  // Optimize to BT if possible.
  // Lower (X & (1 << N)) == 0 to BT(X, N).
  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
  if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
      return BT;
  }

  // Try to use PTEST when comparing a tree of ORs for equality with 0.
  // TODO: We could do AND tree with all 1s as well by using the C flag.
  if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
      return PTEST;
  }

  // Try to lower using KORTEST or KTEST.
  if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
    return Test;

  // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
  // these.
  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    // If the input is a setcc, then reuse the input setcc or use a new one with
    // the inverted condition.
    if (Op0.getOpcode() == X86ISD::SETCC) {
      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);

      X86CC = Op0.getOperand(0);
      if (Invert) {
        X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
        CCode = X86::GetOppositeBranchCondition(CCode);
        X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
      }

      return Op0.getOperand(1);
    }
  }

  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
  X86::CondCode CondCode = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
  if (CondCode == X86::COND_INVALID)
    return SDValue();

  SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG);
  EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
  X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
  return EFLAGS;
}

SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {

  MVT VT = Op.getSimpleValueType();

  if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);

  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc dl(Op);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // Handle f128 first, since one possible outcome is a normal integer
  // comparison which gets handled by emitFlagsForSetcc.
  if (Op0.getValueType() == MVT::f128) {
    softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1);

    // If softenSetCCOperands returned a scalar, use it.
    if (!Op1.getNode()) {
      assert(Op0.getValueType() == Op.getValueType() &&
             "Unexpected setcc expansion!");
      return Op0;
    }
  }

  SDValue X86CC;
  SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
  if (!EFLAGS)
    return SDValue();

  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
}

SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);

  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
  X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());

  // Recreate the carry if needed.
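  // Adding all-ones to Carry produces a carry-out exactly when Carry is
  // nonzero, re-materializing the carry flag for the SBB below.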
  EVT CarryVT = Carry.getValueType();
  APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
  Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
                      Carry, DAG.getConstant(NegOne, DL, CarryVT));

  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
  SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
  return getSETCC(CC, Cmp.getValue(1), DL, DAG);
}

// This function produces three things: the arithmetic result (Value) and an
// EFLAGS result (Overflow), returned as a pair, plus a condition code (Cond)
// returned through the reference parameter. The flag and the condition code
// define the case in which the arithmetic computation overflows.
static std::pair<SDValue, SDValue>
getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
  assert(Op.getResNo() == 0 && "Unexpected result number!");
  SDValue Value, Overflow;
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  unsigned BaseOp = 0;
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_O;
    break;
  case ISD::UADDO:
    BaseOp = X86ISD::ADD;
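    // An unsigned add of 1 overflows exactly when the result wraps to zero,
    // so test ZF instead of CF; this keeps the option of selecting INC, which
    // does not update CF.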
    Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
    break;
  case ISD::SSUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_O;
    break;
  case ISD::USUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_B;
    break;
  case ISD::SMULO:
    BaseOp = X86ISD::SMUL;
    Cond = X86::COND_O;
    break;
  case ISD::UMULO:
    BaseOp = X86ISD::UMUL;
    Cond = X86::COND_O;
    break;
  }

  if (BaseOp) {
    // Also sets EFLAGS.
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
    Overflow = Value.getValue(1);
  }

  return std::make_pair(Value, Overflow);
}

static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
  // Lower the "add/sub/mul with overflow" instruction into a regular
  // instruction plus a "setcc" instruction that checks the overflow flag. The
  // "brcond" lowering looks for this combo and may remove the "setcc"
  // instruction if the "setcc" has only one use.
  SDLoc DL(Op);
  X86::CondCode Cond;
  SDValue Value, Overflow;
  std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);

  SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
  assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
}

/// Return true if opcode is an X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
  unsigned Opc = Op.getOpcode();
  if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
      Opc == X86ISD::SAHF)
    return true;
  if (Op.getResNo() == 1 &&
      (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
       Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
       Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
    return true;

  return false;
}

static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue VOp0 = V.getOperand(0);
  unsigned InBits = VOp0.getValueSizeInBits();
  unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
}

SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  bool AddTest = true;
  SDValue Cond  = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  MVT VT = Op1.getSimpleValueType();
  SDValue CC;

  // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE
  // ops are available, or into VBLENDV when AVX is available.
  // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
  if (Cond.getOpcode() == ISD::SETCC &&
      ((Subtarget.hasSSE2() && VT == MVT::f64) ||
       (Subtarget.hasSSE1() && VT == MVT::f32)) &&
      VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
    SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
    unsigned SSECC = translateX86FSETCC(
        cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);

    if (Subtarget.hasAVX512()) {
      SDValue Cmp =
          DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
                      DAG.getTargetConstant(SSECC, DL, MVT::i8));
      assert(!VT.isVector() && "Not a scalar type?");
      return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
    }

    if (SSECC < 8 || Subtarget.hasAVX()) {
      SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
                                DAG.getTargetConstant(SSECC, DL, MVT::i8));

      // If we have AVX, we can use a variable vector select (VBLENDV) instead
      // of 3 logic instructions for size savings and potentially speed.
      // Unfortunately, there is no scalar form of VBLENDV.

      // If either operand is a +0.0 constant, don't try this. We can expect to
      // optimize away at least one of the logic instructions later in that
      // case, so that sequence would be faster than a variable blend.

      // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
      // uses XMM0 as the selection register. That may need just as many
      // instructions as the AND/ANDN/OR sequence due to register moves, so
      // don't bother.
      if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
          !isNullFPConstant(Op2)) {
        // Convert to vectors, do a VSELECT, and convert back to scalar.
        // All of the conversions should be optimized away.
        MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
        SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
        SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
        SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);

        MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
        VCmp = DAG.getBitcast(VCmpVT, VCmp);

        SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);

        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                           VSel, DAG.getIntPtrConstant(0, DL));
      }
      SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
      SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
      return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
    }
  }

  // AVX512 fallback is to lower selects of scalar floats to masked moves.
  if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
    SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
    return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
  }

  // For v64i1 without 64-bit support we need to split and rejoin.
  if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
    assert(Subtarget.hasBWI() && "Expected BWI to be legal");
    SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
    SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
    SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
    SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
    SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
    SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
  }

  if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
    SDValue Op1Scalar;
    if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
      Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
    else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
      Op1Scalar = Op1.getOperand(0);
    SDValue Op2Scalar;
    if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
      Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
    else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
      Op2Scalar = Op2.getOperand(0);
    if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
      SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
                                        Op1Scalar, Op2Scalar);
      if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
        return DAG.getBitcast(VT, newSelect);
      SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
                         DAG.getIntPtrConstant(0, DL));
    }
  }

  if (Cond.getOpcode() == ISD::SETCC) {
    if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
      Cond = NewCond;
      // If the condition was updated, it's possible that the operands of the
      // select were also updated (for example, EmitTest has a RAUW). Refresh
      // the local references to the select operands in case they got stale.
      Op1 = Op.getOperand(1);
      Op2 = Op.getOperand(2);
    }
  }

  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
  // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
  // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
  if (Cond.getOpcode() == X86ISD::SETCC &&
      Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
      isNullConstant(Cond.getOperand(1).getOperand(1))) {
    SDValue Cmp = Cond.getOperand(1);
    unsigned CondCode = Cond.getConstantOperandVal(0);

    if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
      SDValue CmpOp0 = Cmp.getOperand(0);

      // Apply further optimizations for special cases
      // (select (x != 0), -1, 0) -> neg & sbb
      // (select (x == 0), 0, -1) -> neg & sbb
      if (isNullConstant(Y) &&
          (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
        SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
        SDValue CmpZero = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Zero, CmpOp0);
        SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
        Zero = DAG.getConstant(0, DL, Op.getValueType());
        return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, CmpZero);
      }

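      // Otherwise compare CmpOp0 against 1: CF is set exactly when CmpOp0 is
      // zero, and SBB(0, 0, CF) broadcasts that carry into an all-zeros or
      // all-ones result.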
      Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                        CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
      Cmp = ConvertCmpIfNecessary(Cmp, DAG);

      SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
      SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
      SDValue Res =   // Res = 0 or -1.
        DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);

      if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
        Res = DAG.getNOT(DL, Res, Res.getValueType());

      if (!isNullConstant(Op2))
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
               Cmp.getOperand(0).getOpcode() == ISD::AND &&
               isOneConstant(Cmp.getOperand(0).getOperand(1))) {
      SDValue CmpOp0 = Cmp.getOperand(0);
      SDValue Src1, Src2;
      // Returns true if Op2 is an XOR or OR operator and one of its operands
      // is equal to Op1, i.e. the pattern is
      //   (a, a op b) or (b, a op b).
      auto isOrXorPattern = [&]() {
        if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
            (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
          Src1 =
              Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
          Src2 = Op1;
          return true;
        }
        return false;
      };

      if (isOrXorPattern()) {
        SDValue Neg;
        unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
        // We need a mask of all zeros or all ones with the same size as the
        // other operands.
        if (CmpSz > VT.getSizeInBits())
          Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
        else if (CmpSz < VT.getSizeInBits())
          Neg = DAG.getNode(ISD::AND, DL, VT,
              DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
              DAG.getConstant(1, DL, VT));
        else
          Neg = CmpOp0;
        SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                   Neg); // -(and (x, 0x1))
        SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
        return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2);  // And Op y
      }
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == X86ISD::SETCC ||
      CondOpcode == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    bool IllegalFPCMov = false;
    if (VT.isFloatingPoint() && !VT.isVector() &&
        !isScalarFPTypeInSSEReg(VT))  // FPStack?
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());

    if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Cmp.getOpcode() == X86ISD::BT) { // FIXME
      Cond = Cmp;
      AddTest = false;
    }
  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
             CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
    SDValue Value;
    X86::CondCode X86Cond;
    std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);

    CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
    AddTest = false;
  }

  if (AddTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue BTCC;
      if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
        CC = BTCC;
        Cond = BT;
        AddTest = false;
      }
    }
  }

  if (AddTest) {
    CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
    Cond = EmitCmp(Cond, DAG.getConstant(0, DL, Cond.getValueType()),
                   X86::COND_NE, DL, DAG);
  }

  // a <  b ? -1 :  0 -> RES = ~setcc_carry
  // a <  b ?  0 : -1 -> RES = setcc_carry
  // a >= b ? -1 :  0 -> RES = setcc_carry
  // a >= b ?  0 : -1 -> RES = ~setcc_carry
  if (Cond.getOpcode() == X86ISD::SUB) {
    Cond = ConvertCmpIfNecessary(Cond, DAG);
    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();

    if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (isNullConstant(Op1) || isNullConstant(Op2))) {
      SDValue Res =
          DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
      if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // X86 doesn't have an i8 cmov. If both operands are the result of a
  // truncate, widen the cmov and push the truncate through. This avoids
  // introducing a new branch during isel and doesn't add any extensions.
  if (Op.getValueType() == MVT::i8 &&
      Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
    if (T1.getValueType() == T2.getValueType() &&
        // Blacklist CopyFromReg to avoid partial register stalls.
        T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
      SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
                                 CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }

  // Or finally, promote i8 cmovs if we have CMOV,
  //                 or i16 cmovs if it won't prevent folding a load.
  // FIXME: we should not limit promotion of i8 case to only when the CMOV is
  //        legal, but EmitLoweredSelect() cannot deal with these extensions
  //        being inserted between two CMOV's. (in i16 case too TBN)
  //        https://bugs.llvm.org/show_bug.cgi?id=40974
  if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
      (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) &&
       !MayFoldLoad(Op2))) {
    Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
    Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
    SDValue Ops[] = { Op2, Op1, CC, Cond };
    SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
    return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
  }

  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  SDValue Ops[] = { Op2, Op1, CC, Cond };
  return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
}

static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
  MVT VTElt = VT.getVectorElementType();
  SDLoc dl(Op);

  unsigned NumElts = VT.getVectorNumElements();

  // Extend VT if the scalar type is i8/i16 and BWI is not supported.
  MVT ExtVT = VT;
  if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
    // If v16i32 is to be avoided, we'll need to split and concatenate.
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
      return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);

    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
  }

  // Widen to 512-bits if VLX is not supported.
  MVT WideVT = ExtVT;
  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
    NumElts *= 512 / ExtVT.getSizeInBits();
    InVT = MVT::getVectorVT(MVT::i1, NumElts);
    In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
                     In, DAG.getIntPtrConstant(0, dl));
    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
  }

  SDValue V;
  MVT WideEltVT = WideVT.getVectorElementType();
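  // With DQI, mask extensions to 32/64-bit elements can be selected directly,
  // and with BWI the 8/16-bit element cases can be; otherwise materialize the
  // result as a select between all-ones and zero.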
  if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
      (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
    V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
  } else {
    SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
    SDValue Zero = DAG.getConstant(0, dl, WideVT);
    V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
  }

  // Truncate if we had to extend i16/i8 above.
  if (VT != ExtVT) {
    WideVT = MVT::getVectorVT(VTElt, NumElts);
    V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
  }

  // Extract back to 128/256-bit if we widened.
  if (WideVT != VT)
    V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
                    DAG.getIntPtrConstant(0, dl));

  return V;
}

static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                               SelectionDAG &DAG) {
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();

  if (InVT.getVectorElementType() == MVT::i1)
    return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);

  assert(Subtarget.hasAVX() && "Expected AVX support");
  return LowerAVXExtend(Op, DAG, Subtarget);
}

// Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
// For sign extend this needs to handle all vector sizes and SSE4.1 and
// non-SSE4.1 targets. For zero extend this should only handle inputs of
// MVT::v64i8 when BWI is not supported, but AVX512 is.
static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  SDValue In = Op->getOperand(0);
  MVT VT = Op->getSimpleValueType(0);
  MVT InVT = In.getSimpleValueType();

  MVT SVT = VT.getVectorElementType();
  MVT InSVT = InVT.getVectorElementType();
  assert(SVT.getSizeInBits() > InSVT.getSizeInBits());

  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
    return SDValue();
  if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
    return SDValue();
  if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
      !(VT.is256BitVector() && Subtarget.hasAVX()) &&
      !(VT.is512BitVector() && Subtarget.hasAVX512()))
    return SDValue();

  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();
  unsigned NumElts = VT.getVectorNumElements();

  // For 256-bit vectors, we only need the lower (128-bit) half of the input.
  // For 512-bit vectors, we need 128-bits or 256-bits.
  if (InVT.getSizeInBits() > 128) {
    // Input needs to be at least the same number of elements as output, and
    // at least 128 bits.
    int InSize = InSVT.getSizeInBits() * NumElts;
    In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
    InVT = In.getSimpleValueType();
  }

  // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
  // results, so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx*
  // instructions still need to be handled here for 256/512-bit results.
  if (Subtarget.hasInt256()) {
    assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");

    if (InVT.getVectorNumElements() != NumElts)
      return DAG.getNode(Op.getOpcode(), dl, VT, In);

    // FIXME: Apparently we create inreg operations that could be regular
    // extends.
    unsigned ExtOpc =
        Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
                                             : ISD::ZERO_EXTEND;
    return DAG.getNode(ExtOpc, dl, VT, In);
  }

  // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
  if (Subtarget.hasAVX()) {
    assert(VT.is256BitVector() && "256-bit vector expected");
    MVT HalfVT = VT.getHalfNumVectorElementsVT();
    int HalfNumElts = HalfVT.getVectorNumElements();

    unsigned NumSrcElts = InVT.getVectorNumElements();
    SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
    for (int i = 0; i != HalfNumElts; ++i)
      HiMask[i] = HalfNumElts + i;

    SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
    SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
    Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
  }

  // We should only get here for sign extend.
  assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
  assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");

  // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
  SDValue Curr = In;
  SDValue SignExt = Curr;

  // As SRAI is only available on i16/i32 types, we expand only up to i32
  // and handle i64 separately.
  if (InVT != MVT::v4i32) {
    MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;

    unsigned DestWidth = DestVT.getScalarSizeInBits();
    unsigned Scale = DestWidth / InSVT.getSizeInBits();

    unsigned InNumElts = InVT.getVectorNumElements();
    unsigned DestElts = DestVT.getVectorNumElements();

    // Build a shuffle mask that takes each input element and places it in the
    // MSBs of the new element size.
    SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
    for (unsigned i = 0; i != DestElts; ++i)
      Mask[i * Scale + (Scale - 1)] = i;

    Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
    Curr = DAG.getBitcast(DestVT, Curr);

    unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
    SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
                          DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
  }

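  // For v2i64, compute each i32 lane's sign mask (0 > x gives all-ones for
  // negative lanes) and interleave it above the low words to form the upper
  // halves of the i64 results.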
  if (VT == MVT::v2i64) {
    assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
    SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
    SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
    SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
    SignExt = DAG.getBitcast(VT, SignExt);
  }

  return SignExt;
}

static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);

  if (InVT.getVectorElementType() == MVT::i1)
    return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);

  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Expected same number of elements");
  assert((VT.getVectorElementType() == MVT::i16 ||
          VT.getVectorElementType() == MVT::i32 ||
          VT.getVectorElementType() == MVT::i64) &&
         "Unexpected element type");
  assert((InVT.getVectorElementType() == MVT::i8 ||
          InVT.getVectorElementType() == MVT::i16 ||
          InVT.getVectorElementType() == MVT::i32) &&
         "Unexpected element type");

  // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
  if (InVT == MVT::v8i8) {
    if (VT != MVT::v8i64)
      return SDValue();

    In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
                     MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
    return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, VT, In);
  }

  if (Subtarget.hasInt256())
    return Op;

  // Optimize vectors in AVX mode
  // Sign extend  v8i16 to v8i32 and
  //              v4i32 to v4i64
  //
  // Divide input vector into two parts
  // for v4i32 the high shuffle mask will be {2, 3, -1, -1}
  // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
  // concat the vectors to original VT
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);

  unsigned NumElems = InVT.getVectorNumElements();
  SmallVector<int,8> ShufMask(NumElems, -1);
  for (unsigned i = 0; i != NumElems/2; ++i)
    ShufMask[i] = i + NumElems/2;

  SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
  OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}

/// Change a vector store into a pair of half-size vector stores.
static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
  SDValue StoredVal = Store->getValue();
  assert((StoredVal.getValueType().is256BitVector() ||
          StoredVal.getValueType().is512BitVector()) &&
         "Expecting 256/512-bit op");

  // Splitting volatile memory ops is not allowed unless the operation was not
  // legal to begin with. We are assuming the input op is legal (this transform
  // is only used for targets with AVX).
  if (!Store->isSimple())
    return SDValue();

  MVT StoreVT = StoredVal.getSimpleValueType();
  unsigned NumElems = StoreVT.getVectorNumElements();
  unsigned HalfSize = StoredVal.getValueSizeInBits() / 2;
  unsigned HalfAlign = (128 == HalfSize ? 16 : 32);

  SDLoc DL(Store);
  SDValue Value0 = extractSubVector(StoredVal, 0, DAG, DL, HalfSize);
  SDValue Value1 = extractSubVector(StoredVal, NumElems / 2, DAG, DL, HalfSize);
  SDValue Ptr0 = Store->getBasePtr();
  SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, HalfAlign, DL);
  unsigned Alignment = Store->getAlignment();
  SDValue Ch0 =
      DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
                   Alignment, Store->getMemOperand()->getFlags());
  SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
                             Store->getPointerInfo().getWithOffset(HalfAlign),
                             MinAlign(Alignment, HalfAlign),
                             Store->getMemOperand()->getFlags());
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
}

/// Scalarize a vector store, bitcasting to TargetVT to determine the scalar
/// type.
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
                                    SelectionDAG &DAG) {
  SDValue StoredVal = Store->getValue();
  assert(StoreVT.is128BitVector() &&
         StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
  StoredVal = DAG.getBitcast(StoreVT, StoredVal);

  // Splitting volatile memory ops is not allowed unless the operation was not
  // legal to begin with. We are assuming the input op is legal (this transform
  // is only used for targets with AVX).
  if (!Store->isSimple())
    return SDValue();

  MVT StoreSVT = StoreVT.getScalarType();
  unsigned NumElems = StoreVT.getVectorNumElements();
  unsigned ScalarSize = StoreSVT.getStoreSize();
  unsigned Alignment = Store->getAlignment();

  SDLoc DL(Store);
  SmallVector<SDValue, 4> Stores;
  for (unsigned i = 0; i != NumElems; ++i) {
    unsigned Offset = i * ScalarSize;
    SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(), Offset, DL);
    SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
                              DAG.getIntPtrConstant(i, DL));
    SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
                              Store->getPointerInfo().getWithOffset(Offset),
                              MinAlign(Alignment, Offset),
                              Store->getMemOperand()->getFlags());
    Stores.push_back(Ch);
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
}

static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
  SDLoc dl(St);
  SDValue StoredVal = St->getValue();

  // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
  if (StoredVal.getValueType().isVector() &&
      StoredVal.getValueType().getVectorElementType() == MVT::i1) {
    assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
           "Unexpected VT");
    assert(!St->isTruncatingStore() && "Expected non-truncating store");
    assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
           "Expected AVX512F without AVX512DQI");

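    // Widen the mask to v16i1, bitcast it to i16 and truncate to i8 so the
    // (at most 8-element) mask can be stored as a single byte.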
    StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
                            DAG.getUNDEF(MVT::v16i1), StoredVal,
                            DAG.getIntPtrConstant(0, dl));
    StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
    StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);

    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  if (St->isTruncatingStore())
    return SDValue();

  // If this is a 256-bit store of concatenated ops, we are better off splitting
  // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
  // and each half can execute independently. Some cores would split the op into
  // halves anyway, so the concat (vinsertf128) is purely an extra op.
  MVT StoreVT = StoredVal.getSimpleValueType();
  if (StoreVT.is256BitVector()) {
    SmallVector<SDValue, 4> CatOps;
    if (StoredVal.hasOneUse() && collectConcatOps(StoredVal.getNode(), CatOps))
      return splitVectorStore(St, DAG);
    return SDValue();
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
         "Unexpected VT");
  assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
             TargetLowering::TypeWidenVector && "Unexpected type action!");

  EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
  StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
                          DAG.getUNDEF(StoreVT));

  if (Subtarget.hasSSE2()) {
    // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
    // and store it.
    MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
    MVT CastVT = MVT::getVectorVT(StVT, 2);
    StoredVal = DAG.getBitcast(CastVT, StoredVal);
    StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
                            DAG.getIntPtrConstant(0, dl));

    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }
  assert(Subtarget.hasSSE1() && "Expected SSE");
  SDVTList Tys = DAG.getVTList(MVT::Other);
  SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
  return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
                                 St->getMemOperand());
}

// Lower vector extended loads using a shuffle. If SSSE3 is not available we
// may emit an illegal shuffle but the expansion is still better than scalar
// code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
// we'll emit a shuffle and an arithmetic shift.
// FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
// TODO: It is possible to support ZExt by zeroing the undef values during
// the shuffle phase or after the shuffle.
static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  MVT RegVT = Op.getSimpleValueType();
  assert(RegVT.isVector() && "We only custom lower vector loads.");
  assert(RegVT.isInteger() &&
         "We only custom lower integer vector loads.");

  LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
  SDLoc dl(Ld);

  // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
  if (RegVT.getVectorElementType() == MVT::i1) {
    assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
    assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
    assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
           "Expected AVX512F without AVX512DQI");

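    // Load the mask as a single i8 scalar, widen it to i16, bitcast it to
    // v16i1 and extract the original narrow mask vector from that.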
    SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
                                Ld->getPointerInfo(), Ld->getAlignment(),
                                Ld->getMemOperand()->getFlags());

    // Replace chain users with the new chain.
    assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");

    SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
    Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
                      DAG.getBitcast(MVT::v16i1, Val),
                      DAG.getIntPtrConstant(0, dl));
    return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
  }

  return SDValue();
}

/// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
/// each of which has no other use apart from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
  Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
  return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(0).hasOneUse() &&
          Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(1).hasOneUse());
}

/// Return true if node is an ISD::XOR of a X86ISD::SETCC and 1 and that the
/// SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
  if (Op.getOpcode() != ISD::XOR)
    return false;
  if (isOneConstant(Op.getOperand(1)))
    return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
           Op.getOperand(0).hasOneUse();
  return false;
}

SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond  = Op.getOperand(1);
  SDValue Dest  = Op.getOperand(2);
  SDLoc dl(Op);
  SDValue CC;
  bool Inverted = false;

  if (Cond.getOpcode() == ISD::SETCC) {
    // Check for setcc([su]{add,sub,mul}o == 0).
    if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
        isNullConstant(Cond.getOperand(1)) &&
        Cond.getOperand(0).getResNo() == 1 &&
        (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
         Cond.getOperand(0).getOpcode() == ISD::UADDO ||
         Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
         Cond.getOperand(0).getOpcode() == ISD::USUBO ||
         Cond.getOperand(0).getOpcode() == ISD::SMULO ||
         Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
      Inverted = true;
      Cond = Cond.getOperand(0);
    } else {
      if (SDValue NewCond = LowerSETCC(Cond, DAG))
        Cond = NewCond;
    }
  }
#if 0
  // FIXME: LowerXALUO doesn't handle these!!
  else if (Cond.getOpcode() == X86ISD::ADD  ||
           Cond.getOpcode() == X86ISD::SUB  ||
           Cond.getOpcode() == X86ISD::SMUL ||
           Cond.getOpcode() == X86ISD::UMUL)
    Cond = LowerXALUO(Cond, DAG);
#endif

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == X86ISD::SETCC ||
      CondOpcode == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
    if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
      Cond = Cmp;
      addTest = false;
    } else {
      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
      default: break;
      case X86::COND_O:
      case X86::COND_B:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
        Cond = Cond.getOperand(1);
        addTest = false;
        break;
      }
    }
  }
  CondOpcode = Cond.getOpcode();
  if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
      CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
      CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
    SDValue Value;
    X86::CondCode X86Cond;
    std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);

    if (Inverted)
      X86Cond = X86::GetOppositeBranchCondition(X86Cond);

    CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
    addTest = false;
  } else {
    unsigned CondOpc;
    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
      SDValue Cmp = Cond.getOperand(0).getOperand(1);
      if (CondOpc == ISD::OR) {
        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
            isX86LogicalCmp(Cmp)) {
          CC = Cond.getOperand(0).getOperand(0);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                              Chain, Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          addTest = false;
        }
      } else { // ISD::AND
        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
        // two branches instead of an explicit AND instruction with a
        // separate test. However, we only do this if this block doesn't
        // have a fall-through edge, because this requires an explicit
        // jmp when the condition is false.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
            isX86LogicalCmp(Cmp) &&
            Op.getNode()->hasOneUse()) {
          X86::CondCode CCode0 =
              (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
          CCode0 = X86::GetOppositeBranchCondition(CCode0);
          CC = DAG.getTargetConstant(CCode0, dl, MVT::i8);
          SDNode *User = *Op.getNode()->use_begin();
          // Look for an unconditional branch following this conditional branch.
          // We need this because we need to reverse the successors in order
          // to implement FCMP_OEQ.
          if (User->getOpcode() == ISD::BR) {
            SDValue FalseBB = User->getOperand(1);
            SDNode *NewBR =
              DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
            assert(NewBR == User);
            (void)NewBR;
            Dest = FalseBB;

            Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain,
                                Dest, CC, Cmp);
            X86::CondCode CCode1 =
                (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
            CCode1 = X86::GetOppositeBranchCondition(CCode1);
            CC = DAG.getTargetConstant(CCode1, dl, MVT::i8);
            Cond = Cmp;
            addTest = false;
          }
        }
      }
    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
      // Recognize xorb (setcc), 1 patterns. The xor inverts the condition.
      // It should be transformed during DAG combining except when the condition
      // is set by an arithmetic-with-overflow node.
      X86::CondCode CCode =
        (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
      CCode = X86::GetOppositeBranchCondition(CCode);
      CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
      Cond = Cond.getOperand(0).getOperand(1);
      addTest = false;
    } else if (Cond.getOpcode() == ISD::SETCC &&
               cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
      // For FCMP_OEQ, we can emit
      // two branches instead of an explicit AND instruction with a
      // separate test. However, we only do this if this block doesn't
      // have a fall-through edge, because this requires an explicit
      // jmp when the condition is false.
      if (Op.getNode()->hasOneUse()) {
        SDNode *User = *Op.getNode()->use_begin();
        // Look for an unconditional branch following this conditional branch.
        // We need this because we need to reverse the successors in order
        // to implement FCMP_OEQ.
        if (User->getOpcode() == ISD::BR) {
          SDValue FalseBB = User->getOperand(1);
          SDNode *NewBR =
            DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
          assert(NewBR == User);
          (void)NewBR;
          Dest = FalseBB;

          SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                                    Cond.getOperand(0), Cond.getOperand(1));
          Cmp = ConvertCmpIfNecessary(Cmp, DAG);
          CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                              Chain, Dest, CC, Cmp);
          CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
          Cond = Cmp;
          addTest = false;
        }
      }
    } else if (Cond.getOpcode() == ISD::SETCC &&
               cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
      // For FCMP_UNE, we can emit
      // two branches instead of an explicit OR instruction with a
      // separate test.
      SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                                Cond.getOperand(0), Cond.getOperand(1));
      Cmp = ConvertCmpIfNecessary(Cmp, DAG);
      CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
      Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                          Chain, Dest, CC, Cmp);
      CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
      Cond = Cmp;
      addTest = false;
    }
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
        Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue BTCC;
      if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, dl, DAG, BTCC)) {
        CC = BTCC;
        Cond = BT;
        addTest = false;
      }
    }
  }

  if (addTest) {
    X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
    CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
    Cond = EmitCmp(Cond, DAG.getConstant(0, dl, Cond.getValueType()),
                   X86Cond, dl, DAG);
  }
  Cond = ConvertCmpIfNecessary(Cond, DAG);
  return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                     Chain, Dest, CC, Cond);
}

// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4K
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool SplitStack = MF.shouldSplitStack();
  bool EmitStackProbe = !getStackProbeSymbolName(MF).empty();
  bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
               SplitStack || EmitStackProbe;
  SDLoc dl(Op);

  // Get the inputs.
  SDNode *Node = Op.getNode();
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  unsigned Align = Op.getConstantOperandVal(2);
  EVT VT = Node->getValueType(0);

  // Chain the dynamic stack allocation so that it doesn't modify the stack
  // pointer when other instructions are using the stack.
  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);

  bool Is64Bit = Subtarget.is64Bit();
  MVT SPTy = getPointerTy(DAG.getDataLayout());

  SDValue Result;
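  // If no stack probing or segmented-stack handling is required, just adjust
  // the stack pointer directly (and realign it if a larger alignment was
  // requested).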
  if (!Lower) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
    assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
                    " not tell us which reg is the stack pointer!");

    SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
    Chain = SP.getValue(1);
    const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
    unsigned StackAlign = TFI.getStackAlignment();
    Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
    if (Align > StackAlign)
      Result = DAG.getNode(ISD::AND, dl, VT, Result,
                         DAG.getConstant(-(uint64_t)Align, dl, VT));
    Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
  } else if (SplitStack) {
    MachineRegisterInfo &MRI = MF.getRegInfo();

    if (Is64Bit) {
      // The 64-bit implementation of segmented stacks needs to clobber both
      // r10 and r11. This makes it impossible to use it along with nested
      // parameters.
      const Function &F = MF.getFunction();
      for (const auto &A : F.args()) {
        if (A.hasNestAttr())
          report_fatal_error("Cannot use segmented stacks with functions that "
                             "have nested arguments.");
      }
    }

    const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
    Register Vreg = MRI.createVirtualRegister(AddrRegClass);
    Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
    Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
                                DAG.getRegister(Vreg, SPTy));
  } else {
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
    MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);

    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    Register SPReg = RegInfo->getStackRegister();
    SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
    Chain = SP.getValue(1);

    if (Align) {
      SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
                       DAG.getConstant(-(uint64_t)Align, dl, VT));
      Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
    }

    Result = SP;
  }

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);

  SDValue Ops[2] = {Result, Chain};
  return DAG.getMergeValues(Ops, dl);
}

SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = getPointerTy(MF.getDataLayout());
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  if (!Subtarget.is64Bit() ||
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (points to parameters passed in memory).
  //   reg_save_area
  SmallVector<SDValue, 8> MemOps;
  SDValue FIN = Op.getOperand(1);
  // Store gp_offset
  SDValue Store = DAG.getStore(
      Op.getOperand(0), DL,
      DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
      MachinePointerInfo(SV));
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
  Store = DAG.getStore(
      Op.getOperand(0), DL,
      DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
      MachinePointerInfo(SV, 4));
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
  SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  Store =
      DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
      Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
  SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
  Store = DAG.getStore(
      Op.getOperand(0), DL, RSFIN, FIN,
      MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
  MemOps.push_back(Store);
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget.is64Bit() &&
         "LowerVAARG only handles 64-bit va_arg!");
  assert(Op.getNumOperands() == 4);

  MachineFunction &MF = DAG.getMachineFunction();
  if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
    // The Win64 ABI uses char* instead of a structure.
    return DAG.expandVAArg(Op.getNode());

  SDValue Chain = Op.getOperand(0);
  SDValue SrcPtr = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  unsigned Align = Op.getConstantOperandVal(3);
  SDLoc dl(Op);

  EVT ArgVT = Op.getNode()->getValueType(0);
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
  uint8_t ArgMode;

  // Decide which area this value should be read from.
  // TODO: Implement the AMD64 ABI in its entirety. This simple
  // selection mechanism works only for the basic types.
  if (ArgVT == MVT::f80) {
    llvm_unreachable("va_arg for f80 not yet implemented");
  } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
    ArgMode = 2;  // Argument passed in XMM register. Use fp_offset.
  } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
    ArgMode = 1;  // Argument passed in GPR64 register(s). Use gp_offset.
  } else {
    llvm_unreachable("Unhandled argument type in LowerVAARG");
  }

  if (ArgMode == 2) {
    // Sanity Check: Make sure using fp_offset makes sense.
    assert(!Subtarget.useSoftFloat() &&
           !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
           Subtarget.hasSSE1());
  }

  // Insert VAARG_64 node into the DAG
  // VAARG_64 returns two values: Variable Argument Address, Chain
  SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
                       DAG.getConstant(ArgMode, dl, MVT::i8),
                       DAG.getConstant(Align, dl, MVT::i32)};
  SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
  SDValue VAARG = DAG.getMemIntrinsicNode(
    X86ISD::VAARG_64, dl,
    VTs, InstOps, MVT::i64,
    MachinePointerInfo(SV),
    /*Align=*/0,
    MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
  Chain = VAARG.getValue(1);

  // Load the next argument and return it
  return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
}

static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
  // where a va_list is still an i8*.
  assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
  if (Subtarget.isCallingConvWin64(
        DAG.getMachineFunction().getFunction().getCallingConv()))
    // Win64 va_list is a plain i8*, so expand to a simple pointer copy.
    return DAG.expandVACopy(Op.getNode());

  SDValue Chain = Op.getOperand(0);
  SDValue DstPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

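  // The SysV x86-64 va_list occupies 24 bytes ({i32, i32, i8*, i8*}), so
  // va_copy lowers to a 24-byte memcpy with 8-byte alignment.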
  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
                       DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
                       false, false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}

// Helper to get immediate/variable SSE shift opcode from other shift opcodes.
static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
  switch (Opc) {
  case ISD::SHL:
  case X86ISD::VSHL:
  case X86ISD::VSHLI:
    return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
  case ISD::SRL:
  case X86ISD::VSRL:
  case X86ISD::VSRLI:
    return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
  case ISD::SRA:
  case X86ISD::VSRA:
  case X86ISD::VSRAI:
    return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
  }
  llvm_unreachable("Unknown target vector shift node");
}

/// Handle vector element shifts where the shift amount is a constant.
/// Takes immediate version of shift as input.
static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
                                          SDValue SrcOp, uint64_t ShiftAmt,
                                          SelectionDAG &DAG) {
  MVT ElementType = VT.getVectorElementType();

  // Bitcast the source vector to the output type, this is mainly necessary for
  // vXi8/vXi64 shifts.
  if (VT != SrcOp.getSimpleValueType())
    SrcOp = DAG.getBitcast(VT, SrcOp);

  // Fold this packed shift into its first operand if ShiftAmt is 0.
  if (ShiftAmt == 0)
    return SrcOp;

  // Check for ShiftAmt >= element width
  if (ShiftAmt >= ElementType.getSizeInBits()) {
    if (Opc == X86ISD::VSRAI)
      ShiftAmt = ElementType.getSizeInBits() - 1;
    else
      return DAG.getConstant(0, dl, VT);
  }

  assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
         && "Unknown target vector shift-by-constant node");

  // Fold this packed vector shift into a build vector if SrcOp is a
  // vector of Constants or UNDEFs.
  if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
    SmallVector<SDValue, 8> Elts;
    unsigned NumElts = SrcOp->getNumOperands();

    switch (Opc) {
    default: llvm_unreachable("Unknown opcode!");
    case X86ISD::VSHLI:
      for (unsigned i = 0; i != NumElts; ++i) {
        SDValue CurrentOp = SrcOp->getOperand(i);
        if (CurrentOp->isUndef()) {
          Elts.push_back(CurrentOp);
          continue;
        }
        auto *ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
      }
      break;
    case X86ISD::VSRLI:
      for (unsigned i = 0; i != NumElts; ++i) {
        SDValue CurrentOp = SrcOp->getOperand(i);
        if (CurrentOp->isUndef()) {
          Elts.push_back(CurrentOp);
          continue;
        }
        auto *ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
      }
      break;
    case X86ISD::VSRAI:
      for (unsigned i = 0; i != NumElts; ++i) {
        SDValue CurrentOp = SrcOp->getOperand(i);
        if (CurrentOp->isUndef()) {
          Elts.push_back(CurrentOp);
          continue;
        }
        auto *ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
      }
      break;
    }

    return DAG.getBuildVector(VT, dl, Elts);
  }

  return DAG.getNode(Opc, dl, VT, SrcOp,
                     DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
}

/// Handle vector element shifts where the shift amount may or may not be a
/// constant. Takes immediate version of shift as input.
static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
                                   SDValue SrcOp, SDValue ShAmt,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  MVT SVT = ShAmt.getSimpleValueType();
  assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");

  // Catch shift-by-constant.
  if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
    return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
                                      CShAmt->getZExtValue(), DAG);

  // Change opcode to non-immediate version.
  Opc = getTargetVShiftUniformOpcode(Opc, true);

  // Need to build a vector containing shift amount.
  // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
  // +====================+============+=======================================+
  // | ShAmt is           | HasSSE4.1? | Construct ShAmt vector as             |
  // +====================+============+=======================================+
  // | i64                | Yes, No    | Use ShAmt as lowest elt               |
  // | i32                | Yes        | zero-extend in-reg                    |
  // | (i32 zext(i16/i8)) | Yes        | zero-extend in-reg                    |
  // | (i32 zext(i16/i8)) | No         | byte-shift-in-reg                     |
  // | i16/i32            | No         | v4i32 build_vector(ShAmt, 0, ud, ud)  |
  // +====================+============+=======================================+

  if (SVT == MVT::i64)
    ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
  else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
           ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
           (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
            ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
    ShAmt = ShAmt.getOperand(0);
    MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
    ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
    if (Subtarget.hasSSE41())
      ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
                          MVT::v2i64, ShAmt);
    else {
      SDValue ByteShift = DAG.getTargetConstant(
          (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
      ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
      ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
                          ByteShift);
      ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
                          ByteShift);
    }
  } else if (Subtarget.hasSSE41() &&
             ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
    ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
                        MVT::v2i64, ShAmt);
  } else {
    SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
                        DAG.getUNDEF(SVT)};
    ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
  }

  // The return type has to be a 128-bit type with the same element
  // type as the input type.
  MVT EltVT = VT.getVectorElementType();
  MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());

  ShAmt = DAG.getBitcast(ShVT, ShAmt);
  return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}

/// Return Mask with the necessary casting or extending
/// for \p Mask according to \p MaskVT when lowering masking intrinsics
static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
                           const SDLoc &dl) {

  if (isAllOnesConstant(Mask))
    return DAG.getConstant(1, dl, MaskVT);
  if (X86::isZeroNode(Mask))
    return DAG.getConstant(0, dl, MaskVT);

  assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");

  if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
    assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
    assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
    // In 32-bit mode a bitcast of i64 is illegal; split the mask into two
    // i32 halves instead.
    SDValue Lo, Hi;
    Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
                        DAG.getConstant(0, dl, MVT::i32));
    Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
                        DAG.getConstant(1, dl, MVT::i32));

    Lo = DAG.getBitcast(MVT::v32i1, Lo);
    Hi = DAG.getBitcast(MVT::v32i1, Hi);

    return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
  } else {
    MVT BitcastVT = MVT::getVectorVT(MVT::i1,
                                     Mask.getSimpleValueType().getSizeInBits());
    // When MaskVT is v2i1 or v4i1, the low 2 or 4 elements are
    // extracted with EXTRACT_SUBVECTOR.
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                       DAG.getBitcast(BitcastVT, Mask),
                       DAG.getIntPtrConstant(0, dl));
  }
}

/// Return (and \p Op, \p Mask) for compare instructions or
/// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
/// necessary casting or extending for \p Mask when lowering masking intrinsics
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
                  SDValue PreservedSrc,
                  const X86Subtarget &Subtarget,
                  SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
  unsigned OpcodeSelect = ISD::VSELECT;
  SDLoc dl(Op);

  if (isAllOnesConstant(Mask))
    return Op;

  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

  if (PreservedSrc.isUndef())
    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
  return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
}

/// Creates an SDNode for a predicated scalar operation.
/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask comes in as MVT::i8 and is transformed to MVT::v1i1 while
/// lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is using
/// "X86select" instead of "vselect". We just can't create the "vselect" node
/// for a scalar instruction.
static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {

  if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
    if (MaskConst->getZExtValue() & 0x1)
      return Op;

  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
  SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
                              DAG.getBitcast(MVT::v8i1, Mask),
                              DAG.getIntPtrConstant(0, dl));
  if (Op.getOpcode() == X86ISD::FSETCCM ||
      Op.getOpcode() == X86ISD::FSETCCM_SAE ||
      Op.getOpcode() == X86ISD::VFPCLASSS)
    return DAG.getNode(ISD::AND, dl, VT, Op, IMask);

  if (PreservedSrc.isUndef())
    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
  return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
}

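/// Return the size in bytes of the EH registration node that WinEHStatePass
/// creates for \p Fn, based on its EH personality.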
static int getSEHRegistrationNodeSize(const Function *Fn) {
  if (!Fn->hasPersonalityFn())
    report_fatal_error(
        "querying registration node size for function without personality");
  // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
  // WinEHStatePass for the full struct definition.
  switch (classifyEHPersonality(Fn->getPersonalityFn())) {
  case EHPersonality::MSVC_X86SEH: return 24;
  case EHPersonality::MSVC_CXX: return 16;
  default: break;
  }
  report_fatal_error(
      "can only recover FP for 32-bit MSVC EH personality functions");
}

/// When the MSVC runtime transfers control to us, either to an outlined
/// function or when returning to a parent frame after catching an exception, we
/// recover the parent frame pointer by doing arithmetic on the incoming EBP.
/// Here's the math:
///   RegNodeBase = EntryEBP - RegNodeSize
///   ParentFP = RegNodeBase - ParentFrameOffset
/// Subtracting RegNodeSize takes us to the offset of the registration node, and
/// subtracting the offset (negative on x86) takes us back to the parent FP.
static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
                                   SDValue EntryEBP) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDLoc dl;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());

  // It's possible that the parent function no longer has a personality function
  // if the exceptional code was optimized away, in which case we just return
  // the incoming EBP.
  if (!Fn->hasPersonalityFn())
    return EntryEBP;

  // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
  // registration, or the .set_setframe offset.
  MCSymbol *OffsetSym =
      MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
          GlobalValue::dropLLVMManglingEscape(Fn->getName()));
  SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
  SDValue ParentFrameOffset =
      DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);

  // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
  // prologue to RBP in the parent function.
  const X86Subtarget &Subtarget =
      static_cast<const X86Subtarget &>(DAG.getSubtarget());
  if (Subtarget.is64Bit())
    return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);

  int RegNodeSize = getSEHRegistrationNodeSize(Fn);
  // RegNodeBase = EntryEBP - RegNodeSize
  // ParentFP = RegNodeBase - ParentFrameOffset
  SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
                                    DAG.getConstant(RegNodeSize, dl, PtrVT));
  return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
}

SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  // Helper to detect if the operand is CUR_DIRECTION rounding mode.
  auto isRoundModeCurDirection = [](SDValue Rnd) {
    if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
      return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;

    return false;
  };
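  // Detect a rounding operand that requests suppress-all-exceptions (SAE)
  // without an explicit static rounding mode.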
  auto isRoundModeSAE = [](SDValue Rnd) {
    if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
      unsigned RC = C->getZExtValue();
      if (RC & X86::STATIC_ROUNDING::NO_EXC) {
        // Clear the NO_EXC bit and check remaining bits.
        RC ^= X86::STATIC_ROUNDING::NO_EXC;
        // As a convenience we allow no other bits or explicitly
        // current direction.
        return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
      }
    }

    return false;
  };
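  // Detect SAE combined with an explicit static rounding mode; the rounding
  // control (with the NO_EXC bit cleared) is returned in RC.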
  auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
    if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
      RC = C->getZExtValue();
      if (RC & X86::STATIC_ROUNDING::NO_EXC) {
        // Clear the NO_EXC bit and check remaining bits.
        RC ^= X86::STATIC_ROUNDING::NO_EXC;
        return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
               RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
               RC == X86::STATIC_ROUNDING::TO_POS_INF ||
               RC == X86::STATIC_ROUNDING::TO_ZERO;
      }
    }

    return false;
  };

  SDLoc dl(Op);
  unsigned IntNo = Op.getConstantOperandVal(0);
  MVT VT = Op.getSimpleValueType();
  const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
  if (IntrData) {
    switch(IntrData->Type) {
    case INTR_TYPE_1OP: {
      // We specify 2 possible opcodes for intrinsics with rounding modes.
      // First, we check if the intrinsic may have non-default rounding mode,
      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      if (IntrWithRoundingModeOpcode != 0) {
        SDValue Rnd = Op.getOperand(2);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
                             Op.getOperand(1),
                             DAG.getTargetConstant(RC, dl, MVT::i32));
        if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
    }
    case INTR_TYPE_1OP_SAE: {
      SDValue Sae = Op.getOperand(2);

      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
    }
    case INTR_TYPE_2OP: {
      SDValue Src2 = Op.getOperand(2);

      // We specify 2 possible opcodes for intrinsics with rounding modes.
      // First, we check if the intrinsic may have non-default rounding mode,
      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      if (IntrWithRoundingModeOpcode != 0) {
        SDValue Rnd = Op.getOperand(3);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
                             Op.getOperand(1), Src2,
                             DAG.getTargetConstant(RC, dl, MVT::i32));
        if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }

      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Src2);
    }
    case INTR_TYPE_2OP_SAE: {
      SDValue Sae = Op.getOperand(3);

      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
                         Op.getOperand(2));
    }
    case INTR_TYPE_3OP:
    case INTR_TYPE_3OP_IMM8: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);

      // We specify 2 possible opcodes for intrinsics with rounding modes.
      // First, we check if the intrinsic may have non-default rounding mode,
      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      if (IntrWithRoundingModeOpcode != 0) {
        SDValue Rnd = Op.getOperand(4);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
                             Src1, Src2, Src3,
                             DAG.getTargetConstant(RC, dl, MVT::i32));
        if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }

      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Src1, Src2, Src3);
    }
    case INTR_TYPE_4OP:
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
        Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
    case INTR_TYPE_1OP_MASK: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);
      // We add rounding mode to the Node when
      //   - RC Opcode is specified and
      //   - RC is not "current direction".
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      if (IntrWithRoundingModeOpcode != 0) {
        SDValue Rnd = Op.getOperand(4);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          return getVectorMaskingNode(
              DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
                          Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
              Mask, PassThru, Subtarget, DAG);
        if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }
      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_1OP_MASK_SAE: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);
      SDValue Rnd = Op.getOperand(4);

      unsigned Opc;
      if (isRoundModeCurDirection(Rnd))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Rnd))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_SCALAR_MASK: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue passThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      // There are 2 kinds of intrinsics in this group:
      // (1) With suppress-all-exceptions (SAE) or rounding mode - 6 operands
      // (2) With rounding mode and SAE - 7 operands.
      bool HasRounding = IntrWithRoundingModeOpcode != 0;
      if (Op.getNumOperands() == (5U + HasRounding)) {
        if (HasRounding) {
          SDValue Rnd = Op.getOperand(5);
          unsigned RC = 0;
          if (isRoundModeSAEToX(Rnd, RC))
            return getScalarMaskingNode(
                DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
                            DAG.getTargetConstant(RC, dl, MVT::i32)),
                Mask, passThru, Subtarget, DAG);
          if (!isRoundModeCurDirection(Rnd))
            return SDValue();
        }
        return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
                                                Src2),
                                    Mask, passThru, Subtarget, DAG);
      }

      assert(Op.getNumOperands() == (6U + HasRounding) &&
             "Unexpected intrinsic form");
      SDValue RoundingMode = Op.getOperand(5);
      unsigned Opc = IntrData->Opc0;
      if (HasRounding) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrWithRoundingModeOpcode;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
                                              Src2, RoundingMode),
                                  Mask, passThru, Subtarget, DAG);
    }
    case INTR_TYPE_SCALAR_MASK_RND: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue passThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue Rnd = Op.getOperand(5);

      SDValue NewOp;
      unsigned RC = 0;
      if (isRoundModeCurDirection(Rnd))
        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
      else if (isRoundModeSAEToX(Rnd, RC))
        NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
                            DAG.getTargetConstant(RC, dl, MVT::i32));
      else
        return SDValue();

      return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
    }
    case INTR_TYPE_SCALAR_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue passThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue Sae = Op.getOperand(5);
      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
                                  Mask, passThru, Subtarget, DAG);
    }
    case INTR_TYPE_2OP_MASK: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue NewOp;
      if (IntrData->Opc1 != 0) {
        SDValue Rnd = Op.getOperand(5);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
                              DAG.getTargetConstant(RC, dl, MVT::i32));
        else if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }
      if (!NewOp)
        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
      return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_2OP_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(5);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }

      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue PassThru = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);
      SDValue Sae = Op.getOperand(6);
      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_3OP_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue PassThru = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case BLENDV: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);

      EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
      Src3 = DAG.getBitcast(MaskVT, Src3);

      // Reverse the operands to match VSELECT order.
      return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
    }
    case VPERM_2OP : {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);

      // Swap Src1 and Src2 in the node creation
      return DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1);
    }
    case IFMA_OP:
      // NOTE: We need to swizzle the operands to pass the multiply operands
      // first.
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
    case FPCLASSS: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Imm = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);
      SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
      SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
                                                 Subtarget, DAG);
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
                                DAG.getConstant(0, dl, MVT::v8i1),
                                FPclassMask, DAG.getIntPtrConstant(0, dl));
      return DAG.getBitcast(MVT::i8, Ins);
    }

    case CMP_MASK_CC: {
      MVT MaskVT = Op.getSimpleValueType();
      SDValue CC = Op.getOperand(3);
      // We specify 2 possible opcodes for intrinsics with rounding modes.
      // First, we check if the intrinsic may have non-default rounding mode,
      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(4);
        if (isRoundModeSAE(Sae))
          return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
                             Op.getOperand(2), CC, Sae);
        if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      // Default rounding mode.
      return DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
                          Op.getOperand(2), CC);
    }
    case CMP_MASK_SCALAR_CC: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue CC = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      SDValue Cmp;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(5);
        if (isRoundModeSAE(Sae))
          Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      // Default rounding mode.
      if (!Cmp.getNode())
        Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);

      SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
                                             Subtarget, DAG);
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
                                DAG.getConstant(0, dl, MVT::v8i1),
                                CmpMask, DAG.getIntPtrConstant(0, dl));
      return DAG.getBitcast(MVT::i8, Ins);
    }
    case COMI: { // Comparison intrinsics
      ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
      SDValue LHS = Op.getOperand(1);
      SDValue RHS = Op.getOperand(2);
      SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
      SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
      SDValue SetCC;
      switch (CC) {
      case ISD::SETEQ: { // (ZF = 0 and PF = 0)
        SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
        SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
        SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
        break;
      }
      case ISD::SETNE: { // (ZF = 1 or PF = 1)
        SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
        SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
        SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
        break;
      }
      case ISD::SETGT: // (CF = 0 and ZF = 0)
        SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
        break;
      case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
        SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
        break;
      }
      case ISD::SETGE: // CF = 0
        SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
        break;
      case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
        SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
        break;
      default:
        llvm_unreachable("Unexpected illegal condition!");
      }
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
    }
    case COMI_RM: { // Comparison intrinsics with Sae
      SDValue LHS = Op.getOperand(1);
      SDValue RHS = Op.getOperand(2);
      unsigned CondVal = Op.getConstantOperandVal(3);
      SDValue Sae = Op.getOperand(4);

      SDValue FCmp;
      if (isRoundModeCurDirection(Sae))
        FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
                           DAG.getTargetConstant(CondVal, dl, MVT::i8));
      else if (isRoundModeSAE(Sae))
        FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
                           DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
      else
        return SDValue();
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
                                DAG.getConstant(0, dl, MVT::v16i1),
                                FCmp, DAG.getIntPtrConstant(0, dl));
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
                         DAG.getBitcast(MVT::i16, Ins));
    }
    case VSHIFT:
      return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
                                 Op.getOperand(1), Op.getOperand(2), Subtarget,
                                 DAG);
    case COMPRESS_EXPAND_IN_REG: {
      SDValue Mask = Op.getOperand(3);
      SDValue DataToCompress = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
        return Op.getOperand(1);

      // Avoid false dependency.
      if (PassThru.isUndef())
        PassThru = DAG.getConstant(0, dl, VT);

      return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
                         Mask);
    }
    case FIXUPIMM:
    case FIXUPIMM_MASKZ: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue Imm = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);
      SDValue Passthru = (IntrData->Type == FIXUPIMM)
                             ? Src1
                             : getZeroVector(VT, Subtarget, DAG, dl);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }

      SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);

      if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
        return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);

      return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
    }
    case ROUNDP: {
      assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
      // Clear the upper bits of the rounding immediate so that the legacy
      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
      auto Round = cast<ConstantSDNode>(Op.getOperand(2));
      SDValue RoundingMode =
          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), RoundingMode);
    }
    case ROUNDS: {
      assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
      // Clear the upper bits of the rounding immediate so that the legacy
      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
      auto Round = cast<ConstantSDNode>(Op.getOperand(3));
      SDValue RoundingMode =
          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2), RoundingMode);
    }
    case BEXTRI: {
      assert(IntrData->Opc0 == X86ISD::BEXTR && "Unexpected opcode");

      // The control is a TargetConstant, but we need to convert it to a
      // ConstantSDNode.
      uint64_t Imm = Op.getConstantOperandVal(2);
      SDValue Control = DAG.getConstant(Imm, dl, Op.getValueType());
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Control);
    }
    // ADC/ADCX/SBB
    case ADX: {
      SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
      SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);

      SDValue Res;
      // If the carry in is zero, then we should just use ADD/SUB instead of
      // ADC/SBB.
      if (isNullConstant(Op.getOperand(1))) {
        Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
                          Op.getOperand(3));
      } else {
        SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
                                    DAG.getConstant(-1, dl, MVT::i8));
        Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
                          Op.getOperand(3), GenCF.getValue(1));
      }
      SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
      SDValue Results[] = { SetCC, Res };
      return DAG.getMergeValues(Results, dl);
    }
    case CVTPD2PS_MASK:
    case CVTPD2DQ_MASK:
    case CVTQQ2PS_MASK:
    case TRUNCATE_TO_REG: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);

      if (isAllOnesConstant(Mask))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);

      MVT SrcVT = Src.getSimpleValueType();
      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
                         Mask);
    }
    case CVTPS2PH_MASK: {
      SDValue Src = Op.getOperand(1);
      SDValue Rnd = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      if (isAllOnesConstant(Mask))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);

      MVT SrcVT = Src.getSimpleValueType();
      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
                         PassThru, Mask);

    }
    case CVTNEPS2BF16_MASK: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);

      if (ISD::isBuildVectorAllOnes(Mask.getNode()))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);

      // Break false dependency.
      if (PassThru.isUndef())
        PassThru = DAG.getConstant(0, dl, PassThru.getValueType());

      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
                         Mask);
    }
    default:
      break;
    }
  }

  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.

  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value rather than just set flags, so lower them to the
  // PTEST or TESTP pattern and a SETCC for the result.
  case Intrinsic::x86_avx512_ktestc_b:
  case Intrinsic::x86_avx512_ktestc_w:
  case Intrinsic::x86_avx512_ktestc_d:
  case Intrinsic::x86_avx512_ktestc_q:
  case Intrinsic::x86_avx512_ktestz_b:
  case Intrinsic::x86_avx512_ktestz_w:
  case Intrinsic::x86_avx512_ktestz_d:
  case Intrinsic::x86_avx512_ktestz_q:
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestnzc:
  case Intrinsic::x86_avx_ptestz_256:
  case Intrinsic::x86_avx_ptestc_256:
  case Intrinsic::x86_avx_ptestnzc_256:
  case Intrinsic::x86_avx_vtestz_ps:
  case Intrinsic::x86_avx_vtestc_ps:
  case Intrinsic::x86_avx_vtestnzc_ps:
  case Intrinsic::x86_avx_vtestz_pd:
  case Intrinsic::x86_avx_vtestc_pd:
  case Intrinsic::x86_avx_vtestnzc_pd:
  case Intrinsic::x86_avx_vtestz_ps_256:
  case Intrinsic::x86_avx_vtestc_ps_256:
  case Intrinsic::x86_avx_vtestnzc_ps_256:
  case Intrinsic::x86_avx_vtestz_pd_256:
  case Intrinsic::x86_avx_vtestc_pd_256:
  case Intrinsic::x86_avx_vtestnzc_pd_256: {
    unsigned TestOpc = X86ISD::PTEST;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx512_ktestc_b:
    case Intrinsic::x86_avx512_ktestc_w:
    case Intrinsic::x86_avx512_ktestc_d:
    case Intrinsic::x86_avx512_ktestc_q:
      // CF = 1
      TestOpc = X86ISD::KTEST;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx512_ktestz_b:
    case Intrinsic::x86_avx512_ktestz_w:
    case Intrinsic::x86_avx512_ktestz_d:
    case Intrinsic::x86_avx512_ktestz_q:
      TestOpc = X86ISD::KTEST;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_avx_ptestnzc_256:
      // ZF and CF = 0
      X86CC = X86::COND_A;
      break;
    }

    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
    SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }

  case Intrinsic::x86_sse42_pcmpistria128:
  case Intrinsic::x86_sse42_pcmpestria128:
  case Intrinsic::x86_sse42_pcmpistric128:
  case Intrinsic::x86_sse42_pcmpestric128:
  case Intrinsic::x86_sse42_pcmpistrio128:
  case Intrinsic::x86_sse42_pcmpestrio128:
  case Intrinsic::x86_sse42_pcmpistris128:
  case Intrinsic::x86_sse42_pcmpestris128:
  case Intrinsic::x86_sse42_pcmpistriz128:
  case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_E;
      break;
    }
    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
    SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }

  case Intrinsic::x86_sse42_pcmpistri128:
  case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
      Opcode = X86ISD::PCMPISTR;
    else
      Opcode = X86ISD::PCMPESTR;

    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps);
  }

  case Intrinsic::x86_sse42_pcmpistrm128:
  case Intrinsic::x86_sse42_pcmpestrm128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
      Opcode = X86ISD::PCMPISTR;
    else
      Opcode = X86ISD::PCMPESTR;

    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
  }

  case Intrinsic::eh_sjlj_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    auto &Context = MF.getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
                                            Twine(MF.getFunctionNumber()));
    return DAG.getNode(getGlobalWrapperKind(), dl, VT,
                       DAG.getMCSymbol(S, PtrVT));
  }

  case Intrinsic::x86_seh_lsda: {
    // Compute the symbol for the LSDA. We know it'll get emitted later.
    MachineFunction &MF = DAG.getMachineFunction();
    SDValue Op1 = Op.getOperand(1);
    auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
    MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
        GlobalValue::dropLLVMManglingEscape(Fn->getName()));

    // Generate a simple absolute symbol reference. This intrinsic is only
    // supported on 32-bit Windows, which isn't PIC.
    SDValue Result = DAG.getMCSymbol(LSDASym, VT);
    return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
  }

  case Intrinsic::eh_recoverfp: {
    SDValue FnOp = Op.getOperand(1);
    SDValue IncomingFPOp = Op.getOperand(2);
    GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
    auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
    if (!Fn)
      report_fatal_error(
          "llvm.eh.recoverfp must take a function as the first argument");
    return recoverFramePointer(DAG, Fn, IncomingFPOp);
  }

  case Intrinsic::localaddress: {
    // Returns one of the stack, base, or frame pointer registers, depending on
    // which is used to reference local variables.
    MachineFunction &MF = DAG.getMachineFunction();
    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    unsigned Reg;
    if (RegInfo->hasBasePointer(MF))
      Reg = RegInfo->getBaseRegister();
    else { // Handles the SP or FP case.
      bool CantUseFP = RegInfo->needsStackRealignment(MF);
      if (CantUseFP)
        Reg = RegInfo->getPtrSizedStackRegister(MF);
      else
        Reg = RegInfo->getPtrSizedFrameRegister(MF);
    }
    return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
  }

  case Intrinsic::x86_avx512_vp2intersect_q_512:
  case Intrinsic::x86_avx512_vp2intersect_q_256:
  case Intrinsic::x86_avx512_vp2intersect_q_128:
  case Intrinsic::x86_avx512_vp2intersect_d_512:
  case Intrinsic::x86_avx512_vp2intersect_d_256:
  case Intrinsic::x86_avx512_vp2intersect_d_128: {
    MVT MaskVT = Op.getSimpleValueType();

    SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
    SDLoc DL(Op);

    SDValue Operation =
        DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
                    Op->getOperand(1), Op->getOperand(2));

    SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
                                                 MaskVT, Operation);
    SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
                                                 MaskVT, Operation);
    return DAG.getMergeValues({Result0, Result1}, DL);
  }
  case Intrinsic::x86_mmx_pslli_w:
  case Intrinsic::x86_mmx_pslli_d:
  case Intrinsic::x86_mmx_pslli_q:
  case Intrinsic::x86_mmx_psrli_w:
  case Intrinsic::x86_mmx_psrli_d:
  case Intrinsic::x86_mmx_psrli_q:
  case Intrinsic::x86_mmx_psrai_w:
  case Intrinsic::x86_mmx_psrai_d: {
    SDLoc DL(Op);
    SDValue ShAmt = Op.getOperand(2);
    // If the argument is a constant, convert it to a target constant.
    if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
      ShAmt = DAG.getTargetConstant(C->getZExtValue(), DL, MVT::i32);
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                         Op.getOperand(0), Op.getOperand(1), ShAmt);
    }

    unsigned NewIntrinsic;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_mmx_pslli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psll_w;
      break;
    case Intrinsic::x86_mmx_pslli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psll_d;
      break;
    case Intrinsic::x86_mmx_pslli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psll_q;
      break;
    case Intrinsic::x86_mmx_psrli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
      break;
    case Intrinsic::x86_mmx_psrli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
      break;
    case Intrinsic::x86_mmx_psrli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
      break;
    case Intrinsic::x86_mmx_psrai_w:
      NewIntrinsic = Intrinsic::x86_mmx_psra_w;
      break;
    case Intrinsic::x86_mmx_psrai_d:
      NewIntrinsic = Intrinsic::x86_mmx_psra_d;
      break;
    }

    // The vector shift intrinsics with scalar shift amounts use 32-bit values,
    // but the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
    // MMX register.
    ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                       DAG.getConstant(NewIntrinsic, DL, MVT::i32),
                       Op.getOperand(1), ShAmt);

  }
  }
}

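/// Lower an AVX2 gather intrinsic to an X86 masked gather node. The scale
/// operand must be a constant, and an undef source (or an all-ones mask) is
/// replaced with a zero vector to break a false register dependency.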
static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                                 SDValue Src, SDValue Mask, SDValue Base,
                                 SDValue Index, SDValue ScaleOp, SDValue Chain,
                                 const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
  // If source is undef or we know it won't be used, use a zero vector
  // to break register dependency.
  // TODO: use undef instead and let BreakFalseDeps deal with it?
  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
  SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
    VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
}

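/// Lower an AVX-512 gather intrinsic. Accepts either a scalar or a vXi1 mask
/// and converts a scalar mask to vXi1 before building the masked gather node.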
static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
                             SDValue Src, SDValue Mask, SDValue Base,
                             SDValue Index, SDValue ScaleOp, SDValue Chain,
                             const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
                              VT.getVectorNumElements());
  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);

  // We support two versions of the gather intrinsics. One with scalar mask and
  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
  if (Mask.getValueType() != MaskVT)
    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
  // If source is undef or we know it won't be used, use a zero vector
  // to break register dependency.
  // TODO: use undef instead and let BreakFalseDeps deal with it?
  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
  SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
    VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
}

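/// Lower an AVX-512 scatter intrinsic. As with gathers, a scalar mask is
/// converted to vXi1 and the scale operand must be a constant.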
static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                               SDValue Src, SDValue Mask, SDValue Base,
                               SDValue Index, SDValue ScaleOp, SDValue Chain,
                               const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
                              Src.getSimpleValueType().getVectorNumElements());
  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);

  // We support two versions of the scatter intrinsics. One with scalar mask and
  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
  if (Mask.getValueType() != MaskVT)
    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
      VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return Res.getValue(1);
}

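/// Lower a gather/scatter prefetch intrinsic to a machine node, supplying the
/// mask, base, scale, index, displacement, and segment operands it expects.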
static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                               SDValue Mask, SDValue Base, SDValue Index,
                               SDValue ScaleOp, SDValue Chain,
                               const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  MVT MaskVT =
    MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
  SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
  SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
  return SDValue(Res, 0);
}

/// Handles the lowering of builtin intrinsics with chain that return their
/// value into registers EDX:EAX.
/// If operand SrcReg is a valid register identifier, then operand 2 of N is
/// copied to SrcReg. The assumption is that SrcReg is an implicit input to
/// TargetOpcode.
/// Returns a Glue value which can be used to add extra copy-from-reg if the
/// expanded intrinsics implicitly defines extra registers (i.e. not just
/// EDX:EAX).
static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
                                        SelectionDAG &DAG,
                                        unsigned TargetOpcode,
                                        unsigned SrcReg,
                                        const X86Subtarget &Subtarget,
                                        SmallVectorImpl<SDValue> &Results) {
  SDValue Chain = N->getOperand(0);
  SDValue Glue;

  if (SrcReg) {
    assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
    Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
    Glue = Chain.getValue(1);
  }

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue N1Ops[] = {Chain, Glue};
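  // Emit the target opcode; include the glue operand only if the CopyToReg
  // above produced one.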
  SDNode *N1 = DAG.getMachineNode(
      TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
  Chain = SDValue(N1, 0);

  // Read the result out of EDX:EAX (RDX:RAX in 64-bit mode).
  SDValue LO, HI;
  if (Subtarget.is64Bit()) {
    LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  Chain = HI.getValue(1);
  Glue = HI.getValue(2);

  if (Subtarget.is64Bit()) {
    // Merge the two 32-bit values into a 64-bit one.
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
                              DAG.getConstant(32, DL, MVT::i8));
    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return Glue;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDValue Ops[] = { LO, HI };
  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
  Results.push_back(Pair);
  Results.push_back(Chain);
  return Glue;
}

/// Handles the lowering of builtin intrinsics that read the time stamp counter
/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
/// READCYCLECOUNTER nodes.
static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
                                    SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget,
                                    SmallVectorImpl<SDValue> &Results) {
  // The processor's time-stamp counter (a 64-bit MSR) is stored into the
  // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
  // and the EAX register is loaded with the low-order 32 bits.
  SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
                                             /* NoRegister */0, Subtarget,
                                             Results);
  if (Opcode != X86::RDTSCP)
    return;

  SDValue Chain = Results[1];
  // The RDTSCP instruction loads the IA32_TSC_AUX MSR (address C000_0103H)
  // into the ECX register. Add 'ecx' explicitly to the chain.
  SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
  Results[1] = ecx;
  Results.push_back(ecx.getValue(1));
}

static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  SmallVector<SDValue, 3> Results;
  SDLoc DL(Op);
  getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
                          Results);
  return DAG.getMergeValues(Results, DL);
}

static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue RegNode = Op.getOperand(2);
  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EH registrations only live in functions using WinEH");

  // Cast the operand to an alloca, and remember the frame index.
  auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
  EHInfo->EHRegNodeFrameIndex = FINode->getIndex();

  // Return the chain operand without making any DAG nodes.
  return Chain;
}

static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue EHGuard = Op.getOperand(2);
  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EHGuard only live in functions using WinEH");

  // Cast the operand to an alloca, and remember the frame index.
  auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
  EHInfo->EHGuardFrameIndex = FINode->getIndex();

  // Return the chain operand without making any DAG nodes.
  return Chain;
}

/// Emit Truncating Store with signed or unsigned saturation.
static SDValue
EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
                SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
                SelectionDAG &DAG) {

  SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  return SignedSat ?
    DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
    DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
}

/// Emit Masked Truncating Store with signed or unsigned saturation.
static SDValue
EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
                      SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
                      MachineMemOperand *MMO, SelectionDAG &DAG) {

  SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Ops[] = { Chain, Val, Ptr, Mask };
  return SignedSat ?
    DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
    DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
}

static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  unsigned IntNo = Op.getConstantOperandVal(1);
  const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
  if (!IntrData) {
    switch (IntNo) {
    case llvm::Intrinsic::x86_seh_ehregnode:
      return MarkEHRegistrationNode(Op, DAG);
    case llvm::Intrinsic::x86_seh_ehguard:
      return MarkEHGuard(Op, DAG);
    case llvm::Intrinsic::x86_rdpkru: {
      SDLoc dl(Op);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      // Create a RDPKRU node and pass 0 to the ECX parameter.
      return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
                         DAG.getConstant(0, dl, MVT::i32));
    }
    case llvm::Intrinsic::x86_wrpkru: {
      SDLoc dl(Op);
      // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
      // to the EDX and ECX parameters.
      return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
                         Op.getOperand(0), Op.getOperand(2),
                         DAG.getConstant(0, dl, MVT::i32),
                         DAG.getConstant(0, dl, MVT::i32));
    }
    case llvm::Intrinsic::x86_flags_read_u32:
    case llvm::Intrinsic::x86_flags_read_u64:
    case llvm::Intrinsic::x86_flags_write_u32:
    case llvm::Intrinsic::x86_flags_write_u64: {
      // We need a frame pointer because this will get lowered to a PUSH/POP
      // sequence.
      MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
      MFI.setHasCopyImplyingStackAdjustment(true);
      // Don't do anything here, we will expand these intrinsics out later
      // during FinalizeISel in EmitInstrWithCustomInserter.
      return SDValue();
    }
    case Intrinsic::x86_lwpins32:
    case Intrinsic::x86_lwpins64:
    case Intrinsic::x86_umwait:
    case Intrinsic::x86_tpause: {
      SDLoc dl(Op);
      SDValue Chain = Op->getOperand(0);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      unsigned Opcode;

      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_umwait:
        Opcode = X86ISD::UMWAIT;
        break;
      case Intrinsic::x86_tpause:
        Opcode = X86ISD::TPAUSE;
        break;
      case Intrinsic::x86_lwpins32:
      case Intrinsic::x86_lwpins64:
        Opcode = X86ISD::LWPINS;
        break;
      }

      SDValue Operation =
          DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
                      Op->getOperand(3), Op->getOperand(4));
      SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
                         Operation.getValue(1));
    }
    case Intrinsic::x86_enqcmd:
    case Intrinsic::x86_enqcmds: {
      SDLoc dl(Op);
      SDValue Chain = Op.getOperand(0);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      unsigned Opcode;
      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic!");
      case Intrinsic::x86_enqcmd:
        Opcode = X86ISD::ENQCMD;
        break;
      case Intrinsic::x86_enqcmds:
        Opcode = X86ISD::ENQCMDS;
        break;
      }
      SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
                                      Op.getOperand(3));
      SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
                         Operation.getValue(1));
    }
    }
    return SDValue();
  }

  SDLoc dl(Op);
  switch(IntrData->Type) {
  default: llvm_unreachable("Unknown Intrinsic Type");
  case RDSEED:
  case RDRAND: {
    // Emit the node with the right value type.
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
    SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));

    // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
    // Otherwise return the random value, which the hardware sets to 0 in that
    // case, cast to i32.
    SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
                     DAG.getConstant(1, dl, Op->getValueType(1)),
                     DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
                     SDValue(Result.getNode(), 1)};
    SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);

    // Return { result, isValid, chain }.
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
                       SDValue(Result.getNode(), 2));
  }
  case GATHER_AVX2: {
    SDValue Chain = Op.getOperand(0);
    SDValue Src   = Op.getOperand(2);
    SDValue Base  = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Mask  = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                             Scale, Chain, Subtarget);
  }
  case GATHER: {
  // gather(v1, base, index, mask, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Src   = Op.getOperand(2);
    SDValue Base  = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Mask  = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
                         Chain, Subtarget);
  }
  case SCATTER: {
  // scatter(base, mask, index, v1, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Base  = Op.getOperand(2);
    SDValue Mask  = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Src   = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                          Scale, Chain, Subtarget);
  }
  case PREFETCH: {
    const APInt &HintVal = Op.getConstantOperandAPInt(6);
    assert((HintVal == 2 || HintVal == 3) &&
           "Wrong prefetch hint in intrinsic: should be 2 or 3");
    unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
    SDValue Chain = Op.getOperand(0);
    SDValue Mask  = Op.getOperand(2);
    SDValue Index = Op.getOperand(3);
    SDValue Base  = Op.getOperand(4);
    SDValue Scale = Op.getOperand(5);
    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
                           Subtarget);
  }
  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
  case RDTSC: {
    SmallVector<SDValue, 2> Results;
    getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
                            Results);
    return DAG.getMergeValues(Results, dl);
  }
  // Read Performance Monitoring Counters.
  case RDPMC:
  // Get Extended Control Register.
  case XGETBV: {
    SmallVector<SDValue, 2> Results;

    // RDPMC uses ECX to select the index of the performance counter to read.
    // XGETBV uses ECX to select the index of the XCR register to return.
    // The result is stored into registers EDX:EAX.
    expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
                                Subtarget, Results);
    return DAG.getMergeValues(Results, dl);
  }
  // XTEST intrinsics.
  case XTEST: {
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
    SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
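    // XTEST clears ZF while executing transactionally, so COND_NE yields 1
    // inside a transaction and 0 otherwise.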

    SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
    SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
                       Ret, SDValue(InTrans.getNode(), 1));
  }
  case TRUNCATE_TO_MEM_VI8:
  case TRUNCATE_TO_MEM_VI16:
  case TRUNCATE_TO_MEM_VI32: {
    SDValue Mask = Op.getOperand(4);
    SDValue DataToTruncate = Op.getOperand(3);
    SDValue Addr = Op.getOperand(2);
    SDValue Chain = Op.getOperand(0);

    MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
    assert(MemIntr && "Expected MemIntrinsicSDNode!");

    EVT MemVT  = MemIntr->getMemoryVT();

    uint16_t TruncationOp = IntrData->Opc0;
    switch (TruncationOp) {
    case X86ISD::VTRUNC: {
      if (isAllOnesConstant(Mask)) // return just a truncate store
        return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
                                 MemIntr->getMemOperand());

      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

      return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, MemVT,
                                MemIntr->getMemOperand(), true /* truncating */);
    }
    case X86ISD::VTRUNCUS:
    case X86ISD::VTRUNCS: {
      bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
      if (isAllOnesConstant(Mask))
        return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
                               MemIntr->getMemOperand(), DAG);

      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

      return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
                                   VMask, MemVT, MemIntr->getMemOperand(), DAG);
    }
    default:
      llvm_unreachable("Unsupported truncstore intrinsic");
    }
  }
  }
}

SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = Op.getConstantOperandVal(0);
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address.
  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
  return getReturnAddressFrameIndex(DAG);
}

SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  EVT VT = Op.getValueType();

  MFI.setFrameAddressIsTaken(true);

  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
    // Depth > 0 makes no sense on targets which use Windows unwind codes.  It
    // is not possible to crawl up the stack without looking at the unwind codes
    // simultaneously.
    int FrameAddrIndex = FuncInfo->getFAIndex();
    if (!FrameAddrIndex) {
      // Set up a frame object for the frame address.
      unsigned SlotSize = RegInfo->getSlotSize();
      FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
          SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
      FuncInfo->setFAIndex(FrameAddrIndex);
    }
    return DAG.getFrameIndex(FrameAddrIndex, VT);
  }

  unsigned FrameReg =
      RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
  SDLoc dl(Op);  // FIXME probably not meaningful
  unsigned Depth = Op.getConstantOperandVal(0);
  assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
          (FrameReg == X86::EBP && VT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
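  // Each frame stores its caller's frame pointer at offset 0, so chase that
  // chain Depth times.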
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                              const MachineFunction &MF) const {
  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();

  Register Reg = StringSwitch<unsigned>(RegName)
                       .Case("esp", X86::ESP)
                       .Case("rsp", X86::RSP)
                       .Case("ebp", X86::EBP)
                       .Case("rbp", X86::RBP)
                       .Default(0);

  if (Reg == X86::EBP || Reg == X86::RBP) {
    if (!TFI.hasFP(MF))
      report_fatal_error("register " + StringRef(RegName) +
                         " is allocatable: function has no frame pointer");
#ifndef NDEBUG
    else {
      const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
      Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
      assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
             "Invalid Frame Register!");
    }
#endif
  }

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
                                                     SelectionDAG &DAG) const {
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
}

unsigned X86TargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
    return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;

  return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
}

unsigned X86TargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Funclet personalities don't use selectors (the runtime does the selection).
  assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
  return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
}

bool X86TargetLowering::needsFixedCatchObjects() const {
  return Subtarget.isTargetWin64();
}

SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain     = Op.getOperand(0);
  SDValue Offset    = Op.getOperand(1);
  SDValue Handler   = Op.getOperand(2);
  SDLoc dl      (Op);

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
  assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
          (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
  unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
                                 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
                                                       dl));
  StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);

  return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StoreAddrReg, PtrVT));
}

SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If the subtarget is not 64-bit, we may need the global base register
  // after isel expands the pseudo instruction, i.e., after the CGBR pass has
  // run. Therefore, ask for the GlobalBaseReg now, so that the pass inserts
  // the code for us in case we need it. Otherwise, we would end up
  // referencing a virtual register that is never defined!
  if (!Subtarget.is64Bit()) {
    const X86InstrInfo *TII = Subtarget.getInstrInfo();
    (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
  }
  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                       SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
                     Op.getOperand(0));
}

static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  return Op.getOperand(0);
}

SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Root = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl (Op);

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  if (Subtarget.is64Bit()) {
    SDValue OutChains[6];

    // Large code-model.
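    // The 64-bit trampoline materialized below is:
    //   movabsq $<fptr>, %r11    ; bytes 0-9
    //   movabsq $<nest>, %r10    ; bytes 10-19
    //   jmpq    *%r11            ; bytes 20-22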
    const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
    const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.

    const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
    const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;

    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix

    // Load the pointer to the nested function into R11.
    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
    SDValue Addr = Trmp;
    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(2, dl, MVT::i64));
    OutChains[1] =
        DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
                     /* Alignment = */ 2);

    // Load the 'nest' parameter value into R10.
    // R10 is specified in X86CallingConv.td
    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(10, dl, MVT::i64));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(12, dl, MVT::i64));
    OutChains[3] =
        DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
                     /* Alignment = */ 2);

    // Jump to the nested function.
    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(20, dl, MVT::i64));
    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20));

    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(22, dl, MVT::i64));
    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
                                Addr, MachinePointerInfo(TrmpAddr, 22));

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  } else {
    const Function *Func =
      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    CallingConv::ID CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::X86_StdCall: {
      // Pass 'nest' parameter in ECX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::ECX;

      // Check that ECX wasn't needed by an 'inreg' parameter.
      FunctionType *FTy = Func->getFunctionType();
      const AttributeList &Attrs = Func->getAttributes();

      if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;

        for (FunctionType::param_iterator I = FTy->param_begin(),
             E = FTy->param_end(); I != E; ++I, ++Idx)
          if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
            auto &DL = DAG.getDataLayout();
            // FIXME: should only count parameters that are lowered to integers.
            InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
          }

        if (InRegCount > 2) {
          report_fatal_error("Nest register in use - reduce number of inreg"
                             " parameters!");
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
    case CallingConv::X86_ThisCall:
    case CallingConv::Fast:
    case CallingConv::Tail:
      // Pass 'nest' parameter in EAX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

    SDValue OutChains[4];
    SDValue Addr, Disp;
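    // The 32-bit trampoline materialized below is:
    //   movl $<nest>, %ecx|%eax  ; bytes 0-4
    //   jmp  <fptr>              ; bytes 5-9 (rel32, relative to byte 10)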

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(10, dl, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);

    // This is storing the opcode for MOV32ri.
    const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
    const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
    OutChains[0] =
        DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
                     Trmp, MachinePointerInfo(TrmpAddr));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(1, dl, MVT::i32));
    OutChains[1] =
        DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
                     /* Alignment = */ 1);

    const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(5, dl, MVT::i32));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
                                Addr, MachinePointerInfo(TrmpAddr, 5),
                                /* Alignment = */ 1);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(6, dl, MVT::i32));
    OutChains[3] =
        DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
                     /* Alignment = */ 1);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  }
}

SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of the x87 floating-point control word
   (FPCW), and has the following settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
  */

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);

  // Save FP Control Word to stack slot
  int SSFI = MF.getFrameInfo().CreateStackObject(2, StackAlignment, false);
  SDValue StackSlot =
      DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
                              MachineMemOperand::MOStore, 2, 2);

  SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
  SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
                                          DAG.getVTList(MVT::Other),
                                          Ops, MVT::i16, MMO);

  // Load FP Control Word from stack slot
  SDValue CWD =
      DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());

  // Transform the rounding-mode bits as described above.
  SDValue CWD1 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x800, DL, MVT::i16)),
                DAG.getConstant(11, DL, MVT::i8));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x400, DL, MVT::i16)),
                DAG.getConstant(9, DL, MVT::i8));

  SDValue RetVal =
    DAG.getNode(ISD::AND, DL, MVT::i16,
                DAG.getNode(ISD::ADD, DL, MVT::i16,
                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, DL, MVT::i16)),
                DAG.getConstant(3, DL, MVT::i16));

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}

// Split a unary integer op into 2 half-sized ops.
static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  unsigned NumElems = VT.getVectorNumElements();
  unsigned SizeInBits = VT.getSizeInBits();
  MVT EltVT = VT.getVectorElementType();
  SDValue Src = Op.getOperand(0);
  assert(EltVT == Src.getSimpleValueType().getVectorElementType() &&
         "Src and Op should have the same element type!");

  // Extract the Lo/Hi vectors
  SDLoc dl(Op);
  SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
  SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);

  MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
}

// Decompose 256-bit ops into smaller 128-bit ops.
static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return LowerVectorIntUnary(Op, DAG);
}

// Decompose 512-bit ops into smaller 256-bit ops.
static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().is512BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 512-bit vector integer operation");
  return LowerVectorIntUnary(Op, DAG);
}

/// Lower a vector CTLZ using the natively supported vector CTLZ instruction.
//
// An i8/i16 vector is implemented using the dword LZCNT vector instruction
// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
// split the vector, perform the operation on its Lo and Hi parts and
// concatenate the results.
static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  assert(Op.getOpcode() == ISD::CTLZ);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElems = VT.getVectorNumElements();

  assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
          "Unsupported element type");

  // Split the vector; its Lo and Hi parts will be handled in the next
  // iteration.
  if (NumElems > 16 ||
      (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
    return LowerVectorIntUnary(Op, DAG);

  MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
  assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
          "Unsupported value type for operation");

  // Use the natively supported vector instruction vplzcntd.
  Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
  SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
  SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
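  // The zero-extension to i32 added (32 - EltSizeInBits) extra leading zeros;
  // subtract them back out of the dword lzcnt result.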
  SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);

  return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
}

// Lower CTLZ using a PSHUFB lookup table implementation.
static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  int NumElts = VT.getVectorNumElements();
  int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
  MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);

  // Per-nibble leading zero PSHUFB lookup table.
  const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
                       /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
                       /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
                       /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};

  SmallVector<SDValue, 64> LUTVec;
  for (int i = 0; i < NumBytes; ++i)
    LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
  SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);

  // Begin by bitcasting the input to a byte vector, then split those bytes
  // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
  // If the hi input nibble is zero then we add both results together, otherwise
  // we just take the hi result (by masking the lo result to zero before the
  // add).
  SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
  SDValue Zero = DAG.getConstant(0, DL, CurrVT);

  SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
  SDValue Lo = Op0;
  SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
  SDValue HiZ;
  if (CurrVT.is512BitVector()) {
    MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
    HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
    HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
  } else {
    HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
  }

  Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
  Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
  Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
  SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);

  // Merge the result from vXi8 back to VT, working on the lo/hi halves
  // of the current vector width in the same way we did for the nibbles.
  // If the upper half of the input element is zero then add the halves'
  // leading zero counts together, otherwise just use the upper half's.
  // Double the width of the result until we are at target width.
  while (CurrVT != VT) {
    int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
    int CurrNumElts = CurrVT.getVectorNumElements();
    MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
    MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
    SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);

    // Check if the upper half of the input element is zero.
    if (CurrVT.is512BitVector()) {
      MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
      HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
      HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
    } else {
      HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
    }
    HiZ = DAG.getBitcast(NextVT, HiZ);

    // Move the upper/lower halves to the lower bits as we'll be extending to
    // NextVT. Mask the lower result to zero if HiZ is true and add the results
    // together.
    SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
    SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
    SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
    R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
    Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
    CurrVT = NextVT;
  }

  return Res;
}

static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
                               const X86Subtarget &Subtarget,
                               SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  if (Subtarget.hasCDI() &&
      // vXi8 vectors need to be promoted to 512-bits for vXi32.
      (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
    return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);

  // Decompose 256-bit ops into smaller 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return Lower256IntUnary(Op, DAG);

  // Decompose 512-bit ops into smaller 256-bit ops.
  if (VT.is512BitVector() && !Subtarget.hasBWI())
    return Lower512IntUnary(Op, DAG);

  assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
  return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
}

static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  MVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();

  if (VT.isVector())
    return LowerVectorCTLZ(Op, dl, Subtarget, DAG);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is no i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  if (Opc == ISD::CTLZ) {
    // If src is zero (i.e. bsr sets ZF), returns NumBits.
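    // Use 2*NumBits-1 here so that the XOR with NumBits-1 below produces
    // NumBits for a zero source.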
    SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
                     DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
                     Op.getValue(1)};
    Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
  }

  // Finally xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
                   DAG.getConstant(NumBits - 1, dl, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  unsigned NumBits = VT.getScalarSizeInBits();
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);

  assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
         "Only scalar CTTZ requires custom lowering");

  // Issue a bsf (scan bits forward) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);

  // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
                   DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
                   Op.getValue(1)};
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}

/// Break a 256-bit integer operation into two new 128-bit ones and then
/// concatenate the result back.
static SDValue split256IntArith(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  assert(VT.is256BitVector() && VT.isInteger() &&
         "Unsupported value type for operation");

  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);

  // Extract the LHS vectors
  SDValue LHS = Op.getOperand(0);
  SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
  SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);

  // Extract the RHS vectors
  SDValue RHS = Op.getOperand(1);
  SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
  SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);

  MVT EltVT = VT.getVectorElementType();
  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}

/// Break a 512-bit integer operation into two new 256-bit ones and then
/// concatenate the result back.
static SDValue split512IntArith(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  assert(VT.is512BitVector() && VT.isInteger() &&
         "Unsupported value type for operation");

  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);

  // Extract the LHS vectors
  SDValue LHS = Op.getOperand(0);
  SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
  SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);

  // Extract the RHS vectors
  SDValue RHS = Op.getOperand(1);
  SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
  SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);

  MVT EltVT = VT.getVectorElementType();
  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}

static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  if (VT == MVT::i16 || VT == MVT::i32)
    return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);

  if (VT.getScalarType() == MVT::i1)
    return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
                       Op.getOperand(0), Op.getOperand(1));

  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return split256IntArith(Op, DAG);
}

static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
  unsigned Opcode = Op.getOpcode();
  if (VT.getScalarType() == MVT::i1) {
    SDLoc dl(Op);
    switch (Opcode) {
    default: llvm_unreachable("Expected saturated arithmetic opcode");
    case ISD::UADDSAT:
    case ISD::SADDSAT:
      // *addsat i1 X, Y --> X | Y
      return DAG.getNode(ISD::OR, dl, VT, X, Y);
    case ISD::USUBSAT:
    case ISD::SSUBSAT:
      // *subsat i1 X, Y --> X & ~Y
      return DAG.getNode(ISD::AND, dl, VT, X, DAG.getNOT(dl, Y, VT));
    }
  }

  if (VT.is128BitVector()) {
    // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
                                                 *DAG.getContext(), VT);
    SDLoc DL(Op);
    if (Opcode == ISD::UADDSAT && !TLI.isOperationLegal(ISD::UMIN, VT)) {
      // uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
      SDValue Add = DAG.getNode(ISD::ADD, DL, VT, X, Y);
      SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Add, ISD::SETUGT);
      return DAG.getSelect(DL, VT, Cmp, DAG.getAllOnesConstant(DL, VT), Add);
    }
    if (Opcode == ISD::USUBSAT && !TLI.isOperationLegal(ISD::UMAX, VT)) {
      // usubsat X, Y --> (X >u Y) ? X - Y : 0
      SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
      SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
      return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
    }
    // Use default expansion.
    return SDValue();
  }

  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return split256IntArith(Op, DAG);
}

static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
                        SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
    // Since X86 does not have CMOV for 8-bit integer, we don't convert
    // 8-bit integer abs to NEG and CMOV.
    SDLoc DL(Op);
    SDValue N0 = Op.getOperand(0);
    SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
                              DAG.getConstant(0, DL, VT), N0);
    SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_GE, DL, MVT::i8),
                     SDValue(Neg.getNode(), 1)};
    return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
  }

  // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
  if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
    SDLoc DL(Op);
    SDValue Src = Op.getOperand(0);
    SDValue Sub =
        DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
    return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
  }

  if (VT.is256BitVector() && !Subtarget.hasInt256()) {
    assert(VT.isInteger() &&
           "Only handle AVX 256-bit vector integer operation");
    return Lower256IntUnary(Op, DAG);
  }

  // Default to expand.
  return SDValue();
}

static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // For AVX1 cases, split to use legal ops (everything but v4i64).
  if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
    return split256IntArith(Op, DAG);

  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);

  // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
  // using the SMIN/SMAX instructions and flipping the signbit back.
  if (VT == MVT::v8i16) {
    assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
           "Unexpected MIN/MAX opcode");
    SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
    N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
    N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
    Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
    SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
    return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
  }

  // Else, expand to a compare/select.
  ISD::CondCode CC;
  switch (Opcode) {
  case ISD::SMIN: CC = ISD::CondCode::SETLT;  break;
  case ISD::SMAX: CC = ISD::CondCode::SETGT;  break;
  case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
  case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
  default: llvm_unreachable("Unknown MINMAX opcode");
  }

  SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
  return DAG.getSelect(DL, VT, Cond, N0, N1);
}

static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
                        SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  if (VT.getScalarType() == MVT::i1)
    return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));

  // Decompose 256-bit ops into 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return split256IntArith(Op, DAG);

  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  // Lower v16i8/v32i8/v64i8 mul by extending to v8i16/v16i16/v32i16 vector
  // pairs, multiplying and truncating.
  if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
    unsigned NumElts = VT.getVectorNumElements();

    if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
        (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
      MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
      return DAG.getNode(
          ISD::TRUNCATE, dl, VT,
          DAG.getNode(ISD::MUL, dl, ExVT,
                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
    }

    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

    // Extract the lo/hi parts and any-extend them to i16.
    // We're only going to keep the low byte of each result element of the
    // pmullw, so it doesn't matter what's in the high byte of each 16-bit
    // element.
    SDValue Undef = DAG.getUNDEF(VT);
    SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
    SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));

    SDValue BLo, BHi;
    if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
      // If the RHS is a constant, manually unpackl/unpackh it.
      SmallVector<SDValue, 16> LoOps, HiOps;
      for (unsigned i = 0; i != NumElts; i += 16) {
        for (unsigned j = 0; j != 8; ++j) {
          LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
                                               MVT::i16));
          HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
                                               MVT::i16));
        }
      }

      BLo = DAG.getBuildVector(ExVT, dl, LoOps);
      BHi = DAG.getBuildVector(ExVT, dl, HiOps);
    } else {
      BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
      BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
    }

    // Multiply, mask the lower 8 bits of the lo/hi results and pack.
    SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
    SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
    RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
    RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
    return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
  }

  // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
  if (VT == MVT::v4i32) {
    assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
           "Should not custom lower when pmulld is available!");

    // Extract the odd parts.
    static const int UnpackMask[] = { 1, -1, 3, -1 };
    SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
    SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);

    // Multiply the even parts.
    SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
                                DAG.getBitcast(MVT::v2i64, A),
                                DAG.getBitcast(MVT::v2i64, B));
    // Now multiply odd parts.
    SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
                               DAG.getBitcast(MVT::v2i64, Aodds),
                               DAG.getBitcast(MVT::v2i64, Bodds));

    Evens = DAG.getBitcast(VT, Evens);
    Odds = DAG.getBitcast(VT, Odds);

    // Merge the two vectors back together with a shuffle. This expands into 2
    // shuffles.
    static const int ShufMask[] = { 0, 4, 2, 6 };
    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
  }

  assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
         "Only know how to lower V2I64/V4I64/V8I64 multiply");
  assert(!Subtarget.hasDQI() && "DQI should use MULLQ");

  //  Ahi = psrlqi(a, 32);
  //  Bhi = psrlqi(b, 32);
  //
  //  AloBlo = pmuludq(a, b);
  //  AloBhi = pmuludq(a, Bhi);
  //  AhiBlo = pmuludq(Ahi, b);
  //
  //  Hi = psllqi(AloBhi + AhiBlo, 32);
  //  return AloBlo + Hi;
  KnownBits AKnown = DAG.computeKnownBits(A);
  KnownBits BKnown = DAG.computeKnownBits(B);

  APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
  bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
  bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);

  APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
  bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
  bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);

  SDValue Zero = DAG.getConstant(0, dl, VT);

  // Only multiply lo/hi halves that aren't known to be zero.
  SDValue AloBlo = Zero;
  if (!ALoIsZero && !BLoIsZero)
    AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);

  SDValue AloBhi = Zero;
  if (!ALoIsZero && !BHiIsZero) {
    SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
    AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
  }

  SDValue AhiBlo = Zero;
  if (!AHiIsZero && !BLoIsZero) {
    SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
    AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
  }

  SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
  Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);

  return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
}

static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  bool IsSigned = Op->getOpcode() == ISD::MULHS;
  unsigned NumElts = VT.getVectorNumElements();
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  // Decompose 256-bit ops into 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return split256IntArith(Op, DAG);

  if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
    assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
           (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
           (VT == MVT::v16i32 && Subtarget.hasAVX512()));

    // PMULxD operations multiply each even value (starting at 0) of LHS with
    // the related value of RHS and produce a widened result.
    // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
    // => <2 x i64> <ae|cg>
    //
    // In other words, to have all the results, we need to perform two PMULxD:
    // 1. one with the even values.
    // 2. one with the odd values.
    // To achieve #2, we need to place the odd values at an even position.
    //
    // Place the odd values at an even position (basically, shift all values 1
    // step to the left):
    const int Mask[] = {1, -1,  3, -1,  5, -1,  7, -1,
                        9, -1, 11, -1, 13, -1, 15, -1};
    // <a|b|c|d> => <b|undef|d|undef>
    SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
                                        makeArrayRef(&Mask[0], NumElts));
    // <e|f|g|h> => <f|undef|h|undef>
    SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
                                        makeArrayRef(&Mask[0], NumElts));

    // Emit two multiplies, one for the lower 2 ints and one for the higher 2
    // ints.
    MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
    unsigned Opcode =
        (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
    // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
    // => <2 x i64> <ae|cg>
    SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
                                                  DAG.getBitcast(MulVT, A),
                                                  DAG.getBitcast(MulVT, B)));
    // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
    // => <2 x i64> <bf|dh>
    SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
                                                  DAG.getBitcast(MulVT, Odd0),
                                                  DAG.getBitcast(MulVT, Odd1)));

    // Shuffle it back into the right order.
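    // E.g., for v4i32 the mask is {1, 5, 3, 7}: it picks the upper i32 half of
    // each i64 product, giving <hi(ae)|hi(bf)|hi(cg)|hi(dh)>.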
    SmallVector<int, 16> ShufMask(NumElts);
    for (int i = 0; i != (int)NumElts; ++i)
      ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;

    SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);

    // If we have a signed multiply but no PMULDQ, fix up the result of an
    // unsigned multiply.
    if (IsSigned && !Subtarget.hasSSE41()) {
      SDValue Zero = DAG.getConstant(0, dl, VT);
      SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
                               DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
      SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
                               DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);

      SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
    }

    return Res;
  }

  // Only i8 vectors should need custom lowering after this.
  assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
         (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
         "Unsupported vector type");

  // Lower v16i8/v32i8 as an extension to v8i16/v16i16 vector pairs, multiply,
  // logically shift the upper half down and pack back to i8.

  // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
  // and then ashr/lshr the upper bits down to the lower bits before multiply.
  unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;

  if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
      (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
    SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
    SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
    Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
  }

  // For signed 512-bit vectors, split into 256-bit vectors to allow the
  // sign-extension to occur.
  if (VT == MVT::v64i8 && IsSigned)
    return split512IntArith(Op, DAG);

  // Signed AVX2 implementation - extend xmm subvectors to ymm.
  if (VT == MVT::v32i8 && IsSigned) {
    MVT ExVT = MVT::v16i16;
    SDValue ALo = extract128BitVector(A, 0, DAG, dl);
    SDValue BLo = extract128BitVector(B, 0, DAG, dl);
    SDValue AHi = extract128BitVector(A, NumElts / 2, DAG, dl);
    SDValue BHi = extract128BitVector(B, NumElts / 2, DAG, dl);
    ALo = DAG.getNode(ExAVX, dl, ExVT, ALo);
    BLo = DAG.getNode(ExAVX, dl, ExVT, BLo);
    AHi = DAG.getNode(ExAVX, dl, ExVT, AHi);
    BHi = DAG.getNode(ExAVX, dl, ExVT, BHi);
    SDValue Lo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
    SDValue Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
    Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
    Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);

    // Bitcast back to VT and then pack all the even elements from Lo and Hi.
    // Shuffle lowering should turn this into PACKUS+PERMQ
    Lo = DAG.getBitcast(VT, Lo);
    Hi = DAG.getBitcast(VT, Hi);
    return DAG.getVectorShuffle(VT, dl, Lo, Hi,
                                { 0,  2,  4,  6,  8, 10, 12, 14,
                                 16, 18, 20, 22, 24, 26, 28, 30,
                                 32, 34, 36, 38, 40, 42, 44, 46,
                                 48, 50, 52, 54, 56, 58, 60, 62});
  }

  // For signed v16i8 and all unsigned vXi8 we will unpack the low and high
  // half of each 128-bit lane to widen to a vXi16 type. Do the multiplies,
  // shift the results and pack the half lane results back together.

  MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

  static const int PSHUFDMask[] = { 8,  9, 10, 11, 12, 13, 14, 15,
                                   -1, -1, -1, -1, -1, -1, -1, -1};

  // Extract the lo parts and zero/sign extend to i16.
  // Only use SSE4.1 instructions for signed v16i8, where using unpack requires
  // shifts to sign extend. Using unpack for unsigned only requires an xor to
  // create zeros and a copy due to tied register constraints pre-AVX. But using
  // zero_extend_vector_inreg would require an additional pshufd for the high
  // part.

  SDValue ALo, AHi;
  if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
    ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);

    AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
    AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
  } else if (IsSigned) {
    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));

    ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
    AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
  } else {
    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
                                          DAG.getConstant(0, dl, VT)));
    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
                                          DAG.getConstant(0, dl, VT)));
  }

  SDValue BLo, BHi;
  if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
    // If the RHS is a constant, manually unpackl/unpackh and extend.
    SmallVector<SDValue, 16> LoOps, HiOps;
    for (unsigned i = 0; i != NumElts; i += 16) {
      for (unsigned j = 0; j != 8; ++j) {
        SDValue LoOp = B.getOperand(i + j);
        SDValue HiOp = B.getOperand(i + j + 8);

        if (IsSigned) {
          LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
          HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
        } else {
          LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
          HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
        }

        LoOps.push_back(LoOp);
        HiOps.push_back(HiOp);
      }
    }

    BLo = DAG.getBuildVector(ExVT, dl, LoOps);
    BHi = DAG.getBuildVector(ExVT, dl, HiOps);
  } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
    BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);

    BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
    BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
  } else if (IsSigned) {
    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));

    BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
    BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
  } else {
    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
                                          DAG.getConstant(0, dl, VT)));
    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
                                          DAG.getConstant(0, dl, VT)));
  }

  // Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results
  // and pack back to vXi8.
  SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
  SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
  RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
  RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);

  // Bitcast back to VT and then pack all the even elements from Lo and Hi.
  return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}

SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget.isTargetWin64() && "Unexpected target");
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
         "Unexpected return type for lowering");

  RTLIB::Libcall LC;
  bool isSigned;
  switch (Op->getOpcode()) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case ISD::SDIV:      isSigned = true;  LC = RTLIB::SDIV_I128;    break;
  case ISD::UDIV:      isSigned = false; LC = RTLIB::UDIV_I128;    break;
  case ISD::SREM:      isSigned = true;  LC = RTLIB::SREM_I128;    break;
  case ISD::UREM:      isSigned = false; LC = RTLIB::UREM_I128;    break;
  case ISD::SDIVREM:   isSigned = true;  LC = RTLIB::SDIVREM_I128; break;
  case ISD::UDIVREM:   isSigned = false; LC = RTLIB::UDIVREM_I128; break;
  }

  SDLoc dl(Op);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op->getOperand(i).getValueType();
    assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
           "Unexpected argument type for lowering");
    SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
    Entry.Node = StackPtr;
    InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
                           MachinePointerInfo(), /* Alignment = */ 16);
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Ty = PointerType::get(ArgTy, 0);
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(
          getLibcallCallingConv(LC),
          static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
          std::move(Args))
      .setInRegister()
      .setSExtResult(isSigned)
      .setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return DAG.getBitcast(VT, CallInfo.first);
}

// Return true if the required (according to Opcode) shift-imm form is natively
// supported by the Subtarget
static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
                                        unsigned Opcode) {
  if (VT.getScalarSizeInBits() < 16)
    return false;

  if (VT.is512BitVector() && Subtarget.hasAVX512() &&
      (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
    return true;

  bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
                (VT.is256BitVector() && Subtarget.hasInt256());

  bool AShift = LShift && (Subtarget.hasAVX512() ||
                           (VT != MVT::v2i64 && VT != MVT::v4i64));
  return (Opcode == ISD::SRA) ? AShift : LShift;
}

// The shift amount is a variable, but it is the same for all vector lanes.
// These instructions are defined together with shift-immediate.
static
bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
                                      unsigned Opcode) {
  return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
}

// Return true if the required (according to Opcode) variable-shift form is
// natively supported by the Subtarget
static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
                                    unsigned Opcode) {

  if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
    return false;

  // vXi16 is supported only on AVX-512 with BWI.
  if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
    return false;

  if (Subtarget.hasAVX512())
    return true;

  bool LShift = VT.is128BitVector() || VT.is256BitVector();
  bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
  return (Opcode == ISD::SRA) ? AShift : LShift;
}

static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);

  auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
    assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
    MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
    SDValue Ex = DAG.getBitcast(ExVT, R);

    // ashr(R, 63) === cmp_slt(R, 0)
    if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
      assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
             "Unsupported PCMPGT op");
      return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
    }

    if (ShiftAmt >= 32) {
      // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
      SDValue Upper =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
      SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 ShiftAmt - 32, DAG);
      if (VT == MVT::v2i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
      if (VT == MVT::v4i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
                                  {9, 1, 11, 3, 13, 5, 15, 7});
    } else {
      // SRA upper i32, SRL whole i64 and select lower i32.
      SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 ShiftAmt, DAG);
      SDValue Lower =
          getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
      Lower = DAG.getBitcast(ExVT, Lower);
      if (VT == MVT::v2i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
      if (VT == MVT::v4i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
                                  {8, 1, 10, 3, 12, 5, 14, 7});
    }
    return DAG.getBitcast(VT, Ex);
  };

  // Optimize shl/srl/sra with constant shift amount.
  APInt APIntShiftAmt;
  if (!X86::isConstantSplat(Amt, APIntShiftAmt))
    return SDValue();

  // If the shift amount is out of range, return undef.
  if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
    return DAG.getUNDEF(VT);

  uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();

  if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
    return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);

  // i64 SRA needs to be performed as partial shifts.
  if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
       (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
      Op.getOpcode() == ISD::SRA)
    return ArithmeticShiftRight64(ShiftAmt);

  if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
      VT == MVT::v64i8) {
    unsigned NumElts = VT.getVectorNumElements();
    MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

    // Simple i8 add case
    if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
      return DAG.getNode(ISD::ADD, dl, VT, R, R);

    // ashr(R, 7)  === cmp_slt(R, 0)
    if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
      SDValue Zeros = DAG.getConstant(0, dl, VT);
      if (VT.is512BitVector()) {
        assert(VT == MVT::v64i8 && "Unexpected element type!");
        SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
        return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
      }
      return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
    }

    // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
    if (VT == MVT::v16i8 && Subtarget.hasXOP())
      return SDValue();

    if (Op.getOpcode() == ISD::SHL) {
      // Make a large shift.
      SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
                                               ShiftAmt, DAG);
      SHL = DAG.getBitcast(VT, SHL);
      // Zero out the rightmost bits.
      APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
      return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
    }
    if (Op.getOpcode() == ISD::SRL) {
      // Make a large shift.
      SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
                                               ShiftAmt, DAG);
      SRL = DAG.getBitcast(VT, SRL);
      // Zero out the leftmost bits.
      return DAG.getNode(ISD::AND, dl, VT, SRL,
                         DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
    }
    if (Op.getOpcode() == ISD::SRA) {
      // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
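      // e.g. for ShiftAmt == 3, Mask == 0x10: lshr places the original sign
      // bit at bit 4, and the xor/sub pair then sign-extends from bit 4.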
      SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);

      SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
      Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
      return Res;
    }
    llvm_unreachable("Unknown shift opcode.");
  }

  return SDValue();
}

static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned Opcode = Op.getOpcode();
  unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
  unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);

  if (SDValue BaseShAmt = DAG.getSplatValue(Amt)) {
    if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
      MVT EltVT = VT.getVectorElementType();
      assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
      if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
      else if (EltVT.bitsLT(MVT::i32))
        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);

      return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
    }

    // vXi8 shifts - shift as v8i16 + mask result.
    if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
         (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
         VT == MVT::v64i8) &&
        !Subtarget.hasXOP()) {
      unsigned NumElts = VT.getVectorNumElements();
      MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
      if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
        unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
        unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);

        // Create the mask using vXi16 shifts. For shift-rights we need to move
        // the upper byte down before splatting the vXi8 mask.
        SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
        BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
                                      BaseShAmt, Subtarget, DAG);
        if (Opcode != ISD::SHL)
          BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
                                               8, DAG);
        BitMask = DAG.getBitcast(VT, BitMask);
        BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
                                       SmallVector<int, 64>(NumElts, 0));

        SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
                                          DAG.getBitcast(ExtVT, R), BaseShAmt,
                                          Subtarget, DAG);
        Res = DAG.getBitcast(VT, Res);
        Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);

        if (Opcode == ISD::SRA) {
          // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
          // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
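          // 0x8080 sets the sign bit of both i8 halves of each i16 lane; a
          // 16-bit logical shift right by an amount < 8 keeps the two bytes
          // independent, so a single PSRLW produces the per-byte sign mask.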
          SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
          SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
                                         BaseShAmt, Subtarget, DAG);
          SignMask = DAG.getBitcast(VT, SignMask);
          Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
        }
        return Res;
      }
    }
  }

  // Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
  if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
      Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    Amt = Amt.getOperand(0);
    unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
    std::vector<SDValue> Vals(Ratio);
    for (unsigned i = 0; i != Ratio; ++i)
      Vals[i] = Amt.getOperand(i);
    for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
      for (unsigned j = 0; j != Ratio; ++j)
        if (Vals[j] != Amt.getOperand(i + j))
          return SDValue();
    }

    if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
      return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
  }
  return SDValue();
}

// Convert a shift/rotate left amount to a multiplication scale factor.
static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  MVT VT = Amt.getSimpleValueType();
  if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
        (Subtarget.hasInt256() && VT == MVT::v16i16) ||
        (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
    return SDValue();

  if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
    SmallVector<SDValue, 8> Elts;
    MVT SVT = VT.getVectorElementType();
    unsigned SVTBits = SVT.getSizeInBits();
    APInt One(SVTBits, 1);
    unsigned NumElems = VT.getVectorNumElements();

    for (unsigned i = 0; i != NumElems; ++i) {
      SDValue Op = Amt->getOperand(i);
      if (Op->isUndef()) {
        Elts.push_back(Op);
        continue;
      }

      ConstantSDNode *ND = cast<ConstantSDNode>(Op);
      APInt C(SVTBits, ND->getZExtValue());
      uint64_t ShAmt = C.getZExtValue();
      if (ShAmt >= SVTBits) {
        Elts.push_back(DAG.getUNDEF(SVT));
        continue;
      }
      Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
    }
    return DAG.getBuildVector(VT, dl, Elts);
  }

  // If the target doesn't support variable shifts, use either FP conversion
  // or integer multiplication to avoid shifting each element individually.
  if (VT == MVT::v4i32) {
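    // Build the scale 2^Amt via the float exponent: (Amt << 23) + 0x3f800000
    // is the IEEE-754 encoding of 2^Amt (1.0f with Amt added to the exponent),
    // so bitcasting to f32 and converting back to integer yields the scale.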
    Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
                      DAG.getConstant(0x3f800000U, dl, VT));
    Amt = DAG.getBitcast(MVT::v4f32, Amt);
    return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
  }

  // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
  if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
    SDValue Z = DAG.getConstant(0, dl, VT);
    SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
    SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
    Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
    Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
    if (Subtarget.hasSSE41())
      return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);

    return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
                                        DAG.getBitcast(VT, Hi),
                                        {0, 2, 4, 6, 8, 10, 12, 14});
  }

  return SDValue();
}

static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());

  unsigned Opc = Op.getOpcode();
  unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
  unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);

  assert(VT.isVector() && "Custom lowering only for vector shifts!");
  assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");

  if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
    return V;

  if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
    return V;

  if (SupportedVectorVarShift(VT, Subtarget, Opc))
    return Op;

  // XOP has 128-bit variable logical/arithmetic shifts.
  // +ve/-ve Amt = shift left/right.
  if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
                             VT == MVT::v8i16 || VT == MVT::v16i8)) {
    if (Opc == ISD::SRL || Opc == ISD::SRA) {
      SDValue Zero = DAG.getConstant(0, dl, VT);
      Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
    }
    if (Opc == ISD::SHL || Opc == ISD::SRL)
      return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
    if (Opc == ISD::SRA)
      return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
  }

  // v2i64 vector logical shifts can efficiently avoid scalarization - do the
  // shifts per-lane and then shuffle the partial results back together.
  if (VT == MVT::v2i64 && Opc != ISD::SRA) {
    // Splat the shift amounts so the scalar shifts above will catch it.
    SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
    SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
    SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
    SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
    return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
  }

  // i64 vector arithmetic shift can be emulated with the transform:
  // M = lshr(SIGN_MASK, Amt)
  // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
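  // e.g. for Amt == 4, M == 0x0800000000000000; for negative values the
  // xor/sub pair converts the zero-filled upper bits into copies of the sign
  // bit, matching a true arithmetic shift.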
  if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
      Opc == ISD::SRA) {
    SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
    SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
    R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
    R = DAG.getNode(ISD::XOR, dl, VT, R, M);
    R = DAG.getNode(ISD::SUB, dl, VT, R, M);
    return R;
  }

  // If possible, lower this shift as a sequence of two shifts by
  // constant plus a BLENDing shuffle instead of scalarizing it.
  // Example:
  //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
  //
  // Could be rewritten as:
  //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
  //
  // The advantage is that the two shifts from the example would be
  // lowered as X86ISD::VSRLI nodes in parallel before blending.
  if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
                      (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
    SDValue Amt1, Amt2;
    unsigned NumElts = VT.getVectorNumElements();
    SmallVector<int, 8> ShuffleMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue A = Amt->getOperand(i);
      if (A.isUndef()) {
        ShuffleMask.push_back(SM_SentinelUndef);
        continue;
      }
      if (!Amt1 || Amt1 == A) {
        ShuffleMask.push_back(i);
        Amt1 = A;
        continue;
      }
      if (!Amt2 || Amt2 == A) {
        ShuffleMask.push_back(i + NumElts);
        Amt2 = A;
        continue;
      }
      break;
    }

    // Only perform this blend if we can perform it without loading a mask.
    if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
        (VT != MVT::v16i16 ||
         is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
        (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
         canWidenShuffleElements(ShuffleMask))) {
      auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
      auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
      if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
          Cst2->getAPIntValue().ult(EltSizeInBits)) {
        SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
                                                    Cst1->getZExtValue(), DAG);
        SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
                                                    Cst2->getZExtValue(), DAG);
        return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
      }
    }
  }

  // If possible, lower this packed shift into a vector multiply instead of
  // expanding it into a sequence of scalar shifts.
  if (Opc == ISD::SHL)
    if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
      return DAG.getNode(ISD::MUL, dl, VT, R, Scale);

  // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we can
  // replace it with ISD::MULHU, creating a scale factor from (NumEltBits - Amt).
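  // i.e. srl(R, Amt) == mulhu(R, 1 << (EltBits - Amt)). Amt == 0 would need a
  // scale of 1 << 16, which doesn't fit in i16, so the select below returns R
  // when the amount is zero.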
  if (Opc == ISD::SRL && ConstantAmt &&
      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
      SDValue Zero = DAG.getConstant(0, dl, VT);
      SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
      SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
      return DAG.getSelect(dl, VT, ZAmt, R, Res);
    }
  }

  // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we can
  // replace it with ISD::MULHS, creating a scale factor from (NumEltBits - Amt).
  // TODO: Special case handling for shift by 0/1, really we can afford either
  // of these cases in pre-SSE41/XOP/AVX512 but not both.
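  // i.e. sra(R, Amt) == mulhs(R, 1 << (EltBits - Amt)) for Amt >= 2. Amt == 0
  // (the scale would be 1 << 16) and Amt == 1 (the scale 0x8000 is negative as
  // i16) are patched up with the selects below.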
  if (Opc == ISD::SRA && ConstantAmt &&
      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
      ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
        !Subtarget.hasAVX512()) ||
       DAG.isKnownNeverZero(Amt))) {
    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
      SDValue Amt0 =
          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
      SDValue Amt1 =
          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
      SDValue Sra1 =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
      SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
      Res = DAG.getSelect(dl, VT, Amt0, R, Res);
      return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
    }
  }

  // v4i32 non-uniform shifts.
  // If the shift amount is constant we can shift each lane using the SSE2
  // immediate shifts, else we need to zero-extend each lane to the lower i64
  // and shift using the SSE2 variable shifts.
  // The separate results can then be blended together.
  if (VT == MVT::v4i32) {
    SDValue Amt0, Amt1, Amt2, Amt3;
    if (ConstantAmt) {
      Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
      Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
      Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
      Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
    } else {
      // The SSE2 shifts use the lower i64 as the same shift amount for
      // all lanes and the upper i64 is ignored. On AVX we're better off
      // just zero-extending, but for SSE just duplicating the top 16 bits is
      // cheaper and has the same effect for out-of-range values.
      if (Subtarget.hasAVX()) {
        SDValue Z = DAG.getConstant(0, dl, VT);
        Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
        Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
        Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
        Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
      } else {
        SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
        SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
                                             {4, 5, 6, 7, -1, -1, -1, -1});
        Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
                                    {0, 1, 1, 1, -1, -1, -1, -1});
        Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
                                    {2, 3, 3, 3, -1, -1, -1, -1});
        Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
                                    {0, 1, 1, 1, -1, -1, -1, -1});
        Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
                                    {2, 3, 3, 3, -1, -1, -1, -1});
      }
    }

    unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
    SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
    SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
    SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
    SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));

    // Merge the shifted lane results optimally with/without PBLENDW.
    // TODO - ideally shuffle combining would handle this.
    if (Subtarget.hasSSE41()) {
      SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
      SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
      return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
    }
    SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
    SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
    return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
  }

  // It's worth extending once and using the vXi16/vXi32 shifts for smaller
  // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
  // make the existing SSE solution better.
  // NOTE: We honor the preferred vector width before promoting to 512 bits.
  if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
      (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
      (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
    assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
           "Unexpected vector type");
    MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
    MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
    unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    R = DAG.getNode(ExtOpc, dl, ExtVT, R);
    Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Opc, dl, ExtVT, R, Amt));
  }

  // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors: we
  // extend to vXi16 and perform a MUL scale, effectively a MUL_LOHI.
  if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
      (VT == MVT::v16i8 || VT == MVT::v64i8 ||
       (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
      !Subtarget.hasXOP()) {
    int NumElts = VT.getVectorNumElements();
    SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);

    // Extend constant shift amount to vXi16 (it doesn't matter if the type
    // isn't legal).
    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
    Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
    Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
    Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
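    // Each element of Amt now holds 2^(8 - ShAmt), so in a 16-bit lane the
    // original shift becomes the low byte of (R * Amt) >> 8, which the paths
    // below implement as a widened MUL plus a logical shift right by 8.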
    assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
           "Constant build vector expected");

    if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
      R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
                          : DAG.getZExtOrTrunc(R, dl, ExVT);
      R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
      R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
      return DAG.getZExtOrTrunc(R, dl, VT);
    }

    SmallVector<SDValue, 16> LoAmt, HiAmt;
    for (int i = 0; i != NumElts; i += 16) {
      for (int j = 0; j != 8; ++j) {
        LoAmt.push_back(Amt.getOperand(i + j));
        HiAmt.push_back(Amt.getOperand(i + j + 8));
      }
    }

    MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
    SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
    SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);

    SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
    SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
    LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
    HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
    LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
    HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
    LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
    HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
    return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
  }

  if (VT == MVT::v16i8 ||
      (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
      (VT == MVT::v64i8 && Subtarget.hasBWI())) {
    MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);

    auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
      if (VT.is512BitVector()) {
        // On AVX512BW targets we make use of the fact that VSELECT lowers
        // to a masked blend which selects bytes based just on the sign bit
        // extracted to a mask.
        MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
        V0 = DAG.getBitcast(VT, V0);
        V1 = DAG.getBitcast(VT, V1);
        Sel = DAG.getBitcast(VT, Sel);
        Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
                           ISD::SETGT);
        return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
      } else if (Subtarget.hasSSE41()) {
        // On SSE41 targets we make use of the fact that VSELECT lowers
        // to PBLENDVB which selects bytes based just on the sign bit.
        V0 = DAG.getBitcast(VT, V0);
        V1 = DAG.getBitcast(VT, V1);
        Sel = DAG.getBitcast(VT, Sel);
        return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
      }
      // On pre-SSE41 targets we test for the sign bit by comparing to
      // zero - a negative value will set all bits of the lanes to true
      // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
      SDValue Z = DAG.getConstant(0, dl, SelVT);
      SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
      return DAG.getSelect(dl, SelVT, C, V0, V1);
    };

    // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
    // We can safely do this using i16 shifts as we're only interested in
    // the 3 lower bits of each byte.
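    // Shifting left by 5 moves bit 2 (the MSB of the 3-bit shift amount) into
    // each byte's sign bit; the 'a += a' steps below then expose the next
    // lower amount bit for the following VSELECT.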
    Amt = DAG.getBitcast(ExtVT, Amt);
    Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
    Amt = DAG.getBitcast(VT, Amt);

    if (Opc == ISD::SHL || Opc == ISD::SRL) {
      // r = VSELECT(r, shift(r, 4), a);
      SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
      R = SignBitSelect(VT, Amt, M, R);

      // a += a
      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

      // r = VSELECT(r, shift(r, 2), a);
      M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
      R = SignBitSelect(VT, Amt, M, R);

      // a += a
      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

      // return VSELECT(r, shift(r, 1), a);
      M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
      R = SignBitSelect(VT, Amt, M, R);
      return R;
    }

    if (Opc == ISD::SRA) {
      // For SRA we need to unpack each byte to the higher byte of a i16 vector
      // so we can correctly sign extend. We don't care what happens to the
      // lower byte.
      SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
      SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
      SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
      SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
      ALo = DAG.getBitcast(ExtVT, ALo);
      AHi = DAG.getBitcast(ExtVT, AHi);
      RLo = DAG.getBitcast(ExtVT, RLo);
      RHi = DAG.getBitcast(ExtVT, RHi);

      // r = VSELECT(r, shift(r, 4), a);
      SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
      SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);

      // a += a
      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);

      // r = VSELECT(r, shift(r, 2), a);
      MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
      MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);

      // a += a
      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);

      // r = VSELECT(r, shift(r, 1), a);
      MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
      MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);

      // Logical shift the result back to the lower byte, leaving a zero upper
      // byte, meaning that we can safely pack with PACKUSWB.
      RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
      RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
      return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
    }
  }

  if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
    MVT ExtVT = MVT::v8i32;
    SDValue Z = DAG.getConstant(0, dl, VT);
    SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
    SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
    SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
    SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
    ALo = DAG.getBitcast(ExtVT, ALo);
    AHi = DAG.getBitcast(ExtVT, AHi);
    RLo = DAG.getBitcast(ExtVT, RLo);
    RHi = DAG.getBitcast(ExtVT, RHi);
    SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
    SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
    Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
    Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
    return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
  }

  if (VT == MVT::v8i16) {
    // If we have a constant shift amount, the non-SSE41 path is best as
    // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
    bool UseSSE41 = Subtarget.hasSSE41() &&
                    !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());

    auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
      // On SSE41 targets we make use of the fact that VSELECT lowers
      // to PBLENDVB which selects bytes based just on the sign bit.
      if (UseSSE41) {
        MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
        V0 = DAG.getBitcast(ExtVT, V0);
        V1 = DAG.getBitcast(ExtVT, V1);
        Sel = DAG.getBitcast(ExtVT, Sel);
        return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
      }
      // On pre-SSE41 targets we splat the sign bit - a negative value will
      // set all bits of the lanes to true and VSELECT uses that in
      // its OR(AND(V0,C),AND(V1,~C)) lowering.
      SDValue C =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
      return DAG.getSelect(dl, VT, C, V0, V1);
    };

    // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
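    // Shifting left by 12 moves bit 3 (the MSB of the 4-bit shift amount) into
    // the lane's sign bit; successive 'a += a' steps then expose the remaining
    // amount bits for the 8/4/2/1 shift selects below.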
    if (UseSSE41) {
      // On SSE41 targets we need to replicate the shift mask in both
      // bytes for PBLENDVB.
      Amt = DAG.getNode(
          ISD::OR, dl, VT,
          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
    } else {
      Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
    }

    // r = VSELECT(r, shift(r, 8), a);
    SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
    R = SignBitSelect(Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

    // r = VSELECT(r, shift(r, 4), a);
    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
    R = SignBitSelect(Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

    // r = VSELECT(r, shift(r, 2), a);
    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
    R = SignBitSelect(Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

    // return VSELECT(r, shift(r, 1), a);
    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
    R = SignBitSelect(Amt, M, R);
    return R;
  }

  // Decompose 256-bit shifts into 128-bit shifts.
  if (VT.is256BitVector())
    return split256IntArith(Op, DAG);

  return SDValue();
}

static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isVector() && "Custom lowering only for vector rotates!");

  SDLoc DL(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned Opcode = Op.getOpcode();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  int NumElts = VT.getVectorNumElements();

  // Check for constant splat rotation amount.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
  int CstSplatIndex = -1;
  if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits))
    for (int i = 0; i != NumElts; ++i)
      if (!UndefElts[i]) {
        if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) {
          CstSplatIndex = i;
          continue;
        }
        CstSplatIndex = -1;
        break;
      }

  // AVX512 implicitly uses modulo rotation amounts.
  if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
    // Attempt to rotate by immediate.
    if (0 <= CstSplatIndex) {
      unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
      uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
      return DAG.getNode(Op, DL, VT, R,
                         DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
    }

    // Else, fall-back on VPROLV/VPRORV.
    return Op;
  }

  assert((Opcode == ISD::ROTL) && "Only ROTL supported");

  // XOP has 128-bit vector variable + immediate rotates.
  // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
  // XOP implicitly uses modulo rotation amounts.
  if (Subtarget.hasXOP()) {
    if (VT.is256BitVector())
      return split256IntArith(Op, DAG);
    assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");

    // Attempt to rotate by immediate.
    if (0 <= CstSplatIndex) {
      uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
      return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
                         DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
    }

    // Use general rotate by variable (per-element).
    return Op;
  }

  // Split 256-bit integers on pre-AVX2 targets.
  if (VT.is256BitVector() && !Subtarget.hasAVX2())
    return split256IntArith(Op, DAG);

  assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
          ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
           Subtarget.hasAVX2())) &&
         "Only vXi32/vXi16/vXi8 vector rotates supported");

  // Rotate by a uniform constant - expand back to shifts.
  if (0 <= CstSplatIndex)
    return SDValue();

  bool IsSplatAmt = DAG.isSplatValue(Amt);

  // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
  // the amount bit.
  if (EltSizeInBits == 8 && !IsSplatAmt) {
    if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
      return SDValue();

    // We don't need ModuloAmt here as we just peek at individual bits.
    MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

    auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
      if (Subtarget.hasSSE41()) {
        // On SSE41 targets we make use of the fact that VSELECT lowers
        // to PBLENDVB which selects bytes based just on the sign bit.
        V0 = DAG.getBitcast(VT, V0);
        V1 = DAG.getBitcast(VT, V1);
        Sel = DAG.getBitcast(VT, Sel);
        return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
      }
      // On pre-SSE41 targets we test for the sign bit by comparing to
      // zero - a negative value will set all bits of the lanes to true
      // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
      SDValue Z = DAG.getConstant(0, DL, SelVT);
      SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
      return DAG.getSelect(DL, SelVT, C, V0, V1);
    };

    // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
    // We can safely do this using i16 shifts as we're only interested in
    // the 3 lower bits of each byte.
    Amt = DAG.getBitcast(ExtVT, Amt);
    Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
    Amt = DAG.getBitcast(VT, Amt);

    // r = VSELECT(r, rot(r, 4), a);
    SDValue M;
    M = DAG.getNode(
        ISD::OR, DL, VT,
        DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
        DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
    R = SignBitSelect(VT, Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);

    // r = VSELECT(r, rot(r, 2), a);
    M = DAG.getNode(
        ISD::OR, DL, VT,
        DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
        DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
    R = SignBitSelect(VT, Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);

    // return VSELECT(r, rot(r, 1), a);
    M = DAG.getNode(
        ISD::OR, DL, VT,
        DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
        DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
    return SignBitSelect(VT, Amt, M, R);
  }

  // ISD::ROT* uses modulo rotate amounts.
  Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
                    DAG.getConstant(EltSizeInBits - 1, DL, VT));

  bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
  bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
                        SupportedVectorVarShift(VT, Subtarget, ISD::SRL);

  // Fallback for splats + all supported variable shifts.
  // Fallback for non-constant AVX2 vXi16 as well.
  if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
    SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
    AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
    SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
    SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
    return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
  }

  // As with shifts, convert the rotation amount to a multiplication factor.
  SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
  assert(Scale && "Failed to convert ROTL amount to scale");

  // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
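  // rotl(R, Amt) == (R << Amt) | (R >> (16 - Amt)): MUL yields the low 16 bits
  // of R * 2^Amt (the left-shifted part) and MULHU the high 16 bits (the bits
  // that wrapped around), so OR'ing them gives the rotate.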
  if (EltSizeInBits == 16) {
    SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
    SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
    return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
  }

  // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
  // to v2i64 results at a time. The upper 32 bits contain the wrapped bits
  // that can then be OR'd with the lower 32 bits.
  assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
  static const int OddMask[] = {1, -1, 3, -1};
  SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
  SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);

  SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
                              DAG.getBitcast(MVT::v2i64, R),
                              DAG.getBitcast(MVT::v2i64, Scale));
  SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
                              DAG.getBitcast(MVT::v2i64, R13),
                              DAG.getBitcast(MVT::v2i64, Scale13));
  Res02 = DAG.getBitcast(VT, Res02);
  Res13 = DAG.getBitcast(VT, Res13);

  return DAG.getNode(ISD::OR, DL, VT,
                     DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
                     DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
}

/// Returns true if the operand type is exactly twice the native width, and
/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
  unsigned OpWidth = MemType->getPrimitiveSizeInBits();

  if (OpWidth == 64)
    return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
  if (OpWidth == 128)
    return Subtarget.hasCmpxchg16b();

  return false;
}

// TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
// TODO: In 32-bit mode, use FISTP when X87 is available?
bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  Type *MemType = SI->getValueOperand()->getType();

  bool NoImplicitFloatOps =
      SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
  if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
      !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2())
    return false;

  return needsCmpXchgNb(MemType);
}

// Note: this turns large loads into lock cmpxchg8b/16b.
// TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  Type *MemType = LI->getType();

  // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
  // can use movq to do the load. If we have X87 we can load into an 80-bit
  // X87 register and store it to a stack temporary.
  bool NoImplicitFloatOps =
      LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
  if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
      !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
      (Subtarget.hasSSE2() || Subtarget.hasX87()))
    return AtomicExpansionKind::None;

  return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
                                 : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
  Type *MemType = AI->getType();

  // If the operand is too big, we must see if cmpxchg8/16b is available
  // and default to library calls otherwise.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
    return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
                                   : AtomicExpansionKind::None;
  }

  AtomicRMWInst::BinOp Op = AI->getOperation();
  switch (Op) {
  default:
    llvm_unreachable("Unknown atomic operation");
  case AtomicRMWInst::Xchg:
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
    // It's better to use xadd, xsub or xchg for these in all cases.
    return AtomicExpansionKind::None;
  case AtomicRMWInst::Or:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Xor:
    // If the atomicrmw's result isn't actually used, we can just add a "lock"
    // prefix to a normal instruction for these operations.
    return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
                            : AtomicExpansionKind::None;
  case AtomicRMWInst::Nand:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
    // These always require a non-trivial set of data operations on x86. We must
    // use a cmpxchg loop.
    return AtomicExpansionKind::CmpXChg;
  }
}

LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
  Type *MemType = AI->getType();
  // Accesses larger than the native width are turned into cmpxchg/libcalls, so
  // there is no benefit in turning such RMWs into loads, and it is actually
  // harmful as it introduces an mfence.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
    return nullptr;

  // If this is a canonical idempotent atomicrmw w/no uses, we have a better
  // lowering available in lowerAtomicArith.
  // TODO: push more cases through this path.
  if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
    if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
        AI->use_empty())
      return nullptr;

  auto Builder = IRBuilder<>(AI);
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  auto SSID = AI->getSyncScopeID();
  // We must restrict the ordering to avoid generating loads with Release or
  // ReleaseAcquire orderings.
  auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());

  // Before the load we need a fence. Here is an example lifted from
  // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
  // is required:
  // Thread 0:
  //   x.store(1, relaxed);
  //   r1 = y.fetch_add(0, release);
  // Thread 1:
  //   y.fetch_add(42, acquire);
  //   r2 = x.load(relaxed);
  // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
  // lowered to just a load without a fence. An mfence flushes the store
  // buffer, making the optimization clearly correct.
  // FIXME: the fence is required if isReleaseOrStronger(Order), but it is not
  // clearly needed otherwise, so we might be able to be more aggressive about
  // relaxed idempotent rmws. In practice, they do not look useful, so we don't
  // try to be especially clever.
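  //
  // When we do rewrite the rmw here, the emitted sequence is therefore,
  // roughly, an mfence followed by a plain mov load with the restricted
  // ordering computed above; the checks below bail out to the generic
  // lowering instead.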
  if (SSID == SyncScope::SingleThread)
    // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
    // the IR level, so we must wrap it in an intrinsic.
    return nullptr;

  if (!Subtarget.hasMFence())
    // FIXME: it might make sense to use a locked operation here but on a
    // different cache-line to prevent cache-line bouncing. In practice it
    // is probably a small win, and x86 processors without mfence are rare
    // enough that we do not bother.
    return nullptr;

  Function *MFence =
      llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
  Builder.CreateCall(MFence, {});

  // Finally we can emit the atomic load.
  LoadInst *Loaded =
      Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
                                AI->getType()->getPrimitiveSizeInBits());
  Loaded->setAtomic(Order, SSID);
  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return Loaded;
}

bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
  if (!SI.isUnordered())
    return false;
  return ExperimentalUnorderedISEL;
}
bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
  if (!LI.isUnordered())
    return false;
  return ExperimentalUnorderedISEL;
}


/// Emit a locked operation on a stack location which does not change any
/// memory location, but does involve a lock prefix.  Location is chosen to be
/// a) very likely accessed only by a single thread to minimize cache traffic,
/// and b) definitely dereferenceable.  Returns the new Chain result.
static SDValue emitLockedStackOp(SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget,
                                 SDValue Chain, SDLoc DL) {
  // Implementation notes:
  // 1) LOCK prefix creates a full read/write reordering barrier for memory
  // operations issued by the current processor.  As such, the location
  // referenced is not relevant for the ordering properties of the instruction.
  // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
  // 8.2.3.9  Loads and Stores Are Not Reordered with Locked Instructions
  // 2) Using an immediate operand appears to be the best encoding choice
  // here since it doesn't require an extra register.
  // 3) OR appears to be very slightly faster than ADD. (Though, the difference
  // is small enough it might just be measurement noise.)
  // 4) When choosing offsets, there are several contributing factors:
  //   a) If there's no redzone, we default to TOS.  (We could allocate a cache
  //      line aligned stack object to improve this case.)
  //   b) To minimize our chances of introducing a false dependence, we prefer
  //      to offset the stack usage from TOS slightly.
  //   c) To minimize concerns about cross thread stack usage - in particular,
  //      the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
  //      captures state in the TOS frame and accesses it from many threads -
  //      we want to use an offset such that the offset is in a distinct cache
  //      line from the TOS frame.
  //
  // For a general discussion of the tradeoffs and benchmark results, see:
  // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
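  //
  // The node built below is, roughly, an OR32mi8Locked of immediate 0 against
  // [esp/rsp + SPOffset]; only its chain result is used.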

  auto &MF = DAG.getMachineFunction();
  auto &TFL = *Subtarget.getFrameLowering();
  const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;

  if (Subtarget.is64Bit()) {
    SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
    SDValue Ops[] = {
      DAG.getRegister(X86::RSP, MVT::i64),                  // Base
      DAG.getTargetConstant(1, DL, MVT::i8),                // Scale
      DAG.getRegister(0, MVT::i64),                         // Index
      DAG.getTargetConstant(SPOffset, DL, MVT::i32),        // Disp
      DAG.getRegister(0, MVT::i16),                         // Segment.
      Zero,
      Chain};
    SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
                                     MVT::Other, Ops);
    return SDValue(Res, 1);
  }

  SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
  SDValue Ops[] = {
    DAG.getRegister(X86::ESP, MVT::i32),            // Base
    DAG.getTargetConstant(1, DL, MVT::i8),          // Scale
    DAG.getRegister(0, MVT::i32),                   // Index
    DAG.getTargetConstant(SPOffset, DL, MVT::i32),  // Disp
    DAG.getRegister(0, MVT::i16),                   // Segment.
    Zero,
    Chain
  };
  SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
                                   MVT::Other, Ops);
  return SDValue(Res, 1);
}

static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  SDLoc dl(Op);
  AtomicOrdering FenceOrdering =
      static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
  SyncScope::ID FenceSSID =
      static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
      FenceSSID == SyncScope::System) {
    if (Subtarget.hasMFence())
      return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));

    SDValue Chain = Op.getOperand(0);
    return emitLockedStackOp(DAG, Subtarget, Chain, dl);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}

static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
                             SelectionDAG &DAG) {
  MVT T = Op.getSimpleValueType();
  SDLoc DL(Op);
  unsigned Reg = 0;
  unsigned size = 0;
  switch(T.SimpleTy) {
  default: llvm_unreachable("Invalid value type!");
  case MVT::i8:  Reg = X86::AL;  size = 1; break;
  case MVT::i16: Reg = X86::AX;  size = 2; break;
  case MVT::i32: Reg = X86::EAX; size = 4; break;
  case MVT::i64:
    assert(Subtarget.is64Bit() && "Node not type legal!");
    Reg = X86::RAX; size = 8;
    break;
  }
  SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
                                  Op.getOperand(2), SDValue());
  SDValue Ops[] = { cpIn.getValue(0),
                    Op.getOperand(1),
                    Op.getOperand(3),
                    DAG.getTargetConstant(size, DL, MVT::i8),
                    cpIn.getValue(1) };
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
                                           Ops, T, MMO);

  SDValue cpOut =
    DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
  SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
                                      MVT::i32, cpOut.getValue(2));
  SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);

  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
                     cpOut, Success, EFLAGS.getValue(1));
}

// Create MOVMSKB, taking into account whether we need to split for AVX1.
static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
  MVT InVT = V.getSimpleValueType();

  if (InVT == MVT::v64i8) {
    SDValue Lo, Hi;
    std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
    Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
    Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
    Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
    Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
    Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
                     DAG.getConstant(32, DL, MVT::i8));
    return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
  }
  if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
    SDValue Lo, Hi;
    std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
    Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
    Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
    Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
                     DAG.getConstant(16, DL, MVT::i8));
    return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
  }

  return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
}

static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
                            SelectionDAG &DAG) {
  SDValue Src = Op.getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();
  MVT DstVT = Op.getSimpleValueType();

  // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
  // half to v32i1 and concatenating the result.
  if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
    assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
    assert(Subtarget.hasBWI() && "Expected BWI target");
    SDLoc dl(Op);
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
                             DAG.getIntPtrConstant(0, dl));
    Lo = DAG.getBitcast(MVT::v32i1, Lo);
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
                             DAG.getIntPtrConstant(1, dl));
    Hi = DAG.getBitcast(MVT::v32i1, Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
  }

  // Custom splitting for BWI types when AVX512F is available but BWI isn't.
  if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() &&
    DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) {
    SDLoc dl(Op);
    SDValue Lo, Hi;
    std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
    MVT CastVT = DstVT.getHalfNumVectorElementsVT();
    Lo = DAG.getBitcast(CastVT, Lo);
    Hi = DAG.getBitcast(CastVT, Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
  }

  // Use MOVMSK for vector to scalar conversion to prevent scalarization.
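  // For example, an i16 bitcast of v16i1 becomes a sign-extend to v16i8
  // followed by PMOVMSKB, which collects the sign bits into a scalar.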
  if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
    assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
    MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
    SDLoc DL(Op);
    SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
    V = getPMOVMSKB(DL, V, DAG, Subtarget);
    return DAG.getZExtOrTrunc(V, DL, DstVT);
  }

  assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
          SrcVT == MVT::i64) && "Unexpected VT!");

  assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
  if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
      !(DstVT == MVT::x86mmx && SrcVT.isVector()))
    // This conversion needs to be expanded.
    return SDValue();

  SDLoc dl(Op);
  if (SrcVT.isVector()) {
    // Widen the input vector in the case of MVT::v2i32.
    // Example: from MVT::v2i32 to MVT::v4i32.
    MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
                                 SrcVT.getVectorNumElements() * 2);
    Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
                      DAG.getUNDEF(SrcVT));
  } else {
    assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
           "Unexpected source type in LowerBITCAST");
    Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
  }

  MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
  Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);

  if (DstVT == MVT::x86mmx)
    return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
                     DAG.getIntPtrConstant(0, dl));
}

/// Compute the horizontal sum of bytes in V for the elements of VT.
///
/// Requires V to be a byte vector and VT to be an integer vector type with
/// wider elements than V's type. The width of the elements of VT determines
/// how many bytes of V are summed horizontally to produce each element of the
/// result.
static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  SDLoc DL(V);
  MVT ByteVecVT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
         "Expected value to have byte element type.");
  assert(EltVT != MVT::i8 &&
         "Horizontal byte sum only makes sense for wider elements!");
  unsigned VecSize = VT.getSizeInBits();
  assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");

  // The PSADBW instruction horizontally adds all bytes and leaves the result
  // in i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
  if (EltVT == MVT::i64) {
    SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
    MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
    V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
    return DAG.getBitcast(VT, V);
  }

  if (EltVT == MVT::i32) {
    // We unpack the low half and high half into i32s interleaved with zeros so
    // that we can use PSADBW to horizontally sum them. The most useful part of
    // this is that it lines up the results of two PSADBW instructions to be
    // two v2i64 vectors which concatenated are the 4 population counts. We can
    // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
    SDValue Zeros = DAG.getConstant(0, DL, VT);
    SDValue V32 = DAG.getBitcast(VT, V);
    SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
    SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);

    // Do the horizontal sums into two v2i64s.
    Zeros = DAG.getConstant(0, DL, ByteVecVT);
    MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
    Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
                      DAG.getBitcast(ByteVecVT, Low), Zeros);
    High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
                       DAG.getBitcast(ByteVecVT, High), Zeros);

    // Merge them together.
    MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
    V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
                    DAG.getBitcast(ShortVecVT, Low),
                    DAG.getBitcast(ShortVecVT, High));

    return DAG.getBitcast(VT, V);
  }

  // The only element type left is i16.
  assert(EltVT == MVT::i16 && "Unknown how to handle type");

  // To obtain pop count for each i16 element starting from the pop count for
  // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
  // right by 8. It is important to shift as i16s as i8 vector shift isn't
  // directly supported.
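  // For example, for an i16 lane whose bytes hold the counts [hi|lo], the
  // shift gives [lo|0], the byte-wise add gives [hi+lo|lo], and the final
  // logical shift right by 8 leaves [0|hi+lo].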
  SDValue ShifterV = DAG.getConstant(8, DL, VT);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
  V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
                  DAG.getBitcast(ByteVecVT, V));
  return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
}

static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  int NumElts = VT.getVectorNumElements();
  (void)EltVT;
  assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");

  // Implement a lookup table in register by using an algorithm based on:
  // http://wm.ite.pl/articles/sse-popcount.html
  //
  // The general idea is that every lower byte nibble in the input vector is an
  // index into an in-register pre-computed pop count table. We then split the
  // input vector into two new ones: (1) a vector with only the shifted-right
  // higher nibbles for each byte and (2) a vector with the lower nibbles (and
  // masked-out higher ones) for each byte. PSHUFB is used separately with both
  // to index the in-register table. Next, both are added and the result is an
  // i8 vector where each element contains the pop count for its input byte.
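  // For example, input byte 0xE3 has high nibble 0xE (LUT value 3) and low
  // nibble 0x3 (LUT value 2); the PSHUFB lookups plus the add give 5, the
  // pop count of 0xE3.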
  const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
                       /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
                       /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
                       /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};

  SmallVector<SDValue, 64> LUTVec;
  for (int i = 0; i < NumElts; ++i)
    LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
  SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
  SDValue M0F = DAG.getConstant(0x0F, DL, VT);

  // High nibbles
  SDValue FourV = DAG.getConstant(4, DL, VT);
  SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);

  // Low nibbles
  SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);

  // The input vector is used as the shuffle mask that index elements into the
  // LUT. After counting low and high nibbles, add the vector to obtain the
  // final pop count per i8 element.
  SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
  SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
  return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
}

// Please ensure that any codegen change from LowerVectorCTPOP is reflected in
// updated cost models in X86TTIImpl::getIntrinsicInstrCost.
static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
         "Unknown CTPOP type to handle");
  SDLoc DL(Op.getNode());
  SDValue Op0 = Op.getOperand(0);

  // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
  if (Subtarget.hasVPOPCNTDQ()) {
    unsigned NumElems = VT.getVectorNumElements();
    assert((VT.getVectorElementType() == MVT::i8 ||
            VT.getVectorElementType() == MVT::i16) && "Unexpected type");
    if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
      MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
      Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
      Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
    }
  }

  // Decompose 256-bit ops into smaller 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return Lower256IntUnary(Op, DAG);

  // Decompose 512-bit ops into smaller 256-bit ops.
  if (VT.is512BitVector() && !Subtarget.hasBWI())
    return Lower512IntUnary(Op, DAG);

  // For element types greater than i8, do vXi8 pop counts and a bytesum.
  if (VT.getScalarType() != MVT::i8) {
    MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
    SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
    SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
    return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
  }

  // We can't use the fast LUT approach, so fall back on LegalizeDAG.
  if (!Subtarget.hasSSSE3())
    return SDValue();

  return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
}

static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().isVector() &&
         "We only do custom lowering for vector population count.");
  return LowerVectorCTPOP(Op, Subtarget, DAG);
}

static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  SDLoc DL(Op);

  // For scalars, it's still beneficial to transfer to/from the SIMD unit to
  // perform the BITREVERSE.
  if (!VT.isVector()) {
    MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
    SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
    Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
                       DAG.getIntPtrConstant(0, DL));
  }

  int NumElts = VT.getVectorNumElements();
  int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;

  // Decompose 256-bit ops into smaller 128-bit ops.
  if (VT.is256BitVector())
    return Lower256IntUnary(Op, DAG);

  assert(VT.is128BitVector() &&
         "Only 128-bit vector bitreverse lowering supported.");

  // VPPERM reverses the bits of a byte when the permute control byte has the
  // (2 << 5) operation selector set, so we can perform the BSWAP as part of
  // the shuffle.
  // It's best to shuffle using the second operand as this will implicitly
  // allow memory folding for multiple vectors.
  SmallVector<SDValue, 16> MaskElts;
  for (int i = 0; i != NumElts; ++i) {
    for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
      int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
      int PermuteByte = SourceByte | (2 << 5);
      MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
    }
  }

  SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
  SDValue Res = DAG.getBitcast(MVT::v16i8, In);
  Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
                    Res, Mask);
  return DAG.getBitcast(VT, Res);
}

static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
                               SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  if (Subtarget.hasXOP() && !VT.is512BitVector())
    return LowerBITREVERSE_XOP(Op, DAG);

  assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");

  SDValue In = Op.getOperand(0);
  SDLoc DL(Op);

  // Split v8i64/v16i32 without BWI so that we can still use the PSHUFB
  // lowering.
  if (VT == MVT::v8i64 || VT == MVT::v16i32) {
    assert(!Subtarget.hasBWI() && "BWI should Expand BITREVERSE");
    return Lower512IntUnary(Op, DAG);
  }

  unsigned NumElts = VT.getVectorNumElements();
  assert(VT.getScalarType() == MVT::i8 &&
         "Only byte vector BITREVERSE supported");

  // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return Lower256IntUnary(Op, DAG);

  // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
  // two nibbles and a PSHUFB lookup to find the bitreverse of each
  // 0-15 value (moved to the other nibble).
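  // For example, byte 0x2C indexes LoLUT[0xC] = 0x30 and HiLUT[0x2] = 0x04;
  // OR'ing the two gives 0x34, which is 0x2C with its bits reversed.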
  SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
  SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
  SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));

  const int LoLUT[16] = {
      /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
      /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
      /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
      /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
  const int HiLUT[16] = {
      /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
      /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
      /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
      /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};

  SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
  for (unsigned i = 0; i < NumElts; ++i) {
    LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
    HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
  }

  SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
  SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
  Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
  Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
  return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
}

static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  unsigned NewOpc = 0;
  switch (N->getOpcode()) {
  case ISD::ATOMIC_LOAD_ADD:
    NewOpc = X86ISD::LADD;
    break;
  case ISD::ATOMIC_LOAD_SUB:
    NewOpc = X86ISD::LSUB;
    break;
  case ISD::ATOMIC_LOAD_OR:
    NewOpc = X86ISD::LOR;
    break;
  case ISD::ATOMIC_LOAD_XOR:
    NewOpc = X86ISD::LXOR;
    break;
  case ISD::ATOMIC_LOAD_AND:
    NewOpc = X86ISD::LAND;
    break;
  default:
    llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
  }

  MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();

  return DAG.getMemIntrinsicNode(
      NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
      {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
      /*MemVT=*/N->getSimpleValueType(0), MMO);
}

/// Lower atomic_load_ops into LOCK-prefixed operations.
static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
  AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
  SDValue Chain = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  unsigned Opc = N->getOpcode();
  MVT VT = N->getSimpleValueType(0);
  SDLoc DL(N);

  // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
  // can only be lowered when the result is unused.  They should have already
  // been transformed into a cmpxchg loop in AtomicExpand.
  if (N->hasAnyUseOfValue(0)) {
    // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
    // select LXADD if LOCK_SUB can't be selected.
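    // For example, an `atomicrmw sub` whose result is used becomes an atomic
    // add of the negated operand, which can then be selected as lock xadd.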
    if (Opc == ISD::ATOMIC_LOAD_SUB) {
      RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
                           RHS, AN->getMemOperand());
    }
    assert(Opc == ISD::ATOMIC_LOAD_ADD &&
           "Used AtomicRMW ops other than Add should have been expanded!");
    return N;
  }

  // Specialized lowering for the canonical form of an idempotent atomicrmw.
  // The core idea here is that since the memory location isn't actually
  // changing, all we need is a lowering for the *ordering* impacts of the
  // atomicrmw.  As such, we can choose a different operation and memory
  // location to minimize impact on other code.
  if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
    // On X86, the only ordering which actually requires an instruction is
    // seq_cst that isn't SingleThread; everything else just needs to be
    // preserved during codegen and then dropped. Note that we expect (but
    // don't assume) that orderings other than seq_cst and acq_rel have been
    // canonicalized to a store or load.
    if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
        AN->getSyncScopeID() == SyncScope::System) {
      // Prefer a locked operation against a stack location to minimize cache
      // traffic.  This assumes that stack locations are very likely to be
      // accessed only by the owning thread.
      SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
      assert(!N->hasAnyUseOfValue(0));
      // NOTE: The getUNDEF is needed to give something for the unused result 0.
      return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
                         DAG.getUNDEF(VT), NewChain);
    }
    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
    assert(!N->hasAnyUseOfValue(0));
    // NOTE: The getUNDEF is needed to give something for the unused result 0.
    return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
                       DAG.getUNDEF(VT), NewChain);
  }

  SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
  // RAUW the chain, but don't worry about the result, as it's unused.
  assert(!N->hasAnyUseOfValue(0));
  // NOTE: The getUNDEF is needed to give something for the unused result 0.
  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
                     DAG.getUNDEF(VT), LockOp.getValue(1));
}

static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDLoc dl(Node);
  EVT VT = Node->getMemoryVT();

  bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);

  // If this store is not sequentially consistent and the type is legal
  // we can just keep it.
  if (!IsSeqCst && IsTypeLegal)
    return Op;

  if (VT == MVT::i64 && !IsTypeLegal) {
    // For illegal i64 atomic_stores, we can try to use MOVQ if SSE2 is enabled.
    // FIXME: Use movlps with SSE1.
    // FIXME: Use fist with X87.
    bool NoImplicitFloatOps =
        DAG.getMachineFunction().getFunction().hasFnAttribute(
            Attribute::NoImplicitFloat);
    if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
        Subtarget.hasSSE2()) {
      SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                                     Node->getOperand(2));
      SDVTList Tys = DAG.getVTList(MVT::Other);
      SDValue Ops[] = { Node->getChain(), SclToVec, Node->getBasePtr() };
      SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys,
                                              Ops, MVT::i64,
                                              Node->getMemOperand());

      // If this is a sequentially consistent store, also emit an appropriate
      // barrier.
      if (IsSeqCst)
        Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);

      return Chain;
    }
  }

  // Convert seq_cst store -> xchg
  // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
  // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
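  // For example, a seq_cst i32 atomic store becomes an xchg of the value into
  // memory; xchg with a memory operand is implicitly locked, so no separate
  // fence is needed.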
  SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
                               Node->getMemoryVT(),
                               Node->getOperand(0),
                               Node->getOperand(1), Node->getOperand(2),
                               Node->getMemOperand());
  return Swap.getValue(1);
}

static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
  SDNode *N = Op.getNode();
  MVT VT = N->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  SDLoc DL(N);

  // Set the carry flag.
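  // Adding all-ones to the incoming carry value sets CF exactly when the
  // carry is non-zero, re-materializing the flag for the ADC/SBB below.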
  SDValue Carry = Op.getOperand(2);
  EVT CarryVT = Carry.getValueType();
  APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
  Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
                      Carry, DAG.getConstant(NegOne, DL, CarryVT));

  unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
  SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
                            Op.getOperand(1), Carry.getValue(1));

  SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}

static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
                            SelectionDAG &DAG) {
  assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());

  // For MacOSX, we want to call an alternative entry point: __sincos_stret,
  // which returns the values as { float, float } (in XMM0) or
  // { double, double } (which is returned in XMM0, XMM1).
  SDLoc dl(Op);
  SDValue Arg = Op.getOperand(0);
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Node = Arg;
  Entry.Ty = ArgTy;
  Entry.IsSExt = false;
  Entry.IsZExt = false;
  Args.push_back(Entry);

  bool isF64 = ArgVT == MVT::f64;
  // Only optimize x86_64 for now. i386 is a bit messy. For f32,
  // the small struct {f32, f32} is returned in (eax, edx). For f64,
  // the results are returned via SRet in memory.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
  const char *LibcallName = TLI.getLibcallName(LC);
  SDValue Callee =
      DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));

  Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
                      : (Type *)VectorType::get(ArgTy, 4);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);

  if (isF64)
    // Returned in xmm0 and xmm1.
    return CallResult.first;

  // Returned in bits 0:31 and 32:64 xmm0.
  SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
                               CallResult.first, DAG.getIntPtrConstant(0, dl));
  SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
                               CallResult.first, DAG.getIntPtrConstant(1, dl));
  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
}

/// Widen a vector input to a vector of NVT.  The
/// input vector must have the same element type as NVT.
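/// For example, widening v4i32 to v16i32 inserts the input as subvector 0 of
/// an undef (or zero, when FillWithZeroes is set) v16i32; constant build
/// vectors are instead padded element by element.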
static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
                            bool FillWithZeroes = false) {
  // Check if InOp already has the right width.
  MVT InVT = InOp.getSimpleValueType();
  if (InVT == NVT)
    return InOp;

  if (InOp.isUndef())
    return DAG.getUNDEF(NVT);

  assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
         "input and widen element type must match");

  unsigned InNumElts = InVT.getVectorNumElements();
  unsigned WidenNumElts = NVT.getVectorNumElements();
  assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
         "Unexpected request for vector widening");

  SDLoc dl(InOp);
  if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
      InOp.getNumOperands() == 2) {
    SDValue N1 = InOp.getOperand(1);
    if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
        N1.isUndef()) {
      InOp = InOp.getOperand(0);
      InVT = InOp.getSimpleValueType();
      InNumElts = InVT.getVectorNumElements();
    }
  }
  if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
      ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
    SmallVector<SDValue, 16> Ops;
    for (unsigned i = 0; i < InNumElts; ++i)
      Ops.push_back(InOp.getOperand(i));

    EVT EltVT = InOp.getOperand(0).getValueType();

    SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
      DAG.getUNDEF(EltVT);
    for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
      Ops.push_back(FillVal);
    return DAG.getBuildVector(NVT, dl, Ops);
  }
  SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
    DAG.getUNDEF(NVT);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
                     InOp, DAG.getIntPtrConstant(0, dl));
}

static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
                             SelectionDAG &DAG) {
  assert(Subtarget.hasAVX512() &&
         "MGATHER/MSCATTER are supported on AVX-512 arch only");

  MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
  SDValue Src = N->getValue();
  MVT VT = Src.getSimpleValueType();
  assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
  SDLoc dl(Op);

  SDValue Scale = N->getScale();
  SDValue Index = N->getIndex();
  SDValue Mask = N->getMask();
  SDValue Chain = N->getChain();
  SDValue BasePtr = N->getBasePtr();

  if (VT == MVT::v2f32 || VT == MVT::v2i32) {
    assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
    // If the index is v2i64 and we have VLX we can use xmm for data and index.
    if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
      Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
      SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
      SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
      SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
          VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
      return SDValue(NewScatter.getNode(), 1);
    }
    return SDValue();
  }

  MVT IndexVT = Index.getSimpleValueType();
  MVT MaskVT = Mask.getSimpleValueType();

  // If the index is v2i32, we're being called by type legalization and we
  // should just let the default handling take care of it.
  if (IndexVT == MVT::v2i32)
    return SDValue();

  // If we don't have VLX and neither the passthru nor the index is 512 bits,
  // we need to widen until one is.
  if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
      !Index.getSimpleValueType().is512BitVector()) {
    // Determine how much we need to widen by to get a 512-bit type.
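    // For example, v8f32 data with a v8i32 index widens by a factor of 2 to
    // v16f32/v16i32 with a v16i1 mask whose extra lanes are zero (inactive).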
    unsigned Factor = std::min(512/VT.getSizeInBits(),
                               512/IndexVT.getSizeInBits());
    unsigned NumElts = VT.getVectorNumElements() * Factor;

    VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);

    Src = ExtendToType(Src, VT, DAG);
    Index = ExtendToType(Index, IndexVT, DAG);
    Mask = ExtendToType(Mask, MaskVT, DAG, true);
  }

  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
  SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
  SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
      VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
  return SDValue(NewScatter.getNode(), 1);
}

static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {

  MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();
  SDValue Mask = N->getMask();
  MVT MaskVT = Mask.getSimpleValueType();
  SDValue PassThru = N->getPassThru();
  SDLoc dl(Op);

  // Handle AVX masked loads which don't support passthru other than 0.
  if (MaskVT.getVectorElementType() != MVT::i1) {
    // We also allow undef in the isel pattern.
    if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
      return Op;

    SDValue NewLoad = DAG.getMaskedLoad(VT, dl, N->getChain(),
                                        N->getBasePtr(), Mask,
                                        getZeroVector(VT, Subtarget, DAG, dl),
                                        N->getMemoryVT(), N->getMemOperand(),
                                        N->getExtensionType(),
                                        N->isExpandingLoad());
    // Emit a blend.
    SDValue Select = DAG.getNode(ISD::VSELECT, dl, MaskVT, Mask, NewLoad,
                                 PassThru);
    return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
  }

  assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
         "Expanding masked load is supported on AVX-512 target only!");

  assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
         "Expanding masked load is supported for 32 and 64-bit types only!");

  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
         "Cannot lower masked load op.");

  assert((ScalarVT.getSizeInBits() >= 32 ||
          (Subtarget.hasBWI() &&
              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
         "Unsupported masked load op.");

  // This operation is legal for targets with VLX, but without
  // VLX the vector should be widened to 512 bits.
  unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
  MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
  PassThru = ExtendToType(PassThru, WideDataVT, DAG);

  // Mask element has to be i1.
  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
         "Unexpected mask type");

  MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);

  Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
  SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
                                      N->getBasePtr(), Mask, PassThru,
                                      N->getMemoryVT(), N->getMemOperand(),
                                      N->getExtensionType(),
                                      N->isExpandingLoad());

  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                NewLoad.getValue(0),
                                DAG.getIntPtrConstant(0, dl));
  SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
  return DAG.getMergeValues(RetOps, dl);
}

static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
  SDValue DataToStore = N->getValue();
  MVT VT = DataToStore.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();
  SDValue Mask = N->getMask();
  SDLoc dl(Op);

  assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
         "Compressing masked store is supported on AVX-512 targets only!");

  assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
         "Compressing masked store is supported for 32 and 64-bit types only!");

  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
         "Cannot lower masked store op.");

  assert((ScalarVT.getSizeInBits() >= 32 ||
          (Subtarget.hasBWI() &&
              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
          "Unsupported masked store op.");

  // This operation is legal for targets with VLX, but without
  // VLX the vector should be widened to 512 bits.
  unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
  MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);

  // Mask element has to be i1.
  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
         "Unexpected mask type");

  MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);

  DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
  Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
  return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
                            Mask, N->getMemoryVT(), N->getMemOperand(),
                            N->isTruncatingStore(), N->isCompressingStore());
}

static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
                            SelectionDAG &DAG) {
  assert(Subtarget.hasAVX2() &&
         "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");

  MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue Index = N->getIndex();
  SDValue Mask = N->getMask();
  SDValue PassThru = N->getPassThru();
  MVT IndexVT = Index.getSimpleValueType();
  MVT MaskVT = Mask.getSimpleValueType();

  assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");

  // If the index is v2i32, we're being called by type legalization.
  if (IndexVT == MVT::v2i32)
    return SDValue();

  // If we don't have VLX and neither the passthru nor the index is 512 bits,
  // we need to widen until one is.
  MVT OrigVT = VT;
  if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
      !IndexVT.is512BitVector()) {
    // Determine how much we need to widen by to get a 512-bit type.
    unsigned Factor = std::min(512/VT.getSizeInBits(),
                               512/IndexVT.getSizeInBits());

    unsigned NumElts = VT.getVectorNumElements() * Factor;

    VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);

    PassThru = ExtendToType(PassThru, VT, DAG);
    Index = ExtendToType(Index, IndexVT, DAG);
    Mask = ExtendToType(Mask, MaskVT, DAG, true);
  }

  SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
                    N->getScale() };
  SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
      DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
      N->getMemOperand());
  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
                                NewGather, DAG.getIntPtrConstant(0, dl));
  return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
}

SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // TODO: Eventually, the lowering of these nodes should be informed by or
  // deferred to the GC strategy for the function in which they appear. For
  // now, however, they must be lowered to something. Since they are logically
  // no-ops in the case of a null GC strategy (or a GC strategy which does not
  // require special handling for these nodes), lower them as literal NOOPs for
  // the time being.
  SmallVector<SDValue, 2> Ops;

  Ops.push_back(Op.getOperand(0));
  if (Op->getGluedNode())
    Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));

  SDLoc OpDL(Op);
  SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);

  return NOOP;
}

SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // TODO: Eventually, the lowering of these nodes should be informed by or
  // deferred to the GC strategy for the function in which they appear. For
  // now, however, they must be lowered to something. Since they are logically
  // no-ops in the case of a null GC strategy (or a GC strategy which does not
  // require special handling for these nodes), lower them as literal NOOPs for
  // the time being.
  SmallVector<SDValue, 2> Ops;

  Ops.push_back(Op.getOperand(0));
  if (Op->getGluedNode())
    Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));

  SDLoc OpDL(Op);
  SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);

  return NOOP;
}

SDValue X86TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG,
                                         RTLIB::Libcall Call) const {
  SmallVector<SDValue, 2> Ops(Op->op_begin(), Op->op_end());
  MakeLibCallOptions CallOptions;
  return makeLibCall(DAG, Call, MVT::f128, Ops, CallOptions, SDLoc(Op)).first;
}

/// Provide custom lowering hooks for some operations.
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, Subtarget, DAG);
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    return LowerCMP_SWAP(Op, Subtarget, DAG);
  case ISD::CTPOP:              return LowerCTPOP(Op, Subtarget, DAG);
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:    return lowerAtomicArith(Op, DAG, Subtarget);
  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG, Subtarget);
  case ISD::BITREVERSE:         return LowerBITREVERSE(Op, Subtarget, DAG);
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::CONCAT_VECTORS:     return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
  case ISD::VECTOR_SHUFFLE:     return lowerVectorShuffle(Op, Subtarget, DAG);
  case ISD::VSELECT:            return LowerVSELECT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_SUBVECTOR:   return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
  case ISD::EXTRACT_SUBVECTOR:  return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:          return LowerShiftParts(Op, DAG);
  case ISD::FSHL:
  case ISD::FSHR:               return LowerFunnelShift(Op, Subtarget, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG);
  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
  case ISD::ZERO_EXTEND:        return LowerZERO_EXTEND(Op, Subtarget, DAG);
  case ISD::SIGN_EXTEND:        return LowerSIGN_EXTEND(Op, Subtarget, DAG);
  case ISD::ANY_EXTEND:         return LowerANY_EXTEND(Op, Subtarget, DAG);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:         return LowerFP_TO_INT(Op, DAG);
  case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
  case ISD::FP_ROUND:           return LowerFP_ROUND(Op, DAG);
  case ISD::STRICT_FP_ROUND:    return LowerSTRICT_FP_ROUND(Op, DAG);
  case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
  case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
  case ISD::FADD:
  case ISD::FSUB:               return lowerFaddFsub(Op, DAG);
  case ISD::FMUL:               return LowerF128Call(Op, DAG, RTLIB::MUL_F128);
  case ISD::FDIV:               return LowerF128Call(Op, DAG, RTLIB::DIV_F128);
  case ISD::FABS:
  case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
  case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
  case ISD::FGETSIGN:           return LowerFGETSIGN(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::SETCCCARRY:         return LowerSETCCCARRY(Op, DAG);
  case ISD::SELECT:             return LowerSELECT(Op, DAG);
  case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, Subtarget, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:  return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::ADDROFRETURNADDR:   return LowerADDROFRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
                                return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::EH_SJLJ_SETUP_DISPATCH:
    return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:    return LowerCTLZ(Op, Subtarget, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:    return LowerCTTZ(Op, Subtarget, DAG);
  case ISD::MUL:                return LowerMUL(Op, Subtarget, DAG);
  case ISD::MULHS:
  case ISD::MULHU:              return LowerMULH(Op, Subtarget, DAG);
  case ISD::ROTL:
  case ISD::ROTR:               return LowerRotate(Op, Subtarget, DAG);
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL:                return LowerShift(Op, Subtarget, DAG);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:              return LowerXALUO(Op, DAG);
  case ISD::READCYCLECOUNTER:   return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
  case ISD::BITCAST:            return LowerBITCAST(Op, Subtarget, DAG);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:           return LowerADDSUBCARRY(Op, DAG);
  case ISD::ADD:
  case ISD::SUB:                return lowerAddSub(Op, DAG, Subtarget);
  case ISD::UADDSAT:
  case ISD::SADDSAT:
  case ISD::USUBSAT:
  case ISD::SSUBSAT:            return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN:               return LowerMINMAX(Op, DAG);
  case ISD::ABS:                return LowerABS(Op, Subtarget, DAG);
  case ISD::FSINCOS:            return LowerFSINCOS(Op, Subtarget, DAG);
  case ISD::MLOAD:              return LowerMLOAD(Op, Subtarget, DAG);
  case ISD::MSTORE:             return LowerMSTORE(Op, Subtarget, DAG);
  case ISD::MGATHER:            return LowerMGATHER(Op, Subtarget, DAG);
  case ISD::MSCATTER:           return LowerMSCATTER(Op, Subtarget, DAG);
  case ISD::GC_TRANSITION_START:
                                return LowerGC_TRANSITION_START(Op, DAG);
  case ISD::GC_TRANSITION_END:  return LowerGC_TRANSITION_END(Op, DAG);
  }
}

/// Places new result values for the node in Results (their number
/// and types must exactly match those of the original return values of
/// the node), or leaves Results empty, which indicates that the node is not
/// to be custom lowered after all.
void X86TargetLowering::LowerOperationWrapper(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  SDValue Res = LowerOperation(SDValue(N, 0), DAG);

  if (!Res.getNode())
    return;

  // If the original node has one result, take the return value from
  // LowerOperation as is. It might not be result number 0.
  if (N->getNumValues() == 1) {
    Results.push_back(Res);
    return;
  }

  // If the original node has multiple results, then the return node should
  // have the same number of results.
  assert((N->getNumValues() == Res->getNumValues()) &&
      "Lowering returned the wrong number of results!");

  // Places new result values based on the result number of N.
  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
    Results.push_back(Res.getValue(I));
}

/// Replace a node with an illegal result type with a new node built out of
/// custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue>&Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "ReplaceNodeResults: ";
    N->dump(&DAG);
#endif
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::CTPOP: {
    assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
    // Use a v2i64 if possible.
    bool NoImplicitFloatOps =
        DAG.getMachineFunction().getFunction().hasFnAttribute(
            Attribute::NoImplicitFloat);
    if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
      SDValue Wide =
          DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
      Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
      // Bit count should fit in 32-bits, extract it as that and then zero
      // extend to i64. Otherwise we end up extracting bits 63:32 separately.
      Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
      Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
                         DAG.getIntPtrConstant(0, dl));
      Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
      Results.push_back(Wide);
    }
    return;
  }
  case ISD::MUL: {
    EVT VT = N->getValueType(0);
    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
           VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
    // Pre-promote these to vXi16 to avoid op legalization thinking all 16
    // elements are needed.
    MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
    SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
    SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
    SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
    Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
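    // Widen the vXi8 result back out to v16i8 with undef elements so the
    // node ends up with the legal type that type legalization expects.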
    unsigned NumConcats = 16 / VT.getVectorNumElements();
    SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
    ConcatOps[0] = Res;
    Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
    Results.push_back(Res);
    return;
  }
  case X86ISD::VPMADDWD:
  case X86ISD::AVG: {
    // Legalize types for X86ISD::AVG/VPMADDWD by widening.
    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");

    EVT VT = N->getValueType(0);
    EVT InVT = N->getOperand(0).getValueType();
    assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
           "Expected a VT that divides into 128 bits.");
    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
           "Unexpected type action!");
    unsigned NumConcat = 128 / InVT.getSizeInBits();
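    // NumConcat copies of the narrow input fill a full 128-bit vector; only
    // the first copy below is real, the rest are undef padding.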

    EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
                                    InVT.getVectorElementType(),
                                    NumConcat * InVT.getVectorNumElements());
    EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
                                  VT.getVectorElementType(),
                                  NumConcat * VT.getVectorNumElements());

    SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
    Ops[0] = N->getOperand(0);
    SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
    Ops[0] = N->getOperand(1);
    SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);

    SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
    Results.push_back(Res);
    return;
  }
  case ISD::ABS: {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    assert(N->getValueType(0) == MVT::i64 &&
           "Unexpected type (!= i64) on ABS.");
    MVT HalfT = MVT::i32;
    SDValue Lo, Hi, Tmp;
    SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
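    // Expand i64 abs on 32-bit targets using the identity
    // abs(x) == (x + (x >> 63)) ^ (x >> 63) (arithmetic shift), carried out
    // on i32 halves: the sign mask is added via UADDO/ADDCARRY, then XORed.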

    Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                     DAG.getConstant(0, dl, HalfT));
    Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                     DAG.getConstant(1, dl, HalfT));
    Tmp = DAG.getNode(
        ISD::SRA, dl, HalfT, Hi,
        DAG.getConstant(HalfT.getSizeInBits() - 1, dl,
                        TLI.getShiftAmountTy(HalfT, DAG.getDataLayout())));
    Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
    Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
                     SDValue(Lo.getNode(), 1));
    Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
    Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
    Results.push_back(Lo);
    Results.push_back(Hi);
    return;
  }
  // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
  case X86ISD::FMINC:
  case X86ISD::FMIN:
  case X86ISD::FMAXC:
  case X86ISD::FMAX: {
    EVT VT = N->getValueType(0);
    assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
    SDValue UNDEF = DAG.getUNDEF(VT);
    SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(0), UNDEF);
    SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(1), UNDEF);
    Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
    return;
  }
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM: {
    EVT VT = N->getValueType(0);
    if (VT.isVector()) {
      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
             "Unexpected type action!");
      // If the RHS is a constant splat vector, we can widen this and let the
      // division/remainder-by-constant optimization handle it.
      // TODO: Can we do something for non-splat?
      APInt SplatVal;
      if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
        unsigned NumConcats = 128 / VT.getSizeInBits();
        SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
        Ops0[0] = N->getOperand(0);
        EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
        SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
        SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
        SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
        Results.push_back(Res);
      }
      return;
    }

    LLVM_FALLTHROUGH;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
    Results.push_back(V);
    return;
  }
  case ISD::TRUNCATE: {
    MVT VT = N->getSimpleValueType(0);
    if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
      return;

    // The generic legalizer will try to widen the input type to the same
    // number of elements as the widened result type. But this isn't always
    // the best approach, so do some custom legalization to avoid it in some
    // cases.
    MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
    SDValue In = N->getOperand(0);
    EVT InVT = In.getValueType();

    unsigned InBits = InVT.getSizeInBits();
    if (128 % InBits == 0) {
      // 128 bit and smaller inputs should avoid the truncate altogether and
      // just use a build_vector that will become a shuffle.
      // TODO: Widen and use a shuffle directly?
      MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
      EVT EltVT = VT.getVectorElementType();
      unsigned WidenNumElts = WidenVT.getVectorNumElements();
      SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
      // Use the original element count so we don't do more scalar opts than
      // necessary.
      unsigned MinElts = VT.getVectorNumElements();
      for (unsigned i=0; i < MinElts; ++i) {
        SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
                                  DAG.getIntPtrConstant(i, dl));
        Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
      }
      Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
      return;
    }
    // With AVX512 there are some cases that can use a target specific
    // truncate node to go from 256/512 to less than 128 with zeros in the
    // upper elements of the 128 bit result.
    if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
      // We can use VTRUNC directly for a 256-bit input with VLX, or for any
      // 512-bit input.
      if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
        Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
        return;
      }
      // There's one case we can widen to 512 bits and use VTRUNC.
      if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
        In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
                         DAG.getUNDEF(MVT::v4i64));
        Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
        return;
      }
    }
    if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
        getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
        isTypeLegal(MVT::v4i64)) {
      // The input needs to be split and the output needs to be widened. Use
      // two VTRUNCs, and shuffle their results together into the wider type.
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(In, dl);

      Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
      Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
      SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
                                         { 0,  1,  2,  3, 16, 17, 18, 19,
                                          -1, -1, -1, -1, -1, -1, -1, -1 });
      Results.push_back(Res);
      return;
    }

    return;
  }
  case ISD::ANY_EXTEND:
    // Right now, only MVT::v8i8 has Custom action for an illegal type.
    // It's intended to custom handle the input type.
    assert(N->getValueType(0) == MVT::v8i8 &&
           "Do not know how to legalize this Node");
    return;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND: {
    EVT VT = N->getValueType(0);
    SDValue In = N->getOperand(0);
    EVT InVT = In.getValueType();
    if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
        (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
      assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
             "Unexpected type action!");
      assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
      // Custom split this so we can extend i8/i16->i32 invec. This is better
      // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
      // sra, followed by an extend from i32 to i64 using pcmpgt. By custom
      // splitting we allow the sra from the extend to i32 to be shared by
      // both halves of the split.
      In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);

      // Fill a vector with sign bits for each element.
      SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
      SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);

      // Create an unpackl and unpackh to interleave the sign bits then bitcast
      // to v2i64.
      SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
                                        {0, 4, 1, 5});
      Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
      SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
                                        {2, 6, 3, 7});
      Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);

      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
      Results.push_back(Res);
      return;
    }

    if (VT == MVT::v16i32 || VT == MVT::v8i64) {
      if (!InVT.is128BitVector()) {
        // Not a 128 bit vector, but maybe type legalization will promote
        // it to 128 bits.
        if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
          return;
        InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
        if (!InVT.is128BitVector())
          return;

        // Promote the input to 128 bits. Type legalization will turn this into
        // zext_inreg/sext_inreg.
        In = DAG.getNode(N->getOpcode(), dl, InVT, In);
      }

      // Perform custom splitting instead of the two stage extend we would get
      // by default.
      EVT LoVT, HiVT;
      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
      assert(isTypeLegal(LoVT) && "Split VT not legal?");

      SDValue Lo = getExtendInVec(N->getOpcode(), dl, LoVT, In, DAG);

      // We need to shift the input over by half the number of elements.
      unsigned NumElts = InVT.getVectorNumElements();
      unsigned HalfNumElts = NumElts / 2;
      SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
      for (unsigned i = 0; i != HalfNumElts; ++i)
        ShufMask[i] = i + HalfNumElts;
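      // E.g. for a v16i8 input this builds the mask <8..15, undef x8>,
      // moving the upper half of the elements down into the low half.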

      SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
      Hi = getExtendInVec(N->getOpcode(), dl, HiVT, Hi, DAG);

      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
      Results.push_back(Res);
    }
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
    EVT VT = N->getValueType(0);
    SDValue Src = N->getOperand(0);
    EVT SrcVT = Src.getValueType();

    if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
             "Unexpected type action!");

      // Try to create a 128 bit vector, but don't exceed a 32 bit element.
      unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
      MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
                                       VT.getVectorNumElements());
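      // E.g. a v8i8 result converts via v8i16 (128/8 = 16-bit elements);
      // a v2i8 or v2i16 result uses v2i32, since elements are capped at 32.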
      SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);

      // Preserve what we know about the size of the original result. Except
      // when the result is v2i32 since we can't widen the assert.
      if (PromoteVT != MVT::v2i32)
        Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
                                                            : ISD::AssertSext,
                          dl, PromoteVT, Res,
                          DAG.getValueType(VT.getVectorElementType()));

      // Truncate back to the original width.
      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);

      // Now widen to 128 bits.
      unsigned NumConcats = 128 / VT.getSizeInBits();
      MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
                                      VT.getVectorNumElements() * NumConcats);
      SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
      ConcatOps[0] = Res;
      Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
      Results.push_back(Res);
      return;
    }

    if (VT == MVT::v2i32) {
      assert((IsSigned || Subtarget.hasAVX512()) &&
             "Can only handle signed conversion without AVX512");
      assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
             "Unexpected type action!");
      if (Src.getValueType() == MVT::v2f64) {
        if (!IsSigned && !Subtarget.hasVLX()) {
          // Without VLX we can't emit a target-specific FP_TO_UINT node, so
          // defer to the generic legalizer, which will widen the input as
          // well. This will be further widened during op legalization to
          // v8i32<-v8f64.
          return;
        }
        unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
        SDValue Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
        Results.push_back(Res);
        return;
      }

      // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
      // so early out here.
      return;
    }

    assert(!VT.isVector() && "Vectors should have been handled above!");

    if (Subtarget.hasDQI() && VT == MVT::i64 &&
        (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
      assert(!Subtarget.is64Bit() && "i64 should be legal");
      unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
      // Using a 256-bit input here to guarantee 128-bit input for f32 case.
      // TODO: Use 128-bit vectors for f64 case?
      // TODO: Use 128-bit vectors for f32 by using CVTTP2SI/CVTTP2UI.
      MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
      MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), NumElts);

      SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
      SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
                                DAG.getConstantFP(0.0, dl, VecInVT), Src,
                                ZeroIdx);
      Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
      Results.push_back(Res);
      return;
    }

    if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned))
      Results.push_back(V);
    return;
  }
  case ISD::SINT_TO_FP: {
    assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
    SDValue Src = N->getOperand(0);
    if (N->getValueType(0) != MVT::v2f32 || Src.getValueType() != MVT::v2i64)
      return;
    Results.push_back(DAG.getNode(X86ISD::CVTSI2P, dl, MVT::v4f32, Src));
    return;
  }
  case ISD::UINT_TO_FP: {
    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
    EVT VT = N->getValueType(0);
    if (VT != MVT::v2f32)
      return;
    SDValue Src = N->getOperand(0);
    EVT SrcVT = Src.getValueType();
    if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
      Results.push_back(DAG.getNode(X86ISD::CVTUI2P, dl, MVT::v4f32, Src));
      return;
    }
    if (SrcVT != MVT::v2i32)
      return;
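    // 0x4330000000000000 is the bit pattern of the double 2^52. ORing the
    // zero-extended u32 into the low bits of 2^52 gives the double value
    // 2^52 + x exactly, so subtracting 2^52 recovers x as an exact f64,
    // which is then rounded to f32 by the VFPROUND below.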
    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
    SDValue VBias =
        DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
    SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
                             DAG.getBitcast(MVT::v2i64, VBias));
    Or = DAG.getBitcast(MVT::v2f64, Or);
    // TODO: Are there any fast-math-flags to propagate here?
    SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
    Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
    return;
  }
  case ISD::FP_ROUND: {
    if (!isTypeLegal(N->getOperand(0).getValueType()))
      return;
    SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
    Results.push_back(V);
    return;
  }
  case ISD::FP_EXTEND: {
    // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
    // No other ValueType for FP_EXTEND should reach this point.
    assert(N->getValueType(0) == MVT::v2f32 &&
           "Do not know how to legalize this Node");
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = N->getConstantOperandVal(1);
    switch (IntNo) {
    default : llvm_unreachable("Do not know how to custom type "
                               "legalize this intrinsic operation!");
    case Intrinsic::x86_rdtsc:
      return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
                                     Results);
    case Intrinsic::x86_rdtscp:
      return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
                                     Results);
    case Intrinsic::x86_rdpmc:
      expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
                                  Results);
      return;
    case Intrinsic::x86_xgetbv:
      expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
                                  Results);
      return;
    }
  }
  case ISD::READCYCLECOUNTER: {
    return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
    EVT T = N->getValueType(0);
    assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
    bool Regs64bit = T == MVT::i128;
    assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
           "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
    MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
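    // CMPXCHG8B/16B compare the expected value in EDX:EAX (RDX:RAX) with
    // memory and, on a match, store the desired value from ECX:EBX (RCX:RBX).
    // The old value comes back in EDX:EAX (RDX:RAX) and ZF reports success;
    // the register copies below set up and read back that convention.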
    SDValue cpInL, cpInH;
    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(0, dl, HalfT));
    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(1, dl, HalfT));
    cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
                             Regs64bit ? X86::RAX : X86::EAX,
                             cpInL, SDValue());
    cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
                             Regs64bit ? X86::RDX : X86::EDX,
                             cpInH, cpInL.getValue(1));
    SDValue swapInL, swapInH;
    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(0, dl, HalfT));
    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(1, dl, HalfT));
    swapInH =
        DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
                         swapInH, cpInH.getValue(1));
    // If the current function needs the base pointer, RBX, we shouldn't use
    // cmpxchg directly. The lowering of that instruction will clobber that
    // register, and since RBX will then be a reserved register, the register
    // allocator will not make sure its value is properly saved and restored
    // around this live range.
    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
    SDValue Result;
    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
    Register BasePtr = TRI->getBaseRegister();
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
        (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
      // ISel prefers the LCMPXCHG64 variant.
      // If the assert below fires, that is no longer the case, and we need
      // to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX, not just EBX. This
      // is a matter of accepting an i64 input for that pseudo and restoring
      // into the register of the right width in the expand pseudo.
      // Everything else should just work.
      assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
             "Saving only half of the RBX");
      unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
                                  : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
      SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
                                           Regs64bit ? X86::RBX : X86::EBX,
                                           HalfT, swapInH.getValue(1));
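      // The SAVE_EBX/RBX pseudos take both the value to place in EBX/RBX and
      // this saved copy, so the expansion can restore the base pointer after
      // the cmpxchg has executed.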
      SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
                       RBXSave,
                       /*Glue*/ RBXSave.getValue(2)};
      Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
    } else {
      unsigned Opcode =
          Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
      swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
                                 Regs64bit ? X86::RBX : X86::EBX, swapInL,
                                 swapInH.getValue(1));
      SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
                       swapInL.getValue(1)};
      Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
    }
    SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
                                        Regs64bit ? X86::RAX : X86::EAX,
                                        HalfT, Result.getValue(1));
    SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
                                        Regs64bit ? X86::RDX : X86::EDX,
                                        HalfT, cpOutL.getValue(2));
    SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};

    SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
                                        MVT::i32, cpOutH.getValue(2));
    SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
    Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));

    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
    Results.push_back(Success);
    Results.push_back(EFLAGS.getValue(1));
    return;
  }
  case ISD::ATOMIC_LOAD: {
    assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
    bool NoImplicitFloatOps =
        DAG.getMachineFunction().getFunction().hasFnAttribute(
            Attribute::NoImplicitFloat);
    if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
      auto *Node = cast<AtomicSDNode>(N);
      if (Subtarget.hasSSE2()) {
        // Use a VZEXT_LOAD which will be selected as MOVQ. Then extract the
        // lower 64-bits.
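        // A naturally aligned 8-byte MOVQ load is a single memory access,
        // which is what lets it stand in for a 64-bit atomic load here.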
        SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
        SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
        SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                             MVT::i64, Node->getMemOperand());
        SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
                                  DAG.getIntPtrConstant(0, dl));
        Results.push_back(Res);
        Results.push_back(Ld.getValue(1));
        return;
      }
      if (Subtarget.hasX87()) {
        // First load this into an 80-bit X87 register. This will put the whole
        // integer into the significand.
        // FIXME: Do we need to glue? See FIXME comment in BuildFILD.
        SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other, MVT::Glue);
        SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
        SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD_FLAG,
                                                 dl, Tys, Ops, MVT::i64,
                                                 Node->getMemOperand());
        SDValue Chain = Result.getValue(1);
        SDValue InFlag = Result.getValue(2);

        // Now store the X87 register to a stack temporary and convert to i64.
        // This store is not atomic and doesn't need to be.
        // FIXME: We don't need a stack temporary if the result of the load
        // is already being stored. We could just directly store there.
        SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
        int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
        MachinePointerInfo MPI =
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
        SDValue StoreOps[] = { Chain, Result, StackPtr, InFlag };
        Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, dl,
                                        DAG.getVTList(MVT::Other), StoreOps,
                                        MVT::i64, MPI, 0 /*Align*/,
                                        MachineMemOperand::MOStore);

        // Finally load the value back from the stack temporary and return it.
        // This load is not atomic and doesn't need to be.
        // This load will be further type legalized.
        Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
        Results.push_back(Result);
        Results.push_back(Result.getValue(1));
        return;
      }
    }
    // TODO: Use MOVLPS when SSE1 is available?
    // Delegate to generic TypeLegalization. Situations we can really handle
    // should have already been dealt with by AtomicExpandPass.cpp.
    break;
  }
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
    // Delegate to generic TypeLegalization. Situations we can really handle
    // should have already been dealt with by AtomicExpandPass.cpp.
    break;

  case ISD::BITCAST: {
    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
    EVT DstVT = N->getValueType(0);
    EVT SrcVT = N->getOperand(0).getValueType();

    // If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit
    // target, we can split using the k-register rather than memory.
    if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
      assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
      Lo = DAG.getBitcast(MVT::i32, Lo);
      Hi = DAG.getBitcast(MVT::i32, Hi);
      SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
      Results.push_back(Res);
      return;
    }

    // Custom splitting for BWI types when AVX512F is available but BWI isn't.
    if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) &&
        SrcVT.isVector() && isTypeLegal(SrcVT)) {
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
      MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8;
      Lo = DAG.getBitcast(CastVT, Lo);
      Hi = DAG.getBitcast(CastVT, Hi);
      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
      Results.push_back(Res);
      return;
    }

    if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
      assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
             "Unexpected type action!");
      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
      SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, WideVT, N->getOperand(0));
      Results.push_back(Res);
      return;
    }

    return;
  }
  case ISD::MGATHER: {
    EVT VT = N->getValueType(0);
    if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
        (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
      auto *Gather = cast<MaskedGatherSDNode>(N);
      SDValue Index = Gather->getIndex();
      if (Index.getValueType() != MVT::v2i64)
        return;
      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
             "Unexpected type action!");
      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
      SDValue Mask = Gather->getMask();
      assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
      SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
                                     Gather->getPassThru(),
                                     DAG.getUNDEF(VT));
      if (!Subtarget.hasVLX()) {
        // We need to widen the mask, but the instruction will only use 2
        // of its elements. So we can use undef.
        Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
                           DAG.getUNDEF(MVT::v2i1));
        Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
      }
      SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
                        Gather->getBasePtr(), Index, Gather->getScale() };
      SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
        DAG.getVTList(WideVT, Mask.getValueType(), MVT::Other), Ops, dl,
        Gather->getMemoryVT(), Gather->getMemOperand());
      Results.push_back(Res);
      Results.push_back(Res.getValue(2));
      return;
    }
    return;
  }
  case ISD::LOAD: {
    // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
    // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an
    // int->fp cast since type legalization will try to use an i64 load.
    MVT VT = N->getSimpleValueType(0);
    assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
           "Unexpected type action!");
    if (!ISD::isNON_EXTLoad(N))
      return;
    auto *Ld = cast<LoadSDNode>(N);
    if (Subtarget.hasSSE2()) {
      MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
      SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
                                Ld->getPointerInfo(), Ld->getAlignment(),
                                Ld->getMemOperand()->getFlags());
      SDValue Chain = Res.getValue(1);
      MVT VecVT = MVT::getVectorVT(LdVT, 2);
      Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
      Res = DAG.getBitcast(WideVT, Res);
      Results.push_back(Res);
      Results.push_back(Chain);
      return;
    }
    assert(Subtarget.hasSSE1() && "Expected SSE");
    SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
    SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
    SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                          MVT::i64, Ld->getMemOperand());
    Results.push_back(Res);
    Results.push_back(Res.getValue(1));
    return;
  }
  }
}

const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((X86ISD::NodeType)Opcode) {
  case X86ISD::FIRST_NUMBER:       break;
  case X86ISD::BSF:                return "X86ISD::BSF";
  case X86ISD::BSR:                return "X86ISD::BSR";
  case X86ISD::SHLD:               return "X86ISD::SHLD";
  case X86ISD::SHRD:               return "X86ISD::SHRD";
  case X86ISD::FAND:               return "X86ISD::FAND";
  case X86ISD::FANDN:              return "X86ISD::FANDN";
  case X86ISD::FOR:                return "X86ISD::FOR";
  case X86ISD::FXOR:               return "X86ISD::FXOR";
  case X86ISD::FILD:               return "X86ISD::FILD";
  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
  case X86ISD::FIST:               return "X86ISD::FIST";
  case X86ISD::FP_TO_INT_IN_MEM:   return "X86ISD::FP_TO_INT_IN_MEM";
  case X86ISD::FLD:                return "X86ISD::FLD";
  case X86ISD::FST:                return "X86ISD::FST";
  case X86ISD::CALL:               return "X86ISD::CALL";
  case X86ISD::BT:                 return "X86ISD::BT";
  case X86ISD::CMP:                return "X86ISD::CMP";
  case X86ISD::COMI:               return "X86ISD::COMI";
  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
  case X86ISD::CMPM:               return "X86ISD::CMPM";
  case X86ISD::CMPM_SAE:           return "X86ISD::CMPM_SAE";
  case X86ISD::SETCC:              return "X86ISD::SETCC";
  case X86ISD::SETCC_CARRY:        return "X86ISD::SETCC_CARRY";
  case X86ISD::FSETCC:             return "X86ISD::FSETCC";
  case X86ISD::FSETCCM:            return "X86ISD::FSETCCM";
  case X86ISD::FSETCCM_SAE:        return "X86ISD::FSETCCM_SAE";
  case X86ISD::CMOV:               return "X86ISD::CMOV";
  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
  case X86ISD::IRET:               return "X86ISD::IRET";
  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
  case X86ISD::WrapperRIP:         return "X86ISD::WrapperRIP";
  case X86ISD::MOVQ2DQ:            return "X86ISD::MOVQ2DQ";
  case X86ISD::MOVDQ2Q:            return "X86ISD::MOVDQ2Q";
  case X86ISD::MMX_MOVD2W:         return "X86ISD::MMX_MOVD2W";
  case X86ISD::MMX_MOVW2D:         return "X86ISD::MMX_MOVW2D";
  case X86ISD::PEXTRB:             return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS:           return "X86ISD::INSERTPS";
  case X86ISD::PINSRB:             return "X86ISD::PINSRB";
  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
  case X86ISD::PSHUFB:             return "X86ISD::PSHUFB";
  case X86ISD::ANDNP:              return "X86ISD::ANDNP";
  case X86ISD::BLENDI:             return "X86ISD::BLENDI";
  case X86ISD::BLENDV:             return "X86ISD::BLENDV";
  case X86ISD::HADD:               return "X86ISD::HADD";
  case X86ISD::HSUB:               return "X86ISD::HSUB";
  case X86ISD::FHADD:              return "X86ISD::FHADD";
  case X86ISD::FHSUB:              return "X86ISD::FHSUB";
  case X86ISD::CONFLICT:           return "X86ISD::CONFLICT";
  case X86ISD::FMAX:               return "X86ISD::FMAX";
  case X86ISD::FMAXS:              return "X86ISD::FMAXS";
  case X86ISD::FMAX_SAE:           return "X86ISD::FMAX_SAE";
  case X86ISD::FMAXS_SAE:          return "X86ISD::FMAXS_SAE";
  case X86ISD::FMIN:               return "X86ISD::FMIN";
  case X86ISD::FMINS:              return "X86ISD::FMINS";
  case X86ISD::FMIN_SAE:           return "X86ISD::FMIN_SAE";
  case X86ISD::FMINS_SAE:          return "X86ISD::FMINS_SAE";
  case X86ISD::FMAXC:              return "X86ISD::FMAXC";
  case X86ISD::FMINC:              return "X86ISD::FMINC";
  case X86ISD::FRSQRT:             return "X86ISD::FRSQRT";
  case X86ISD::FRCP:               return "X86ISD::FRCP";
  case X86ISD::EXTRQI:             return "X86ISD::EXTRQI";
  case X86ISD::INSERTQI:           return "X86ISD::INSERTQI";
  case X86ISD::TLSADDR:            return "X86ISD::TLSADDR";
  case X86ISD::TLSBASEADDR:        return "X86ISD::TLSBASEADDR";
  case X86ISD::TLSCALL:            return "X86ISD::TLSCALL";
  case X86ISD::EH_SJLJ_SETJMP:     return "X86ISD::EH_SJLJ_SETJMP";
  case X86ISD::EH_SJLJ_LONGJMP:    return "X86ISD::EH_SJLJ_LONGJMP";
  case X86ISD::EH_SJLJ_SETUP_DISPATCH:
    return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
  case X86ISD::EH_RETURN:          return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN:          return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m:          return "X86ISD::FNSTCW16m";
  case X86ISD::FNSTSW16r:          return "X86ISD::FNSTSW16r";
  case X86ISD::LCMPXCHG_DAG:       return "X86ISD::LCMPXCHG_DAG";
  case X86ISD::LCMPXCHG8_DAG:      return "X86ISD::LCMPXCHG8_DAG";
  case X86ISD::LCMPXCHG16_DAG:     return "X86ISD::LCMPXCHG16_DAG";
  case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
    return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
  case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
    return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
  case X86ISD::LADD:               return "X86ISD::LADD";
  case X86ISD::LSUB:               return "X86ISD::LSUB";
  case X86ISD::LOR:                return "X86ISD::LOR";
  case X86ISD::LXOR:               return "X86ISD::LXOR";
  case X86ISD::LAND:               return "X86ISD::LAND";
  case X86ISD::VZEXT_MOVL:         return "X86ISD::VZEXT_MOVL";
  case X86ISD::VZEXT_LOAD:         return "X86ISD::VZEXT_LOAD";
  case X86ISD::VEXTRACT_STORE:     return "X86ISD::VEXTRACT_STORE";
  case X86ISD::VTRUNC:             return "X86ISD::VTRUNC";
  case X86ISD::VTRUNCS:            return "X86ISD::VTRUNCS";
  case X86ISD::VTRUNCUS:           return "X86ISD::VTRUNCUS";
  case X86ISD::VMTRUNC:            return "X86ISD::VMTRUNC";
  case X86ISD::VMTRUNCS:           return "X86ISD::VMTRUNCS";
  case X86ISD::VMTRUNCUS:          return "X86ISD::VMTRUNCUS";
  case X86ISD::VTRUNCSTORES:       return "X86ISD::VTRUNCSTORES";
  case X86ISD::VTRUNCSTOREUS:      return "X86ISD::VTRUNCSTOREUS";
  case X86ISD::VMTRUNCSTORES:      return "X86ISD::VMTRUNCSTORES";
  case X86ISD::VMTRUNCSTOREUS:     return "X86ISD::VMTRUNCSTOREUS";
  case X86ISD::VFPEXT:             return "X86ISD::VFPEXT";
  case X86ISD::VFPEXT_SAE:         return "X86ISD::VFPEXT_SAE";
  case X86ISD::VFPEXTS:            return "X86ISD::VFPEXTS";
  case X86ISD::VFPEXTS_SAE:        return "X86ISD::VFPEXTS_SAE";
  case X86ISD::VFPROUND:           return "X86ISD::VFPROUND";
  case X86ISD::VMFPROUND:          return "X86ISD::VMFPROUND";
  case X86ISD::VFPROUND_RND:       return "X86ISD::VFPROUND_RND";
  case X86ISD::VFPROUNDS:          return "X86ISD::VFPROUNDS";
  case X86ISD::VFPROUNDS_RND:      return "X86ISD::VFPROUNDS_RND";
  case X86ISD::VSHLDQ:             return "X86ISD::VSHLDQ";
  case X86ISD::VSRLDQ:             return "X86ISD::VSRLDQ";
  case X86ISD::VSHL:               return "X86ISD::VSHL";
  case X86ISD::VSRL:               return "X86ISD::VSRL";
  case X86ISD::VSRA:               return "X86ISD::VSRA";
  case X86ISD::VSHLI:              return "X86ISD::VSHLI";
  case X86ISD::VSRLI:              return "X86ISD::VSRLI";
  case X86ISD::VSRAI:              return "X86ISD::VSRAI";
  case X86ISD::VSHLV:              return "X86ISD::VSHLV";
  case X86ISD::VSRLV:              return "X86ISD::VSRLV";
  case X86ISD::VSRAV:              return "X86ISD::VSRAV";
  case X86ISD::VROTLI:             return "X86ISD::VROTLI";
  case X86ISD::VROTRI:             return "X86ISD::VROTRI";
  case X86ISD::VPPERM:             return "X86ISD::VPPERM";
  case X86ISD::CMPP:               return "X86ISD::CMPP";
  case X86ISD::PCMPEQ:             return "X86ISD::PCMPEQ";
  case X86ISD::PCMPGT:             return "X86ISD::PCMPGT";
  case X86ISD::PHMINPOS:           return "X86ISD::PHMINPOS";
  case X86ISD::ADD:                return "X86ISD::ADD";
  case X86ISD::SUB:                return "X86ISD::SUB";
  case X86ISD::ADC:                return "X86ISD::ADC";
  case X86ISD::SBB:                return "X86ISD::SBB";
  case X86ISD::SMUL:               return "X86ISD::SMUL";
  case X86ISD::UMUL:               return "X86ISD::UMUL";
  case X86ISD::OR:                 return "X86ISD::OR";
  case X86ISD::XOR:                return "X86ISD::XOR";
  case X86ISD::AND:                return "X86ISD::AND";
  case X86ISD::BEXTR:              return "X86ISD::BEXTR";
  case X86ISD::BZHI:               return "X86ISD::BZHI";
  case X86ISD::MUL_IMM:            return "X86ISD::MUL_IMM";
  case X86ISD::MOVMSK:             return "X86ISD::MOVMSK";
  case X86ISD::PTEST:              return "X86ISD::PTEST";
  case X86ISD::TESTP:              return "X86ISD::TESTP";
  case X86ISD::KORTEST:            return "X86ISD::KORTEST";
  case X86ISD::KTEST:              return "X86ISD::KTEST";
  case X86ISD::KADD:               return "X86ISD::KADD";
  case X86ISD::KSHIFTL:            return "X86ISD::KSHIFTL";
  case X86ISD::KSHIFTR:            return "X86ISD::KSHIFTR";
  case X86ISD::PACKSS:             return "X86ISD::PACKSS";
  case X86ISD::PACKUS:             return "X86ISD::PACKUS";
  case X86ISD::PALIGNR:            return "X86ISD::PALIGNR";
  case X86ISD::VALIGN:             return "X86ISD::VALIGN";
  case X86ISD::VSHLD:              return "X86ISD::VSHLD";
  case X86ISD::VSHRD:              return "X86ISD::VSHRD";
  case X86ISD::VSHLDV:             return "X86ISD::VSHLDV";
  case X86ISD::VSHRDV:             return "X86ISD::VSHRDV";
  case X86ISD::PSHUFD:             return "X86ISD::PSHUFD";
  case X86ISD::PSHUFHW:            return "X86ISD::PSHUFHW";
  case X86ISD::PSHUFLW:            return "X86ISD::PSHUFLW";
  case X86ISD::SHUFP:              return "X86ISD::SHUFP";
  case X86ISD::SHUF128:            return "X86ISD::SHUF128";
  case X86ISD::MOVLHPS:            return "X86ISD::MOVLHPS";
  case X86ISD::MOVHLPS:            return "X86ISD::MOVHLPS";
  case X86ISD::MOVDDUP:            return "X86ISD::MOVDDUP";
  case X86ISD::MOVSHDUP:           return "X86ISD::MOVSHDUP";
  case X86ISD::MOVSLDUP:           return "X86ISD::MOVSLDUP";
  case X86ISD::MOVSD:              return "X86ISD::MOVSD";
  case X86ISD::MOVSS:              return "X86ISD::MOVSS";
  case X86ISD::UNPCKL:             return "X86ISD::UNPCKL";
  case X86ISD::UNPCKH:             return "X86ISD::UNPCKH";
  case X86ISD::VBROADCAST:         return "X86ISD::VBROADCAST";
  case X86ISD::VBROADCAST_LOAD:    return "X86ISD::VBROADCAST_LOAD";
  case X86ISD::VBROADCASTM:        return "X86ISD::VBROADCASTM";
  case X86ISD::SUBV_BROADCAST:     return "X86ISD::SUBV_BROADCAST";
  case X86ISD::VPERMILPV:          return "X86ISD::VPERMILPV";
  case X86ISD::VPERMILPI:          return "X86ISD::VPERMILPI";
  case X86ISD::VPERM2X128:         return "X86ISD::VPERM2X128";
  case X86ISD::VPERMV:             return "X86ISD::VPERMV";
  case X86ISD::VPERMV3:            return "X86ISD::VPERMV3";
  case X86ISD::VPERMI:             return "X86ISD::VPERMI";
  case X86ISD::VPTERNLOG:          return "X86ISD::VPTERNLOG";
  case X86ISD::VFIXUPIMM:          return "X86ISD::VFIXUPIMM";
  case X86ISD::VFIXUPIMM_SAE:      return "X86ISD::VFIXUPIMM_SAE";
  case X86ISD::VFIXUPIMMS:         return "X86ISD::VFIXUPIMMS";
  case X86ISD::VFIXUPIMMS_SAE:     return "X86ISD::VFIXUPIMMS_SAE";
  case X86ISD::VRANGE:             return "X86ISD::VRANGE";
  case X86ISD::VRANGE_SAE:         return "X86ISD::VRANGE_SAE";
  case X86ISD::VRANGES:            return "X86ISD::VRANGES";
  case X86ISD::VRANGES_SAE:        return "X86ISD::VRANGES_SAE";
  case X86ISD::PMULUDQ:            return "X86ISD::PMULUDQ";
  case X86ISD::PMULDQ:             return "X86ISD::PMULDQ";
  case X86ISD::PSADBW:             return "X86ISD::PSADBW";
  case X86ISD::DBPSADBW:           return "X86ISD::DBPSADBW";
  case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
  case X86ISD::VAARG_64:           return "X86ISD::VAARG_64";
  case X86ISD::WIN_ALLOCA:         return "X86ISD::WIN_ALLOCA";
  case X86ISD::MEMBARRIER:         return "X86ISD::MEMBARRIER";
  case X86ISD::MFENCE:             return "X86ISD::MFENCE";
  case X86ISD::SEG_ALLOCA:         return "X86ISD::SEG_ALLOCA";
  case X86ISD::SAHF:               return "X86ISD::SAHF";
  case X86ISD::RDRAND:             return "X86ISD::RDRAND";
  case X86ISD::RDSEED:             return "X86ISD::RDSEED";
  case X86ISD::RDPKRU:             return "X86ISD::RDPKRU";
  case X86ISD::WRPKRU:             return "X86ISD::WRPKRU";
  case X86ISD::VPMADDUBSW:         return "X86ISD::VPMADDUBSW";
  case X86ISD::VPMADDWD:           return "X86ISD::VPMADDWD";
  case X86ISD::VPSHA:              return "X86ISD::VPSHA";
  case X86ISD::VPSHL:              return "X86ISD::VPSHL";
  case X86ISD::VPCOM:              return "X86ISD::VPCOM";
  case X86ISD::VPCOMU:             return "X86ISD::VPCOMU";
  case X86ISD::VPERMIL2:           return "X86ISD::VPERMIL2";
  case X86ISD::FMSUB:              return "X86ISD::FMSUB";
  case X86ISD::FNMADD:             return "X86ISD::FNMADD";
  case X86ISD::FNMSUB:             return "X86ISD::FNMSUB";
  case X86ISD::FMADDSUB:           return "X86ISD::FMADDSUB";
  case X86ISD::FMSUBADD:           return "X86ISD::FMSUBADD";
  case X86ISD::FMADD_RND:          return "X86ISD::FMADD_RND";
  case X86ISD::FNMADD_RND:         return "X86ISD::FNMADD_RND";
  case X86ISD::FMSUB_RND:          return "X86ISD::FMSUB_RND";
  case X86ISD::FNMSUB_RND:         return "X86ISD::FNMSUB_RND";
  case X86ISD::FMADDSUB_RND:       return "X86ISD::FMADDSUB_RND";
  case X86ISD::FMSUBADD_RND:       return "X86ISD::FMSUBADD_RND";
  case X86ISD::VPMADD52H:          return "X86ISD::VPMADD52H";
  case X86ISD::VPMADD52L:          return "X86ISD::VPMADD52L";
  case X86ISD::VRNDSCALE:          return "X86ISD::VRNDSCALE";
  case X86ISD::VRNDSCALE_SAE:      return "X86ISD::VRNDSCALE_SAE";
  case X86ISD::VRNDSCALES:         return "X86ISD::VRNDSCALES";
  case X86ISD::VRNDSCALES_SAE:     return "X86ISD::VRNDSCALES_SAE";
  case X86ISD::VREDUCE:            return "X86ISD::VREDUCE";
  case X86ISD::VREDUCE_SAE:        return "X86ISD::VREDUCE_SAE";
  case X86ISD::VREDUCES:           return "X86ISD::VREDUCES";
  case X86ISD::VREDUCES_SAE:       return "X86ISD::VREDUCES_SAE";
  case X86ISD::VGETMANT:           return "X86ISD::VGETMANT";
  case X86ISD::VGETMANT_SAE:       return "X86ISD::VGETMANT_SAE";
  case X86ISD::VGETMANTS:          return "X86ISD::VGETMANTS";
  case X86ISD::VGETMANTS_SAE:      return "X86ISD::VGETMANTS_SAE";
  case X86ISD::PCMPESTR:           return "X86ISD::PCMPESTR";
  case X86ISD::PCMPISTR:           return "X86ISD::PCMPISTR";
  case X86ISD::XTEST:              return "X86ISD::XTEST";
  case X86ISD::COMPRESS:           return "X86ISD::COMPRESS";
  case X86ISD::EXPAND:             return "X86ISD::EXPAND";
  case X86ISD::SELECTS:            return "X86ISD::SELECTS";
  case X86ISD::ADDSUB:             return "X86ISD::ADDSUB";
  case X86ISD::RCP14:              return "X86ISD::RCP14";
  case X86ISD::RCP14S:             return "X86ISD::RCP14S";
  case X86ISD::RCP28:              return "X86ISD::RCP28";
  case X86ISD::RCP28_SAE:          return "X86ISD::RCP28_SAE";
  case X86ISD::RCP28S:             return "X86ISD::RCP28S";
  case X86ISD::RCP28S_SAE:         return "X86ISD::RCP28S_SAE";
  case X86ISD::EXP2:               return "X86ISD::EXP2";
  case X86ISD::EXP2_SAE:           return "X86ISD::EXP2_SAE";
  case X86ISD::RSQRT14:            return "X86ISD::RSQRT14";
  case X86ISD::RSQRT14S:           return "X86ISD::RSQRT14S";
  case X86ISD::RSQRT28:            return "X86ISD::RSQRT28";
  case X86ISD::RSQRT28_SAE:        return "X86ISD::RSQRT28_SAE";
  case X86ISD::RSQRT28S:           return "X86ISD::RSQRT28S";
  case X86ISD::RSQRT28S_SAE:       return "X86ISD::RSQRT28S_SAE";
  case X86ISD::FADD_RND:           return "X86ISD::FADD_RND";
  case X86ISD::FADDS:              return "X86ISD::FADDS";
  case X86ISD::FADDS_RND:          return "X86ISD::FADDS_RND";
  case X86ISD::FSUB_RND:           return "X86ISD::FSUB_RND";
  case X86ISD::FSUBS:              return "X86ISD::FSUBS";
  case X86ISD::FSUBS_RND:          return "X86ISD::FSUBS_RND";
  case X86ISD::FMUL_RND:           return "X86ISD::FMUL_RND";
  case X86ISD::FMULS:              return "X86ISD::FMULS";
  case X86ISD::FMULS_RND:          return "X86ISD::FMULS_RND";
  case X86ISD::FDIV_RND:           return "X86ISD::FDIV_RND";
  case X86ISD::FDIVS:              return "X86ISD::FDIVS";
  case X86ISD::FDIVS_RND:          return "X86ISD::FDIVS_RND";
  case X86ISD::FSQRT_RND:          return "X86ISD::FSQRT_RND";
  case X86ISD::FSQRTS:             return "X86ISD::FSQRTS";
  case X86ISD::FSQRTS_RND:         return "X86ISD::FSQRTS_RND";
  case X86ISD::FGETEXP:            return "X86ISD::FGETEXP";
  case X86ISD::FGETEXP_SAE:        return "X86ISD::FGETEXP_SAE";
  case X86ISD::FGETEXPS:           return "X86ISD::FGETEXPS";
  case X86ISD::FGETEXPS_SAE:       return "X86ISD::FGETEXPS_SAE";
  case X86ISD::SCALEF:             return "X86ISD::SCALEF";
  case X86ISD::SCALEF_RND:         return "X86ISD::SCALEF_RND";
  case X86ISD::SCALEFS:            return "X86ISD::SCALEFS";
  case X86ISD::SCALEFS_RND:        return "X86ISD::SCALEFS_RND";
  case X86ISD::AVG:                return "X86ISD::AVG";
  case X86ISD::MULHRS:             return "X86ISD::MULHRS";
  case X86ISD::SINT_TO_FP_RND:     return "X86ISD::SINT_TO_FP_RND";
  case X86ISD::UINT_TO_FP_RND:     return "X86ISD::UINT_TO_FP_RND";
  case X86ISD::CVTTP2SI:           return "X86ISD::CVTTP2SI";
  case X86ISD::CVTTP2UI:           return "X86ISD::CVTTP2UI";
  case X86ISD::MCVTTP2SI:          return "X86ISD::MCVTTP2SI";
  case X86ISD::MCVTTP2UI:          return "X86ISD::MCVTTP2UI";
  case X86ISD::CVTTP2SI_SAE:       return "X86ISD::CVTTP2SI_SAE";
  case X86ISD::CVTTP2UI_SAE:       return "X86ISD::CVTTP2UI_SAE";
  case X86ISD::CVTTS2SI:           return "X86ISD::CVTTS2SI";
  case X86ISD::CVTTS2UI:           return "X86ISD::CVTTS2UI";
  case X86ISD::CVTTS2SI_SAE:       return "X86ISD::CVTTS2SI_SAE";
  case X86ISD::CVTTS2UI_SAE:       return "X86ISD::CVTTS2UI_SAE";
  case X86ISD::CVTSI2P:            return "X86ISD::CVTSI2P";
  case X86ISD::CVTUI2P:            return "X86ISD::CVTUI2P";
  case X86ISD::MCVTSI2P:           return "X86ISD::MCVTSI2P";
  case X86ISD::MCVTUI2P:           return "X86ISD::MCVTUI2P";
  case X86ISD::VFPCLASS:           return "X86ISD::VFPCLASS";
  case X86ISD::VFPCLASSS:          return "X86ISD::VFPCLASSS";
  case X86ISD::MULTISHIFT:         return "X86ISD::MULTISHIFT";
  case X86ISD::SCALAR_SINT_TO_FP:     return "X86ISD::SCALAR_SINT_TO_FP";
  case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
  case X86ISD::SCALAR_UINT_TO_FP:     return "X86ISD::SCALAR_UINT_TO_FP";
  case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
  case X86ISD::CVTPS2PH:           return "X86ISD::CVTPS2PH";
  case X86ISD::MCVTPS2PH:          return "X86ISD::MCVTPS2PH";
  case X86ISD::CVTPH2PS:           return "X86ISD::CVTPH2PS";
  case X86ISD::CVTPH2PS_SAE:       return "X86ISD::CVTPH2PS_SAE";
  case X86ISD::CVTP2SI:            return "X86ISD::CVTP2SI";
  case X86ISD::CVTP2UI:            return "X86ISD::CVTP2UI";
  case X86ISD::MCVTP2SI:           return "X86ISD::MCVTP2SI";
  case X86ISD::MCVTP2UI:           return "X86ISD::MCVTP2UI";
  case X86ISD::CVTP2SI_RND:        return "X86ISD::CVTP2SI_RND";
  case X86ISD::CVTP2UI_RND:        return "X86ISD::CVTP2UI_RND";
  case X86ISD::CVTS2SI:            return "X86ISD::CVTS2SI";
  case X86ISD::CVTS2UI:            return "X86ISD::CVTS2UI";
  case X86ISD::CVTS2SI_RND:        return "X86ISD::CVTS2SI_RND";
  case X86ISD::CVTS2UI_RND:        return "X86ISD::CVTS2UI_RND";
  case X86ISD::CVTNE2PS2BF16:      return "X86ISD::CVTNE2PS2BF16";
  case X86ISD::CVTNEPS2BF16:       return "X86ISD::CVTNEPS2BF16";
  case X86ISD::MCVTNEPS2BF16:      return "X86ISD::MCVTNEPS2BF16";
  case X86ISD::DPBF16PS:           return "X86ISD::DPBF16PS";
  case X86ISD::LWPINS:             return "X86ISD::LWPINS";
  case X86ISD::MGATHER:            return "X86ISD::MGATHER";
  case X86ISD::MSCATTER:           return "X86ISD::MSCATTER";
  case X86ISD::VPDPBUSD:           return "X86ISD::VPDPBUSD";
  case X86ISD::VPDPBUSDS:          return "X86ISD::VPDPBUSDS";
  case X86ISD::VPDPWSSD:           return "X86ISD::VPDPWSSD";
  case X86ISD::VPDPWSSDS:          return "X86ISD::VPDPWSSDS";
  case X86ISD::VPSHUFBITQMB:       return "X86ISD::VPSHUFBITQMB";
  case X86ISD::GF2P8MULB:          return "X86ISD::GF2P8MULB";
  case X86ISD::GF2P8AFFINEQB:      return "X86ISD::GF2P8AFFINEQB";
  case X86ISD::GF2P8AFFINEINVQB:   return "X86ISD::GF2P8AFFINEINVQB";
  case X86ISD::NT_CALL:            return "X86ISD::NT_CALL";
  case X86ISD::NT_BRIND:           return "X86ISD::NT_BRIND";
  case X86ISD::UMWAIT:             return "X86ISD::UMWAIT";
  case X86ISD::TPAUSE:             return "X86ISD::TPAUSE";
  case X86ISD::ENQCMD:             return "X86ISD::ENQCMD";
  case X86ISD::ENQCMDS:            return "X86ISD::ENQCMDS";
  case X86ISD::VP2INTERSECT:       return "X86ISD::VP2INTERSECT";
  }
  return nullptr;
}

/// Return true if the addressing mode represented by AM is legal for this
/// target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // X86 supports extremely general addressing modes.
  CodeModel::Model M = getTargetMachine().getCodeModel();

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
    return false;

  if (AM.BaseGV) {
    unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);

    // If a reference to this global requires an extra load, we can't fold it.
    if (isGlobalStubReference(GVFlags))
      return false;

    // If BaseGV requires a register for the PIC base, we cannot also have a
    // BaseReg specified.
    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
      return false;

    // If lower 4G is not available, then we must use rip-relative addressing.
    if ((M != CodeModel::Small || isPositionIndependent()) &&
        Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
      return false;
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg.  Only accept if there is
    // no basereg yet.
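    // For example, 3*reg can be matched as lea (%reg,%reg,2); the second reg
    // occupies the base-register slot, so it only works when no other base
    // register is needed.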
    if (AM.HasBaseReg)
      return false;
    break;
  default:  // Other stuff never works.
    return false;
  }

  return true;
}

bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
  unsigned Bits = Ty->getScalarSizeInBits();

  // 8-bit shifts are always expensive, but versions with a scalar amount aren't
  // particularly cheaper than those without.
  if (Bits == 8)
    return false;

  // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
  if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
      (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
    return false;

  // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
  // shifts just as cheap as scalar ones.
  if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
    return false;

  // AVX512BW has shifts such as vpsllvw.
  if (Subtarget.hasBWI() && Bits == 16)
    return false;

  // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
  // fully general vector.
  return true;
}

bool X86TargetLowering::isBinOp(unsigned Opcode) const {
  switch (Opcode) {
  // These are non-commutative binops.
  // TODO: Add more X86ISD opcodes once we have test coverage.
  case X86ISD::ANDNP:
  case X86ISD::PCMPGT:
  case X86ISD::FMAX:
  case X86ISD::FMIN:
  case X86ISD::FANDN:
    return true;
  }

  return TargetLoweringBase::isBinOp(Opcode);
}

bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
  switch (Opcode) {
  // TODO: Add more X86ISD opcodes once we have test coverage.
  case X86ISD::PCMPEQ:
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ:
  case X86ISD::FMAXC:
  case X86ISD::FMINC:
  case X86ISD::FAND:
  case X86ISD::FOR:
  case X86ISD::FXOR:
    return true;
  }

  return TargetLoweringBase::isCommutativeBinOp(Opcode);
}

bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}

bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}

bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}

bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Can also use sub to handle negated immediates.
  return isInt<32>(Imm);
}

bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}

bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}

bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
}

bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
}

bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2))
    return true;

  if (Val.getOpcode() != ISD::LOAD)
    return false;

  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // X86 has 8, 16, and 32-bit zero-extending loads.
    return true;
  }

  return false;
}

bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
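  // Don't fold an extend into a masked load; only plain vector loads are
  // considered here.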
  if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
    return false;

  EVT SrcVT = ExtVal.getOperand(0).getValueType();

  // There is no extending load for vXi1.
  if (SrcVT.getScalarType() == MVT::i1)
    return false;

  return true;
}

bool
X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  if (!Subtarget.hasAnyFMA())
    return false;

  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
  // i16 instructions are longer (0x66 prefix) and potentially slower.
  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}

/// Targets can use this to indicate that they only support *some*
/// VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
  if (!VT.isSimple())
    return false;

  // Not for i1 vectors
  if (VT.getSimpleVT().getScalarType() == MVT::i1)
    return false;

  // Very little shuffling can be done for 64-bit vectors right now.
  if (VT.getSimpleVT().getSizeInBits() == 64)
    return false;

  // We only care that the types being shuffled are legal. The lowering can
  // handle any possible shuffle mask that results.
  return isTypeLegal(VT.getSimpleVT());
}

bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
                                               EVT VT) const {
  // Don't convert an 'and' into a shuffle that we don't directly support.
  // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
  if (!Subtarget.hasAVX2())
    if (VT == MVT::v32i8 || VT == MVT::v16i16)
      return false;

  // Just delegate to the generic legality, clear masks aren't special.
  return isShuffleMaskLegal(Mask, VT);
}

bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
  // If the subtarget is using retpolines, we must not generate jump tables:
  // they are lowered to indirect branches.
  if (Subtarget.useRetpolineIndirectBranches())
    return false;

  // Otherwise, fallback on the generic logic.
  return TargetLowering::areJTsAllowed(Fn);
}

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

/// Utility function to emit xbegin specifying the start of an RTM region.
static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
                                     const TargetInstrInfo *TII) {
  DebugLoc DL = MI.getDebugLoc();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  // For the v = xbegin(), we generate
  //
  // thisMBB:
  //  xbegin fallMBB
  //
  // mainMBB:
  //  s0 = -1
  //
  // fallMBB:
  //  eax = # XABORT_DEF
  //  s1 = eax
  //
  // sinkMBB:
  //  v = phi(s0/mainMBB, s1/fallMBB)

  MachineBasicBlock *thisMBB = MBB;
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, fallMBB);
  MF->insert(I, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register fallDstReg = MRI.createVirtualRegister(RC);
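  // mainDstReg and fallDstReg hold the per-path results that the PHI in
  // sinkMBB merges back into DstReg.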

  // thisMBB:
  //  xbegin fallMBB
  //  # fallthrough to mainMBB
  //  # abort branches to fallMBB
  BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(fallMBB);

  // mainMBB:
  //  mainDstReg := -1
  BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
  BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
  mainMBB->addSuccessor(sinkMBB);

  // fallMBB:
  //  ; pseudo instruction to model hardware's definition from XABORT
  //  EAX := XABORT_DEF
  //  fallDstReg := EAX
  BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
  BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
      .addReg(X86::EAX);
  fallMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  //  DstReg := phi(mainDstReg/mainMBB, fallDstReg/fallMBB)
  BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
      .addReg(mainDstReg).addMBB(mainMBB)
      .addReg(fallDstReg).addMBB(fallMBB);

  MI.eraseFromParent();
  return sinkMBB;
}



MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit va_arg instruction on X86-64.

  // Operands to this pseudo-instruction:
  // 0  ) Output        : destination address (reg)
  // 1-5) Input         : va_list address (addr, i64mem)
  // 6  ) ArgSize       : Size (in bytes) of vararg type
  // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
  // 8  ) Align         : Alignment of type
  // 9  ) EFLAGS (implicit-def)

  assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
  static_assert(X86::AddrNumOperands == 5,
                "VAARG_64 assumes 5 address operands");

  Register DestReg = MI.getOperand(0).getReg();
  MachineOperand &Base = MI.getOperand(1);
  MachineOperand &Scale = MI.getOperand(2);
  MachineOperand &Index = MI.getOperand(3);
  MachineOperand &Disp = MI.getOperand(4);
  MachineOperand &Segment = MI.getOperand(5);
  unsigned ArgSize = MI.getOperand(6).getImm();
  unsigned ArgMode = MI.getOperand(7).getImm();
  unsigned Align = MI.getOperand(8).getImm();

  MachineFunction *MF = MBB->getParent();

  // Memory Reference
  assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");

  MachineMemOperand *OldMMO = MI.memoperands().front();

  // Clone the MMO into two separate MMOs for loading and storing
  MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
  MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);

  // Machine Information
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
  const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
  DebugLoc DL = MI.getDebugLoc();

  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8
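  //
  // The reg_save_area holds the 6 integer argument registers in 8-byte slots
  // followed by the 8 XMM argument registers in 16-byte slots, so gp_offset
  // ranges over [0, 48) and fp_offset over [48, 176).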

  unsigned TotalNumIntRegs = 6;
  unsigned TotalNumXMMRegs = 8;
  bool UseGPOffset = (ArgMode == 1);
  bool UseFPOffset = (ArgMode == 2);
  unsigned MaxOffset = TotalNumIntRegs * 8 +
                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);

  // Align ArgSize to a multiple of 8.
  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
  bool NeedsAlign = (Align > 8);

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *overflowMBB;
  MachineBasicBlock *offsetMBB;
  MachineBasicBlock *endMBB;

  unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
  unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
  unsigned OffsetReg = 0;

  if (!UseGPOffset && !UseFPOffset) {
    // If we only pull from the overflow region, we don't create a branch.
    // We don't need to alter control flow.
    OffsetDestReg = 0; // unused
    OverflowDestReg = DestReg;

    offsetMBB = nullptr;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
    // First emit code to check if gp_offset (or fp_offset) is below the bound.
    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
    // If not, pull from overflow_area. (branch to overflowMBB)
    //
    //       thisMBB
    //         |     .
    //         |        .
    //     offsetMBB   overflowMBB
    //         |        .
    //         |     .
    //        endMBB

    // Registers for the PHI in endMBB
    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);

    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);

    MachineFunction::iterator MBBIter = ++MBB->getIterator();

    // Insert the new basic blocks
    MF->insert(MBBIter, offsetMBB);
    MF->insert(MBBIter, overflowMBB);
    MF->insert(MBBIter, endMBB);

    // Transfer the remainder of MBB and its successor edges to endMBB.
    endMBB->splice(endMBB->begin(), thisMBB,
                   std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

    // Make offsetMBB and overflowMBB successors of thisMBB
    thisMBB->addSuccessor(offsetMBB);
    thisMBB->addSuccessor(overflowMBB);

    // endMBB is a successor of both offsetMBB and overflowMBB
    offsetMBB->addSuccessor(endMBB);
    overflowMBB->addSuccessor(endMBB);

    // Load the offset value into a register
    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, UseFPOffset ? 4 : 0)
        .add(Segment)
        .setMemRefs(LoadOnlyMMO);

    // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
      .addReg(OffsetReg)
      .addImm(MaxOffset + 8 - ArgSizeA8);

    // Branch to "overflowMBB" if offset >= max
    // Fall through to "offsetMBB" otherwise
    BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
      .addMBB(overflowMBB).addImm(X86::COND_AE);
  }

  // In offsetMBB, emit code to use the reg_save_area.
  if (offsetMBB) {
    assert(OffsetReg != 0);

    // Read the reg_save_area address.
    Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, 16)
        .add(Segment)
        .setMemRefs(LoadOnlyMMO);

    // Zero-extend the offset
    Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
        .addImm(0)
        .addReg(OffsetReg)
        .addImm(X86::sub_32bit);

    // Add the offset to the reg_save_area to get the final address.
    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
      .addReg(OffsetReg64)
      .addReg(RegSaveReg);

    // Compute the offset for the next argument
    Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
      .addReg(OffsetReg)
      .addImm(UseFPOffset ? 16 : 8);

    // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, UseFPOffset ? 4 : 0)
        .add(Segment)
        .addReg(NextOffsetReg)
        .setMemRefs(StoreOnlyMMO);

    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
      .addMBB(endMBB);
  }

  //
  // Emit code to use overflow area
  //

  // Load the overflow_area address into a register.
  Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
      .add(Base)
      .add(Scale)
      .add(Index)
      .addDisp(Disp, 8)
      .add(Segment)
      .setMemRefs(LoadOnlyMMO);

  // If we need to align it, do so. Otherwise, just copy the address
  // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
    assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
    Register TmpReg = MRI.createVirtualRegister(AddrRegClass);

    // aligned_addr = (addr + (align-1)) & ~(align-1)
    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
      .addReg(OverflowAddrReg)
      .addImm(Align-1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
      .addReg(TmpReg)
      .addImm(~(uint64_t)(Align-1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
      .addReg(OverflowAddrReg);
  }

  // Compute the next overflow address after this argument.
  // (the overflow address should be kept 8-byte aligned)
  Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
    .addReg(OverflowDestReg)
    .addImm(ArgSizeA8);

  // Store the new overflow address.
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
      .add(Base)
      .add(Scale)
      .add(Index)
      .addDisp(Disp, 8)
      .add(Segment)
      .addReg(NextAddrReg)
      .setMemRefs(StoreOnlyMMO);

  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
            TII->get(X86::PHI), DestReg)
      .addReg(OffsetDestReg).addMBB(offsetMBB)
      .addReg(OverflowDestReg).addMBB(overflowMBB);
  }

  // Erase the pseudo instruction
  MI.eraseFromParent();

  return endMBB;
}

MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  // Emit code to save XMM registers to the stack. The ABI says that the
  // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them;
  // however, this code takes a simpler approach and just executes all
  // of the stores if %al is non-zero. It's less code, it's probably
  // easier on the hardware branch predictor, and stores aren't all that
  // expensive anyway.

  // Create the new basic blocks. One block contains all the XMM stores,
  // and one block is the final destination regardless of whether any
  // stores were performed.
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *F = MBB->getParent();
  MachineFunction::iterator MBBIter = ++MBB->getIterator();
  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, XMMSaveMBB);
  F->insert(MBBIter, EndMBB);

  // Transfer the remainder of MBB and its successor edges to EndMBB.
  EndMBB->splice(EndMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // The original block will now fall through to the XMM save block.
  MBB->addSuccessor(XMMSaveMBB);
  // The XMMSaveMBB will fall through to the end block.
  XMMSaveMBB->addSuccessor(EndMBB);

  // Now add the instructions.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register CountReg = MI.getOperand(0).getReg();
  int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
  int64_t VarArgsFPOffset = MI.getOperand(2).getImm();

  if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
    // If %al is 0, branch around the XMM save block.
    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
    BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
    MBB->addSuccessor(EndMBB);
  }

  // Make sure the last operand is EFLAGS, which gets clobbered by the branch
  // that was just emitted, but clearly shouldn't be "saved".
  assert((MI.getNumOperands() <= 3 ||
          !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
          MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
         "Expected last argument to be EFLAGS");
  unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
  // In the XMM save block, save all the XMM argument registers.
  for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
    MachineMemOperand *MMO = F->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
        MachineMemOperand::MOStore,
        /*Size=*/16, /*Align=*/16);
    BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
        .addFrameIndex(RegSaveFrameIndex)
        .addImm(/*Scale=*/1)
        .addReg(/*IndexReg=*/0)
        .addImm(/*Disp=*/Offset)
        .addReg(/*Segment=*/0)
        .addReg(MI.getOperand(i).getReg())
        .addMemOperand(MMO);
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return EndMBB;
}

// The EFLAGS operand of SelectItr might be missing a kill marker
// because there were multiple uses of EFLAGS, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
                                     MachineBasicBlock* BB,
                                     const TargetRegisterInfo* TRI) {
  // Scan forward through BB for a use/def of EFLAGS.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(X86::EFLAGS))
      return false;
    if (mi.definesRegister(X86::EFLAGS))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
  if (miI == BB->end()) {
    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
                                          sEnd = BB->succ_end();
         sItr != sEnd; ++sItr) {
      MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(X86::EFLAGS))
        return false;
    }
  }

  // We found a def, or hit the end of the basic block and EFLAGS wasn't live
  // out. SelectMI should have a kill flag on EFLAGS.
  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
  return true;
}

// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic-block with
// conditional jump around it.
static bool isCMOVPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case X86::CMOV_FR32:
  case X86::CMOV_FR32X:
  case X86::CMOV_FR64:
  case X86::CMOV_FR64X:
  case X86::CMOV_GR8:
  case X86::CMOV_GR16:
  case X86::CMOV_GR32:
  case X86::CMOV_RFP32:
  case X86::CMOV_RFP64:
  case X86::CMOV_RFP80:
  case X86::CMOV_VR128:
  case X86::CMOV_VR128X:
  case X86::CMOV_VR256:
  case X86::CMOV_VR256X:
  case X86::CMOV_VR512:
  case X86::CMOV_VK2:
  case X86::CMOV_VK4:
  case X86::CMOV_VK8:
  case X86::CMOV_VK16:
  case X86::CMOV_VK32:
  case X86::CMOV_VK64:
    return true;

  default:
    return false;
  }
}

// Helper function, which inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
// in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for the
// last PHI function inserted.
static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
    MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
    MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
    MachineBasicBlock *SinkMBB) {
  MachineFunction *MF = TrueMBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  DebugLoc DL = MIItBegin->getDebugLoc();

  X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);

  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one.  Later CMOVs may reference the results of earlier CMOVs, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from each earlier PHI's
  // destination register to the registers that went into the PHI.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
  MachineInstrBuilder MIB;

  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    Register DestReg = MIIt->getOperand(0).getReg();
    Register Op1Reg = MIIt->getOperand(1).getReg();
    Register Op2Reg = MIIt->getOperand(2).getReg();

    // If this CMOV we are generating is the opposite condition from
    // the jump we generated, then we have to swap the operands for the
    // PHI that is going to be generated.
    if (MIIt->getOperand(3).getImm() == OppCC)
      std::swap(Op1Reg, Op2Reg);

    if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
      Op1Reg = RegRewriteTable[Op1Reg].first;

    if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
      Op2Reg = RegRewriteTable[Op2Reg].second;

    MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
              .addReg(Op1Reg)
              .addMBB(FalseMBB)
              .addReg(Op2Reg)
              .addMBB(TrueMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }

  return MIB;
}

// Lower cascaded selects of the form
// (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2).
MachineBasicBlock *
X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
                                             MachineInstr &SecondCascadedCMOV,
                                             MachineBasicBlock *ThisMBB) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = FirstCMOV.getDebugLoc();

  // We lower cascaded CMOVs such as
  //
  //   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
  //
  // to two successive branches.
  //
  // Without this, we would add a PHI between the two jumps, which ends up
  // creating a few copies all around. For instance, for
  //
  //    (sitofp (zext (fcmp une)))
  //
  // we would generate:
  //
  //         ucomiss %xmm1, %xmm0
  //         movss  <1.0f>, %xmm0
  //         movaps  %xmm0, %xmm1
  //         jne     .LBB5_2
  //         xorps   %xmm1, %xmm1
  // .LBB5_2:
  //         jp      .LBB5_4
  //         movaps  %xmm1, %xmm0
  // .LBB5_4:
  //         retq
  //
  // because this custom-inserter would have generated:
  //
  //   A
  //   | \
  //   |  B
  //   | /
  //   C
  //   | \
  //   |  D
  //   | /
  //   E
  //
  // A: X = ...; Y = ...
  // B: empty
  // C: Z = PHI [X, A], [Y, B]
  // D: empty
  // E: PHI [X, C], [Z, D]
  //
  // If we lower both CMOVs in a single step, we can instead generate:
  //
  //   A
  //   | \
  //   |  C
  //   | /|
  //   |/ |
  //   |  |
  //   |  D
  //   | /
  //   E
  //
  // A: X = ...; Y = ...
  // D: empty
  // E: PHI [X, A], [X, C], [Y, D]
  //
  // Which, in our sitofp/fcmp example, gives us something like:
  //
  //         ucomiss %xmm1, %xmm0
  //         movss  <1.0f>, %xmm0
  //         jne     .LBB5_4
  //         jp      .LBB5_4
  //         xorps   %xmm0, %xmm0
  // .LBB5_4:
  //         retq
  //

  // We lower cascaded CMOV into two successive branches to the same block.
  // EFLAGS is used by both, so mark it as live in the second.
  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
  MachineFunction *F = ThisMBB->getParent();
  MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator It = ++ThisMBB->getIterator();
  F->insert(It, FirstInsertedMBB);
  F->insert(It, SecondInsertedMBB);
  F->insert(It, SinkMBB);

  // For a cascaded CMOV, we lower it to two successive branches to
  // the same block (SinkMBB).  EFLAGS is used by both, so mark it as live in
  // the FirstInsertedMBB.
  FirstInsertedMBB->addLiveIn(X86::EFLAGS);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and the second inserted block.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
      !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
    SecondInsertedMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(FirstCMOV)),
                  ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Fallthrough block for ThisMBB.
  ThisMBB->addSuccessor(FirstInsertedMBB);
  // The true block target of the first branch is always SinkMBB.
  ThisMBB->addSuccessor(SinkMBB);
  // Fallthrough block for FirstInsertedMBB.
  FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
  // The true block for the branch of FirstInsertedMBB.
  FirstInsertedMBB->addSuccessor(SinkMBB);
  // This is fallthrough.
  SecondInsertedMBB->addSuccessor(SinkMBB);

  // Create the conditional branch instructions.
  X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
  BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);

  X86::CondCode SecondCC =
      X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
  BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1))
      .addMBB(SinkMBB)
      .addImm(SecondCC);

  //  SinkMBB:
  //   %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
  Register DestReg = FirstCMOV.getOperand(0).getReg();
  Register Op1Reg = FirstCMOV.getOperand(1).getReg();
  Register Op2Reg = FirstCMOV.getOperand(2).getReg();
  MachineInstrBuilder MIB =
      BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
          .addReg(Op1Reg)
          .addMBB(SecondInsertedMBB)
          .addReg(Op2Reg)
          .addMBB(ThisMBB);

  // The incoming value from FirstInsertedMBB is the same as the one from
  // ThisMBB (the True operand of the SELECT_CC/CMOV nodes).
  MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
  // Copy the PHI result to the register defined by the second CMOV.
  BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
          TII->get(TargetOpcode::COPY),
          SecondCascadedCMOV.getOperand(0).getReg())
      .addReg(FirstCMOV.getOperand(0).getReg());

  // Now remove the CMOVs.
  FirstCMOV.eraseFromParent();
  SecondCascadedCMOV.eraseFromParent();

  return SinkMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
                                     MachineBasicBlock *ThisMBB) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between and a branch opcode to use.

  //  ThisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC SinkMBB
  //   fallthrough --> FalseMBB

  // This code lowers all pseudo-CMOV instructions. Generally it lowers these
  // as described above, by inserting a BB, and then making a PHI at the join
  // point to select the true and false operands of the CMOV in the PHI.
  //
  // The code also handles two different cases of multiple CMOV opcodes
  // in a row.
  //
  // Case 1:
  // In this case, there are multiple CMOVs in a row, all of which are based on
  // the same condition setting (or the exact opposite condition setting).
  // In this case we can lower all the CMOVs using a single inserted BB, and
  // then make a number of PHIs at the join point to model the CMOVs. The only
  // trickiness here is that in a case like:
  //
  // t2 = CMOV cond1 t1, f1
  // t3 = CMOV cond1 t2, f2
  //
  // when rewriting this into PHIs, we have to perform some renaming on the
  // temps since you cannot have a PHI operand refer to a PHI result earlier
  // in the same block.  The "simple" but wrong lowering would be:
  //
  // t2 = PHI t1(BB1), f1(BB2)
  // t3 = PHI t2(BB1), f2(BB2)
  //
  // but clearly t2 is not defined in BB1, so that is incorrect. The proper
  // renaming is to note that on the path through BB1, t2 is really just a
  // copy of t1, and do that renaming, properly generating:
  //
  // t2 = PHI t1(BB1), f1(BB2)
  // t3 = PHI t1(BB1), f2(BB2)
  //
  // Case 2:
  // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
  // function - EmitLoweredCascadedSelect.

  X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
  MachineInstr *LastCMOV = &MI;
  MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);

  // Check for case 1, where there are multiple CMOVs with the same condition
  // first.  Of the two cases of multiple CMOV lowerings, case 1 reduces the
  // number of jumps the most.

  if (isCMOVPseudo(MI)) {
    // See if we have a string of CMOVS with the same condition. Skip over
    // intervening debug insts.
    while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
           (NextMIIt->getOperand(3).getImm() == CC ||
            NextMIIt->getOperand(3).getImm() == OppCC)) {
      LastCMOV = &*NextMIIt;
      ++NextMIIt;
      NextMIIt = skipDebugInstructionsForward(NextMIIt, ThisMBB->end());
    }
  }

  // Check for case 2, but only if we didn't already find case 1, as indicated
  // by LastCMOV still being &MI.
  if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
      NextMIIt->getOpcode() == MI.getOpcode() &&
      NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
      NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
      NextMIIt->getOperand(1).isKill()) {
    return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
  }

  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
  MachineFunction *F = ThisMBB->getParent();
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator It = ++ThisMBB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, SinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  if (!LastCMOV->killsRegister(X86::EFLAGS) &&
      !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
    FalseMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer any debug instructions inside the CMOV sequence to the sunk block.
  auto DbgEnd = MachineBasicBlock::iterator(LastCMOV);
  auto DbgIt = MachineBasicBlock::iterator(MI);
  while (DbgIt != DbgEnd) {
    auto Next = std::next(DbgIt);
    if (DbgIt->isDebugInstr())
      SinkMBB->push_back(DbgIt->removeFromParent());
    DbgIt = Next;
  }

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->end(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)),
                  ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Fallthrough block for ThisMBB.
  ThisMBB->addSuccessor(FalseMBB);
  // The true block target of the first (or only) branch is always SinkMBB.
  ThisMBB->addSuccessor(SinkMBB);
  // Fallthrough block for FalseMBB.
  FalseMBB->addSuccessor(SinkMBB);

  // Create the conditional branch instruction.
  BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);

  //  SinkMBB:
  //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
  //  ...
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd =
      std::next(MachineBasicBlock::iterator(LastCMOV));
  createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);

  // Now remove the CMOV(s).
  ThisMBB->erase(MIItBegin, MIItEnd);

  return SinkMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
                                        MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  assert(MF->shouldSplitStack());

  const bool Is64Bit = Subtarget.is64Bit();
  const bool IsLP64 = Subtarget.isTarget64BitLP64();

  const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
  const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
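  // These select the thread-local slot holding the current stacklet's stack
  // limit, which is compared against the adjusted stack pointer below.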

  // BB:
  //  ... [Till the alloca]
  // If stacklet is not large enough, jump to mallocMBB
  //
  // bumpMBB:
  //  Allocate by subtracting from RSP
  //  Jump to continueMBB
  //
  // mallocMBB:
  //  Allocate by call to runtime
  //
  // continueMBB:
  //  ...
  //  [rest of original BB]
  //

  MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *AddrRegClass =
      getRegClassFor(getPointerTy(MF->getDataLayout()));

  unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
           SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
           sizeVReg = MI.getOperand(1).getReg(),
           physSPReg =
               IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;

  MachineFunction::iterator MBBIter = ++BB->getIterator();

  MF->insert(MBBIter, bumpMBB);
  MF->insert(MBBIter, mallocMBB);
  MF->insert(MBBIter, continueMBB);

  continueMBB->splice(continueMBB->begin(), BB,
                      std::next(MachineBasicBlock::iterator(MI)), BB->end());
  continueMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add code to the main basic block to check if the stack limit has been hit,
  // and if so, jump to mallocMBB otherwise to bumpMBB.
  BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
    .addReg(tmpSPVReg).addReg(sizeVReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
    .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
    .addReg(SPLimitVReg);
  BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);

  // bumpMBB simply decreases the stack pointer, since we know the current
  // stacklet has enough space.
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Calls into a routine in libgcc to allocate more space from the heap.
  const uint32_t *RegMask =
      Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
  if (IsLP64) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::RDI, RegState::Implicit)
      .addReg(X86::RAX, RegState::ImplicitDefine);
  } else if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EDI, RegState::Implicit)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
      .addImm(12);
    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
      .addImm(16);

  BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
    .addReg(IsLP64 ? X86::RAX : X86::EAX);
  BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Set up the CFG correctly.
  BB->addSuccessor(bumpMBB);
  BB->addSuccessor(mallocMBB);
  mallocMBB->addSuccessor(continueMBB);
  bumpMBB->addSuccessor(continueMBB);

  // Take care of the PHI nodes.
  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
          MI.getOperand(0).getReg())
      .addReg(mallocPtrVReg)
      .addMBB(mallocMBB)
      .addReg(bumpSPPtrVReg)
      .addMBB(bumpMBB);

  // Delete the original pseudo instruction.
  MI.eraseFromParent();

  // And we're done.
  return continueMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
                                       MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
  DebugLoc DL = MI.getDebugLoc();

  assert(!isAsynchronousEHPersonality(
             classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
         "SEH does not use catchret!");

  // Only 32-bit EH needs to worry about manually restoring stack pointers.
  if (!Subtarget.is32Bit())
    return BB;

  // C++ EH creates a new target block to hold the restore code, and wires up
  // the new block to the return destination with a normal JMP_4.
  MachineBasicBlock *RestoreMBB =
      MF->CreateMachineBasicBlock(BB->getBasicBlock());
  assert(BB->succ_size() == 1);
  MF->insert(std::next(BB->getIterator()), RestoreMBB);
  RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
  BB->addSuccessor(RestoreMBB);
  MI.getOperand(0).setMBB(RestoreMBB);

  auto RestoreMBBI = RestoreMBB->begin();
  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
                                       MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const Constant *PerFn = MF->getFunction().getPersonalityFn();
  bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
  // Only 32-bit SEH requires special handling for catchpad.
  if (IsSEH && Subtarget.is32Bit()) {
    const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
    DebugLoc DL = MI.getDebugLoc();
    BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
  }
  MI.eraseFromParent();
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
                                      MachineBasicBlock *BB) const {
  // Here we replace TLSADDR with the sequence:
  // adjust_stackdown -> TLSADDR -> adjust_stackup.
  // We need this because TLSADDR is lowered into a call inside MC; without the
  // two markers, shrink-wrapping may push the prologue/epilogue past them.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction &MF = *BB->getParent();

  // Emit CALLSEQ_START right before the instruction.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  MachineInstrBuilder CallseqStart =
    BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
  BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);

  // Emit CALLSEQ_END right after the instruction.
  // We don't call erase from parent because we want to keep the
  // original instruction around.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  MachineInstrBuilder CallseqEnd =
    BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
  BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);

  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
                                      MachineBasicBlock *BB) const {
  // This is pretty easy.  We're taking the value that we received from
  // our load from the relocation, sticking it in either RDI (x86-64)
  // or EAX and doing an indirect call.  The return value will then
  // be in the normal return register.
  MachineFunction *F = BB->getParent();
  const X86InstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
  assert(MI.getOperand(3).isGlobal() && "This should be a global");

  // Get a register mask for the lowered call.
  // FIXME: The 32-bit calls have non-standard calling conventions. Use a
  // proper register mask.
  const uint32_t *RegMask =
      Subtarget.is64Bit() ?
      Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
      Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
  if (Subtarget.is64Bit()) {
    MachineInstrBuilder MIB =
        BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
            .addReg(X86::RIP)
            .addImm(0)
            .addReg(0)
            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                              MI.getOperand(3).getTargetFlags())
            .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
    addDirectMem(MIB, X86::RDI);
    MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else if (!isPositionIndependent()) {
    MachineInstrBuilder MIB =
        BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
            .addReg(0)
            .addImm(0)
            .addReg(0)
            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                              MI.getOperand(3).getTargetFlags())
            .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else {
    MachineInstrBuilder MIB =
        BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
            .addReg(TII->getGlobalBaseReg(F))
            .addImm(0)
            .addReg(0)
            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                              MI.getOperand(3).getTargetFlags())
            .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static unsigned getOpcodeForRetpoline(unsigned RPOpc) {
  switch (RPOpc) {
  case X86::RETPOLINE_CALL32:
    return X86::CALLpcrel32;
  case X86::RETPOLINE_CALL64:
    return X86::CALL64pcrel32;
  case X86::RETPOLINE_TCRETURN32:
    return X86::TCRETURNdi;
  case X86::RETPOLINE_TCRETURN64:
    return X86::TCRETURNdi64;
  }
  llvm_unreachable("not retpoline opcode");
}

static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
                                      unsigned Reg) {
  if (Subtarget.useRetpolineExternalThunk()) {
    // When using an external thunk for retpolines, we pick names that match the
    // names GCC happens to use as well. This helps simplify the implementation
    // of the thunks for kernels where they have no easy ability to create
    // aliases and are doing non-trivial configuration of the thunk's body. For
    // example, the Linux kernel will do boot-time hot patching of the thunk
    // bodies and cannot easily export aliases of these to loaded modules.
    //
    // Note that at any point in the future, we may need to change the semantics
    // of how we implement retpolines and at that time will likely change the
    // name of the called thunk. Essentially, there is no hard guarantee that
    // LLVM will generate calls to specific thunks, we merely make a best-effort
    // attempt to help out kernels and other systems where duplicating the
    // thunks is costly.
    switch (Reg) {
    case X86::EAX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_eax";
    case X86::ECX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_ecx";
    case X86::EDX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_edx";
    case X86::EDI:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_edi";
    case X86::R11:
      assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
      return "__x86_indirect_thunk_r11";
    }
    llvm_unreachable("unexpected reg for retpoline");
  }

  // When targeting an internal COMDAT thunk use an LLVM-specific name.
  switch (Reg) {
  case X86::EAX:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_eax";
  case X86::ECX:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_ecx";
  case X86::EDX:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_edx";
  case X86::EDI:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_edi";
  case X86::R11:
    assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
    return "__llvm_retpoline_r11";
  }
  llvm_unreachable("unexpected reg for retpoline");
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
                                        MachineBasicBlock *BB) const {
  // Copy the virtual register into the R11 physical register and
  // call the retpoline thunk.
  DebugLoc DL = MI.getDebugLoc();
  const X86InstrInfo *TII = Subtarget.getInstrInfo();
  Register CalleeVReg = MI.getOperand(0).getReg();
  unsigned Opc = getOpcodeForRetpoline(MI.getOpcode());

  // Find an available scratch register to hold the callee. On 64-bit, we can
  // just use R11, but we scan for uses anyway to ensure we don't generate
  // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
  // already a register use operand to the call to hold the callee. If none
  // are available, use EDI instead. EDI is chosen because EBX is the PIC base
  // register and ESI is the base pointer to realigned stack frames with VLAs.
  SmallVector<unsigned, 3> AvailableRegs;
  if (Subtarget.is64Bit())
    AvailableRegs.push_back(X86::R11);
  else
    AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});

  // Zero out any registers that are already used.
  for (const auto &MO : MI.operands()) {
    if (MO.isReg() && MO.isUse())
      for (unsigned &Reg : AvailableRegs)
        if (Reg == MO.getReg())
          Reg = 0;
  }

  // Choose the first remaining non-zero available register.
  unsigned AvailableReg = 0;
  for (unsigned MaybeReg : AvailableRegs) {
    if (MaybeReg) {
      AvailableReg = MaybeReg;
      break;
    }
  }
  if (!AvailableReg)
    report_fatal_error("calling convention incompatible with retpoline, no "
                       "available registers");

  const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);

  BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
      .addReg(CalleeVReg);
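  // Rewrite the pseudo in place: the callee operand becomes the thunk symbol
  // and the opcode becomes the corresponding direct call or tail call.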
  MI.getOperand(0).ChangeToES(Symbol);
  MI.setDesc(TII->get(Opc));
  MachineInstrBuilder(*BB->getParent(), &MI)
      .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
  return BB;
}

/// SetJmp implies future control flow change upon calling the corresponding
/// LongJmp.
/// Instead of using the 'return' instruction, the long jump fixes the stack and
/// performs an indirect branch. To do so it uses the registers that were stored
/// in the jump buffer (when calling SetJmp).
/// If the shadow stack is enabled, we need to fix it as well, because some
/// return addresses will be skipped.
/// The function will save the SSP for future fixing in the function
/// emitLongJmpShadowStackFix.
/// \sa emitLongJmpShadowStackFix
/// \param [in] MI The temporary Machine Instruction for the builtin.
/// \param [in] MBB The Machine Basic Block that will be modified.
void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
                                                 MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB;

  // Memory Reference.
  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                           MI.memoperands_end());

  // Initialize a register with zero.
  MVT PVT = getPointerTy(MF->getDataLayout());
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  Register ZReg = MRI.createVirtualRegister(PtrRC);
  unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
  BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
      .addDef(ZReg)
      .addReg(ZReg, RegState::Undef)
      .addReg(ZReg, RegState::Undef);

  // Read the current SSP Register value to the zeroed register.
  Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
  unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
  BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);

  // Write the SSP register value to offset 3 (in pointer-size slots) of the
  // input memory buffer.
  unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
  const int64_t SSPOffset = 3 * PVT.getStoreSize();
  const unsigned MemOpndSlot = 1;
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
    else
      MIB.add(MI.getOperand(MemOpndSlot + i));
  }
  MIB.addReg(SSPCopyReg);
  MIB.setMemRefs(MMOs);
}

MachineBasicBlock *
X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  // Memory Reference
  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                           MI.memoperands_end());

  unsigned DstReg;
  unsigned MemOpndSlot = 0;

  unsigned CurOp = 0;

  DstReg = MI.getOperand(CurOp++).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  (void)TRI;
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register restoreDstReg = MRI.createVirtualRegister(RC);

  MemOpndSlot = CurOp;

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
  //  SjLjSetup restoreMBB
  //
  // mainMBB:
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //
  // restoreMBB:
  //  if base pointer being used, load it from frame
  //  v_restore = 1

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);
  MF->push_back(restoreMBB);
  restoreMBB->setHasAddressTaken();

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // thisMBB:
  unsigned PtrStoreOpc = 0;
  unsigned LabelReg = 0;
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                     !isPositionIndependent();

  // Prepare IP either in reg or imm.
  if (!UseImmLabel) {
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
    const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
    LabelReg = MRI.createVirtualRegister(PtrRC);
    if (Subtarget.is64Bit()) {
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
              .addReg(X86::RIP)
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB)
              .addReg(0);
    } else {
      const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
              .addReg(XII->getGlobalBaseReg(MF))
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
              .addReg(0);
    }
  } else
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  // Store IP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
    else
      MIB.add(MI.getOperand(MemOpndSlot + i));
  }
  if (!UseImmLabel)
    MIB.addReg(LabelReg);
  else
    MIB.addMBB(restoreMBB);
  MIB.setMemRefs(MMOs);

  if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
    emitSetJmpShadowStackFix(MI, thisMBB);
  }

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
          .addMBB(restoreMBB);

  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
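  // Treat the setjmp point as clobbering all registers, since a longjmp back
  // here can arrive with arbitrary register contents.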
  MIB.addRegMask(RegInfo->getNoPreservedMask());
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(restoreMBB);

  // mainMBB:
  //  v_main = 0
  BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(restoreMBB);

  // restoreMBB:
  if (RegInfo->hasBasePointer(*MF)) {
    const bool Uses64BitFramePtr =
        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
    X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
    X86FI->setRestoreBasePointer(MF);
    Register FramePtr = RegInfo->getFrameRegister(*MF);
    Register BasePtr = RegInfo->getBaseRegister();
    unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
    addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
                 FramePtr, true, X86FI->getRestoreBasePointerOffset())
      .setMIFlag(MachineInstr::FrameSetup);
  }
  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
  BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
  restoreMBB->addSuccessor(sinkMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

/// Fix the shadow stack using the previously saved SSP pointer.
/// \sa emitSetJmpShadowStackFix
/// \param [in] MI The temporary Machine Instruction for the builtin.
/// \param [in] MBB The Machine Basic Block that will be modified.
/// \return The sink MBB that will perform the future indirect branch.
MachineBasicBlock *
X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
                                             MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                           MI.memoperands_end());

  MVT PVT = getPointerTy(MF->getDataLayout());
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);

  // checkSspMBB:
  //         xor vreg1, vreg1
  //         rdssp vreg1
  //         test vreg1, vreg1
  //         je sinkMBB   # Jump if Shadow Stack is not supported
  // fallMBB:
  //         mov buf+24/12(%rip), vreg2
  //         sub vreg1, vreg2
  //         jbe sinkMBB  # No need to fix the Shadow Stack
  // fixShadowMBB:
  //         shr 3/2, vreg2
  //         incssp vreg2  # fix the SSP according to the lower 8 bits
  //         shr 8, vreg2
  //         je sinkMBB
  // fixShadowLoopPrepareMBB:
  //         shl vreg2
  //         mov 128, vreg3
  // fixShadowLoopMBB:
  //         incssp vreg3
  //         dec vreg2
  //         jne fixShadowLoopMBB # Iterate until you finish fixing
  //                              # the Shadow Stack
  // sinkMBB:

  MachineFunction::iterator I = ++MBB->getIterator();
  const BasicBlock *BB = MBB->getBasicBlock();

  MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, checkSspMBB);
  MF->insert(I, fallMBB);
  MF->insert(I, fixShadowMBB);
  MF->insert(I, fixShadowLoopPrepareMBB);
  MF->insert(I, fixShadowLoopMBB);
  MF->insert(I, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
                  MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  MBB->addSuccessor(checkSspMBB);

  // Initialize a register with zero.
  Register ZReg = MRI.createVirtualRegister(PtrRC);
  unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
  BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
      .addDef(ZReg)
      .addReg(ZReg, RegState::Undef)
      .addReg(ZReg, RegState::Undef);

  // Read the current SSP register value into the zeroed register.
  Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
  unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
  BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
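  // RDSSP is a no-op on processors without shadow stack support and leaves its
  // destination untouched, so starting from zero lets the test below detect
  // that case and skip the whole fix-up.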

  // Check whether the value read from the SSP register is zero; if so, the
  // shadow stack is not supported, so jump directly to the sink.
  unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
  BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
      .addReg(SSPCopyReg)
      .addReg(SSPCopyReg);
  BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
  checkSspMBB->addSuccessor(sinkMBB);
  checkSspMBB->addSuccessor(fallMBB);

  // Reload the previously saved SSP register value.
  Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
  const int64_t SSPOffset = 3 * PVT.getStoreSize();
  MachineInstrBuilder MIB =
      BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (i == X86::AddrDisp)
      MIB.addDisp(MO, SSPOffset);
    else if (MO.isReg()) // Don't add the whole operand, we don't want to
                         // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);

  // Subtract the current SSP from the previous SSP.
  Register SspSubReg = MRI.createVirtualRegister(PtrRC);
  unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
  BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
      .addReg(PrevSSPReg)
      .addReg(SSPCopyReg);

  // Jump to sink in case PrevSSPReg <= SSPCopyReg.
  BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
  fallMBB->addSuccessor(sinkMBB);
  fallMBB->addSuccessor(fixShadowMBB);

  // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
  unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
  unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
  Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
      .addReg(SspSubReg)
      .addImm(Offset);

  // Increment the SSP using only the lower 8 bits of the delta.
  unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
  BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);

  // Reset the lower 8 bits.
  Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
      .addReg(SspFirstShrReg)
      .addImm(8);

  // Jump if the result of the shift is zero.
  BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
  fixShadowMBB->addSuccessor(sinkMBB);
  fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);

  // Do a single shift left.
  unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
  Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
      .addReg(SspSecondShrReg);
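  // SspSecondShrReg counted the remaining 256-entry chunks; doubling it lets
  // the loop below handle each chunk as two incssp steps of 128 entries.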

  // Save the value 128 to a register (will be used next with incssp).
  Register Value128InReg = MRI.createVirtualRegister(PtrRC);
  unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
      .addImm(128);
  fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);

  // Since incssp only looks at the lower 8 bits, we might need to do several
  // iterations of incssp until we finish fixing the shadow stack.
  Register DecReg = MRI.createVirtualRegister(PtrRC);
  Register CounterReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
      .addReg(SspAfterShlReg)
      .addMBB(fixShadowLoopPrepareMBB)
      .addReg(DecReg)
      .addMBB(fixShadowLoopMBB);

  // Every iteration we increase the SSP by 128.
  BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);

  // Every iteration we decrement the counter by 1.
  unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
  BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);

  // Jump if the counter is not zero yet.
  BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
  fixShadowLoopMBB->addSuccessor(sinkMBB);
  fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);

  return sinkMBB;
}

MachineBasicBlock *
X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                           MI.memoperands_end());

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
  Register Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
  Register SP = RegInfo->getStackRegister();

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset = 2 * PVT.getStoreSize();
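  // Buffer layout: slot 0 holds the saved frame pointer, slot 1 the resume
  // address, and slot 2 the stack pointer; all three are reloaded below.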

  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
  unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;

  MachineBasicBlock *thisMBB = MBB;

  // When CET and the shadow stack are enabled, we need to fix the Shadow Stack.
  if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
    thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
  }

  // Reload FP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg()) // Don't add the whole operand, we don't want to
                    // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);

  // Reload IP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (i == X86::AddrDisp)
      MIB.addDisp(MO, LabelOffset);
    else if (MO.isReg()) // Don't add the whole operand, we don't want to
                         // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);

  // Reload SP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(i), SPOffset);
    else
      MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
                                 // the last instruction of the expansion.
  }
  MIB.setMemRefs(MMOs);

  // Jump
  BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);

  MI.eraseFromParent();
  return thisMBB;
}

void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
                                               MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  const X86InstrInfo *TII = Subtarget.getInstrInfo();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

  unsigned Op = 0;
  unsigned VR = 0;

  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                     !isPositionIndependent();

  if (UseImmLabel) {
    Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  } else {
    const TargetRegisterClass *TRC =
        (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
    VR = MRI->createVirtualRegister(TRC);
    Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;

    if (Subtarget.is64Bit())
      BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
          .addReg(X86::RIP)
          .addImm(1)
          .addReg(0)
          .addMBB(DispatchBB)
          .addReg(0);
    else
      BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
          .addReg(0) /* TII->getGlobalBaseReg(MF) */
          .addImm(1)
          .addReg(0)
          .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
          .addReg(0);
  }

  MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
  addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
  if (UseImmLabel)
    MIB.addMBB(DispatchBB);
  else
    MIB.addReg(VR);
}

MachineBasicBlock *
X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
                                         MachineBasicBlock *BB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  const X86InstrInfo *TII = Subtarget.getInstrInfo();
  int FI = MF->getFrameInfo().getFunctionContextIndex();

  // Get a mapping of the call site numbers to all of the landing pads they're
  // associated with.
  DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
  unsigned MaxCSNum = 0;
  for (auto &MBB : *MF) {
    if (!MBB.isEHPad())
      continue;

    MCSymbol *Sym = nullptr;
    for (const auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;

      assert(MI.isEHLabel() && "expected EH_LABEL");
      Sym = MI.getOperand(0).getMCSymbol();
      break;
    }

    if (!MF->hasCallSiteLandingPad(Sym))
      continue;

    for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
      CallSiteNumToLPad[CSI].push_back(&MBB);
      MaxCSNum = std::max(MaxCSNum, CSI);
    }
  }

  // Get an ordered list of the machine basic blocks for the jump table.
  std::vector<MachineBasicBlock *> LPadList;
  SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
  LPadList.reserve(CallSiteNumToLPad.size());

  for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
    for (auto &LP : CallSiteNumToLPad[CSI]) {
      LPadList.push_back(LP);
      InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
    }
  }

  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");

  // Create the MBBs for the dispatch code.

  // Shove the dispatch's address into the return slot in the function context.
  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
  DispatchBB->setIsEHPad(true);

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  BuildMI(TrapBB, DL, TII->get(X86::TRAP));
  DispatchBB->addSuccessor(TrapBB);

  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
  DispatchBB->addSuccessor(DispContBB);

  // Insert MBBs.
  MF->push_back(DispatchBB);
  MF->push_back(DispContBB);
  MF->push_back(TrapBB);

  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);

  // Create the jump table and associated information
  unsigned JTE = getJumpTableEncoding();
  MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
  unsigned MJTI = JTI->createJumpTableIndex(LPadList);

  const X86RegisterInfo &RI = TII->getRegisterInfo();
  // Add a register mask with no preserved registers.  This results in all
  // registers being marked as clobbered.
  if (RI.hasBasePointer(*MF)) {
    const bool FPIs64Bit =
        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
    X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
    MFI->setRestoreBasePointer(MF);

    Register FP = RI.getFrameRegister(*MF);
    Register BP = RI.getBaseRegister();
    unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
    addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
                 MFI->getRestoreBasePointerOffset())
        .addRegMask(RI.getNoPreservedMask());
  } else {
    BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
        .addRegMask(RI.getNoPreservedMask());
  }

  // IReg is used as an index in a memory operand and therefore can't be SP.
  Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
  addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
                    Subtarget.is64Bit() ? 8 : 4);
  BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
      .addReg(IReg)
      .addImm(LPadList.size());
  BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);

  if (Subtarget.is64Bit()) {
    Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
    Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);

    // leaq .LJTI0_0(%rip), BReg
    BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
        .addReg(X86::RIP)
        .addImm(1)
        .addReg(0)
        .addJumpTableIndex(MJTI)
        .addReg(0);
    // movzx IReg64, IReg
    BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
        .addImm(0)
        .addReg(IReg)
        .addImm(X86::sub_32bit);
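    // SUBREG_TO_REG with a zero immediate relies on the 32-bit load above
    // having already zeroed the upper half of the 64-bit register, giving a
    // free zero-extension of the index.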

    switch (JTE) {
    case MachineJumpTableInfo::EK_BlockAddress:
      // jmpq *(BReg,IReg64,8)
      BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
          .addReg(BReg)
          .addImm(8)
          .addReg(IReg64)
          .addImm(0)
          .addReg(0);
      break;
    case MachineJumpTableInfo::EK_LabelDifference32: {
      Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
      Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
      Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
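      // EK_LabelDifference32 jump table entries are 32-bit offsets from the
      // table base, so the loaded entry is sign-extended and added to BReg to
      // form the absolute target.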

      // movl (BReg,IReg64,4), OReg
      BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
          .addReg(BReg)
          .addImm(4)
          .addReg(IReg64)
          .addImm(0)
          .addReg(0);
      // movsx OReg64, OReg
      BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
      // addq BReg, OReg64, TReg
      BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
          .addReg(OReg64)
          .addReg(BReg);
      // jmpq *TReg
      BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
      break;
    }
    default:
      llvm_unreachable("Unexpected jump table encoding");
    }
  } else {
    // jmpl *.LJTI0_0(,IReg,4)
    BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
        .addReg(0)
        .addImm(4)
        .addReg(IReg)
        .addJumpTableIndex(MJTI)
        .addReg(0);
  }

  // Add the jump table entries as successors to the MBB.
  SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
  for (auto &LP : LPadList)
    if (SeenMBBs.insert(LP).second)
      DispContBB->addSuccessor(LP);

  // N.B. the order the invoke BBs are processed in doesn't matter here.
  SmallVector<MachineBasicBlock *, 64> MBBLPads;
  const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
  for (MachineBasicBlock *MBB : InvokeBBs) {
    // Remove the landing pad successor from the invoke block and replace it
    // with the new dispatch block.
    // Keep a copy of Successors since it's modified inside the loop.
    SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
                                                   MBB->succ_rend());
    // FIXME: Avoid quadratic complexity.
    for (auto MBBS : Successors) {
      if (MBBS->isEHPad()) {
        MBB->removeSuccessor(MBBS);
        MBBLPads.push_back(MBBS);
      }
    }

    MBB->addSuccessor(DispatchBB);

    // Find the invoke call and mark all of the callee-saved registers as
    // 'implicit defined' so that they're spilled.  This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
    for (auto &II : reverse(*MBB)) {
      if (!II.isCall())
        continue;

      DenseMap<unsigned, bool> DefRegs;
      for (auto &MOp : II.operands())
        if (MOp.isReg())
          DefRegs[MOp.getReg()] = true;

      MachineInstrBuilder MIB(*MF, &II);
      for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
        unsigned Reg = SavedRegs[RegIdx];
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      break;
    }
  }

  // Mark all former landing pads as non-landing pads.  The dispatch is the only
  // landing pad now.
  for (auto &LP : MBBLPads)
    LP->setIsEHPad(false);

  // The instruction is gone now.
  MI.eraseFromParent();
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unexpected instr type to insert");
  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
    return EmitLoweredTLSAddr(MI, BB);
  case X86::RETPOLINE_CALL32:
  case X86::RETPOLINE_CALL64:
  case X86::RETPOLINE_TCRETURN32:
  case X86::RETPOLINE_TCRETURN64:
    return EmitLoweredRetpoline(MI, BB);
  case X86::CATCHRET:
    return EmitLoweredCatchRet(MI, BB);
  case X86::CATCHPAD:
    return EmitLoweredCatchPad(MI, BB);
  case X86::SEG_ALLOCA_32:
  case X86::SEG_ALLOCA_64:
    return EmitLoweredSegAlloca(MI, BB);
  case X86::TLSCall_32:
  case X86::TLSCall_64:
    return EmitLoweredTLSCall(MI, BB);
  case X86::CMOV_FR32:
  case X86::CMOV_FR32X:
  case X86::CMOV_FR64:
  case X86::CMOV_FR64X:
  case X86::CMOV_GR8:
  case X86::CMOV_GR16:
  case X86::CMOV_GR32:
  case X86::CMOV_RFP32:
  case X86::CMOV_RFP64:
  case X86::CMOV_RFP80:
  case X86::CMOV_VR128:
  case X86::CMOV_VR128X:
  case X86::CMOV_VR256:
  case X86::CMOV_VR256X:
  case X86::CMOV_VR512:
  case X86::CMOV_VK2:
  case X86::CMOV_VK4:
  case X86::CMOV_VK8:
  case X86::CMOV_VK16:
  case X86::CMOV_VK32:
  case X86::CMOV_VK64:
    return EmitLoweredSelect(MI, BB);

  case X86::RDFLAGS32:
  case X86::RDFLAGS64: {
    unsigned PushF =
        MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
    unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
    MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
    // Permit reads of the EFLAGS and DF registers without them being defined.
    // This intrinsic exists to read external processor state in flags, such as
    // the trap flag, interrupt flag, and direction flag, none of which are
    // modeled by the backend.
    assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
           "Unexpected register in operand!");
    Push->getOperand(2).setIsUndef();
    assert(Push->getOperand(3).getReg() == X86::DF &&
           "Unexpected register in operand!");
    Push->getOperand(3).setIsUndef();
    BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());

    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }

  case X86::WRFLAGS32:
  case X86::WRFLAGS64: {
    unsigned Push =
        MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
    unsigned PopF =
        MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
    BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
    BuildMI(*BB, MI, DL, TII->get(PopF));

    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }

  case X86::FP32_TO_INT16_IN_MEM:
  case X86::FP32_TO_INT32_IN_MEM:
  case X86::FP32_TO_INT64_IN_MEM:
  case X86::FP64_TO_INT16_IN_MEM:
  case X86::FP64_TO_INT32_IN_MEM:
  case X86::FP64_TO_INT64_IN_MEM:
  case X86::FP80_TO_INT16_IN_MEM:
  case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    int OrigCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);

    // Load the old value of the control word...
    Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
                      OrigCWFrameIdx);

    // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
    Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
    BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
      .addReg(OldCW, RegState::Kill).addImm(0xC00);

    // Extract to 16 bits.
    Register NewCW16 =
        MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
      .addReg(NewCW, RegState::Kill, X86::sub_16bit);

    // Prepare memory for FLDCW.
    int NewCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
                      NewCWFrameIdx)
      .addReg(NewCW16, RegState::Kill);

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), NewCWFrameIdx);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM = getAddressFromInstr(&MI, 0);
    addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
        .addReg(MI.getOperand(X86::AddrNumOperands).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), OrigCWFrameIdx);

    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }

  // xbegin
  case X86::XBEGIN:
    return emitXBegin(MI, BB, Subtarget.getInstrInfo());

  case X86::VASTART_SAVE_XMM_REGS:
    return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);

  case X86::VAARG_64:
    return EmitVAARG64WithCustomInserter(MI, BB);

  case X86::EH_SjLj_SetJmp32:
  case X86::EH_SjLj_SetJmp64:
    return emitEHSjLjSetJmp(MI, BB);

  case X86::EH_SjLj_LongJmp32:
  case X86::EH_SjLj_LongJmp64:
    return emitEHSjLjLongJmp(MI, BB);

  case X86::Int_eh_sjlj_setup_dispatch:
    return EmitSjLjDispatchBlock(MI, BB);

  case TargetOpcode::STATEPOINT:
    // As an implementation detail, STATEPOINT shares the STACKMAP format at
    // this point in the process.  We diverge later.
    return emitPatchPoint(MI, BB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, BB);

  case TargetOpcode::PATCHABLE_EVENT_CALL:
    return emitXRayCustomEvent(MI, BB);

  case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
    return emitXRayTypedEvent(MI, BB);

  case X86::LCMPXCHG8B: {
    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
    // In addition to the four E[ABCD] registers implied by its encoding,
    // CMPXCHG8B requires a memory operand. If the current architecture is i686
    // and the current function needs a base pointer - which is ESI on i686 -
    // the register allocator would not be able to allocate registers for an
    // address of the form X(%reg, %reg, Y): there would never be enough
    // unreserved registers during regalloc (without the base pointer the only
    // option would be X(%edi, %esi, Y)). We give the register allocator a hand
    // by precomputing the address in a new vreg using LEA.

    // If it is not i686 or there is no base pointer - nothing to do here.
    if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
      return BB;

    // Even though this code does not necessarily need the base pointer to be
    // ESI, we check for that anyway. The reason: if this assert fails, the
    // compiler's base pointer handling has changed in a way that most likely
    // has to be addressed here as well.
    assert(TRI->getBaseRegister() == X86::ESI &&
           "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
           "base pointer in mind");

    MachineRegisterInfo &MRI = MF->getRegInfo();
    MVT SPTy = getPointerTy(MF->getDataLayout());
    const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
    Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);

    X86AddressMode AM = getAddressFromInstr(&MI, 0);
    // Regalloc does not need any help when the memory operand of CMPXCHG8B
    // does not use index register.
    if (AM.IndexReg == X86::NoRegister)
      return BB;

    // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
    // four operand definitions that are E[ABCD] registers. We skip them and
    // then insert the LEA.
    MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
    while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
                                   RMBBI->definesRegister(X86::EBX) ||
                                   RMBBI->definesRegister(X86::ECX) ||
                                   RMBBI->definesRegister(X86::EDX))) {
      ++RMBBI;
    }
    MachineBasicBlock::iterator MBBI(RMBBI);
    addFullAddress(
        BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);

    setDirectAddressInInstr(&MI, 0, computedAddrVReg);

    return BB;
  }
  case X86::LCMPXCHG16B:
    return BB;
  case X86::LCMPXCHG8B_SAVE_EBX:
  case X86::LCMPXCHG16B_SAVE_RBX: {
    unsigned BasePtr =
        MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
    if (!BB->isLiveIn(BasePtr))
      BB->addLiveIn(BasePtr);
    return BB;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

bool
X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
                                                const APInt &Demanded,
                                                TargetLoweringOpt &TLO) const {
  // Only optimize Ands to prevent shrinking a constant that could be
  // matched by movzx.
  if (Op.getOpcode() != ISD::AND)
    return false;

  EVT VT = Op.getValueType();

  // Ignore vectors.
  if (VT.isVector())
    return false;

  unsigned Size = VT.getSizeInBits();

  // Make sure the RHS really is a constant.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;

  const APInt &Mask = C->getAPIntValue();

  // Clear all non-demanded bits initially.
  APInt ShrunkMask = Mask & Demanded;

  // Find the width of the shrunk mask.
  unsigned Width = ShrunkMask.getActiveBits();

  // If the mask is all 0s there's nothing to do here.
  if (Width == 0)
    return false;

  // Find the next power of 2 width, rounding up to a byte.
  Width = PowerOf2Ceil(std::max(Width, 8U));
  // Truncate the width to size to handle illegal types.
  Width = std::min(Width, Size);

  // Calculate a possible zero extend mask for this constant.
  APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width);
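  // For example, with Demanded = 0xF0 and Mask = 0xF0 this yields 0xFF, which
  // isel can then match with a movzx of the low byte.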

  // If we aren't changing the mask, just return true to keep it and prevent
  // the caller from optimizing.
  if (ZeroExtendMask == Mask)
    return true;

  // Make sure the new mask can be represented by a combination of mask bits
  // and non-demanded bits.
  if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded))
    return false;

  // Replace the constant with the zero extend mask.
  SDLoc DL(Op);
  SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
  SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
  return TLO.CombineTo(Op, NewOp);
}

void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  unsigned BitWidth = Known.getBitWidth();
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  Known.resetAll();
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
    Known.Zero.setBitsFrom(1);
    break;
  case X86ISD::MOVMSK: {
    unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
    Known.Zero.setBitsFrom(NumLoBits);
    break;
  }
  case X86ISD::PEXTRB:
  case X86ISD::PEXTRW: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
                                            Op.getConstantOperandVal(1));
    Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
    Known = Known.zextOrTrunc(BitWidth, false);
    Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
    break;
  }
  case X86ISD::VSRAI:
  case X86ISD::VSHLI:
  case X86ISD::VSRLI: {
    if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
        Known.setAllZero();
        break;
      }

      Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      unsigned ShAmt = ShiftImm->getZExtValue();
      if (Opc == X86ISD::VSHLI) {
        Known.Zero <<= ShAmt;
        Known.One <<= ShAmt;
        // Low bits are known zero.
        Known.Zero.setLowBits(ShAmt);
      } else if (Opc == X86ISD::VSRLI) {
        Known.Zero.lshrInPlace(ShAmt);
        Known.One.lshrInPlace(ShAmt);
        // High bits are known zero.
        Known.Zero.setHighBits(ShAmt);
      } else {
        Known.Zero.ashrInPlace(ShAmt);
        Known.One.ashrInPlace(ShAmt);
      }
    }
    break;
  }
  case X86ISD::PACKUS: {
    // PACKUS is just a truncation if the upper half is zero.
    APInt DemandedLHS, DemandedRHS;
    getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);

    Known.One = APInt::getAllOnesValue(BitWidth * 2);
    Known.Zero = APInt::getAllOnesValue(BitWidth * 2);

    KnownBits Known2;
    if (!!DemandedLHS) {
      Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    if (!!DemandedRHS) {
      Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }

    if (Known.countMinLeadingZeros() < BitWidth)
      Known.resetAll();
    Known = Known.trunc(BitWidth);
    break;
  }
  case X86ISD::ANDNP: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // ANDNP = (~X & Y);
    Known.One &= Known2.Zero;
    Known.Zero |= Known2.One;
    break;
  }
  case X86ISD::FOR: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 bits are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  }
  case X86ISD::PSADBW: {
    assert(VT.getScalarType() == MVT::i64 &&
           Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
           "Unexpected PSADBW types");

    // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
    Known.Zero.setBitsFrom(16);
    break;
  }
  case X86ISD::CMOV: {
    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  }
  }

  // Handle target shuffles.
  // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
  if (isTargetShuffle(Opc)) {
    bool IsUnary;
    SmallVector<int, 64> Mask;
    SmallVector<SDValue, 2> Ops;
    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
                             IsUnary)) {
      unsigned NumOps = Ops.size();
      unsigned NumElts = VT.getVectorNumElements();
      if (Mask.size() == NumElts) {
        SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
        Known.Zero.setAllBits(); Known.One.setAllBits();
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i])
            continue;
          int M = Mask[i];
          if (M == SM_SentinelUndef) {
            // For UNDEF elements, we don't know anything about the common state
            // of the shuffle result.
            Known.resetAll();
            break;
          } else if (M == SM_SentinelZero) {
            Known.One.clearAllBits();
            continue;
          }
          assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
                 "Shuffle index out of range");

          unsigned OpIdx = (unsigned)M / NumElts;
          unsigned EltIdx = (unsigned)M % NumElts;
          if (Ops[OpIdx].getValueType() != VT) {
            // TODO - handle target shuffle ops with different value types.
            Known.resetAll();
            break;
          }
          DemandedOps[OpIdx].setBit(EltIdx);
        }
        // Known bits are the values that are shared by every demanded element.
        for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
          if (!DemandedOps[i])
            continue;
          KnownBits Known2 =
              DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
          Known.One &= Known2.One;
          Known.Zero &= Known2.Zero;
        }
      }
    }
  }
}

unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getScalarSizeInBits();
  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case X86ISD::SETCC_CARRY:
    // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
    return VTBits;

  case X86ISD::VTRUNC: {
    // TODO: Add DemandedElts support.
    SDValue Src = Op.getOperand(0);
    unsigned NumSrcBits = Src.getScalarValueSizeInBits();
    assert(VTBits < NumSrcBits && "Illegal truncation input type");
    unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
    if (Tmp > (NumSrcBits - VTBits))
      return Tmp - (NumSrcBits - VTBits);
    return 1;
  }

  case X86ISD::PACKSS: {
    // PACKSS is just a truncation if the sign bits extend to the packed size.
    APInt DemandedLHS, DemandedRHS;
    getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
                        DemandedRHS);

    unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
    unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
    if (!!DemandedLHS)
      Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
    if (!!DemandedRHS)
      Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
    unsigned Tmp = std::min(Tmp0, Tmp1);
    if (Tmp > (SrcBits - VTBits))
      return Tmp - (SrcBits - VTBits);
    return 1;
  }

  case X86ISD::VSHLI: {
    SDValue Src = Op.getOperand(0);
    const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
    if (ShiftVal.uge(VTBits))
      return VTBits; // Shifted all bits out --> zero.
    unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
    if (ShiftVal.uge(Tmp))
      return 1; // Shifted all sign bits out --> unknown.
    return Tmp - ShiftVal.getZExtValue();
  }

  case X86ISD::VSRAI: {
    SDValue Src = Op.getOperand(0);
    APInt ShiftVal = Op.getConstantOperandAPInt(1);
    if (ShiftVal.uge(VTBits - 1))
      return VTBits; // Sign splat.
    unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
    ShiftVal += Tmp;
    return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
  }

  case X86ISD::PCMPGT:
  case X86ISD::PCMPEQ:
  case X86ISD::CMPP:
  case X86ISD::VPCOM:
  case X86ISD::VPCOMU:
    // Vector compares return zero/all-bits result values.
    return VTBits;

  case X86ISD::ANDNP: {
    unsigned Tmp0 =
        DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp0 == 1) return 1; // Early out.
    unsigned Tmp1 =
        DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    return std::min(Tmp0, Tmp1);
  }

  case X86ISD::CMOV: {
    unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp0 == 1) return 1;  // Early out.
    unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
    return std::min(Tmp0, Tmp1);
  }
  }

  // Handle target shuffles.
  // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
  if (isTargetShuffle(Opcode)) {
    bool IsUnary;
    SmallVector<int, 64> Mask;
    SmallVector<SDValue, 2> Ops;
    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
                             IsUnary)) {
      unsigned NumOps = Ops.size();
      unsigned NumElts = VT.getVectorNumElements();
      if (Mask.size() == NumElts) {
        SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i])
            continue;
          int M = Mask[i];
          if (M == SM_SentinelUndef) {
            // For UNDEF elements, we don't know anything about the common state
            // of the shuffle result.
            return 1;
          } else if (M == SM_SentinelZero) {
            // Zero = all sign bits.
            continue;
          }
          assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
                 "Shuffle index out of range");

          unsigned OpIdx = (unsigned)M / NumElts;
          unsigned EltIdx = (unsigned)M % NumElts;
          if (Ops[OpIdx].getValueType() != VT) {
            // TODO - handle target shuffle ops with different value types.
            return 1;
          }
          DemandedOps[OpIdx].setBit(EltIdx);
        }
        unsigned Tmp0 = VTBits;
        for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
          if (!DemandedOps[i])
            continue;
          unsigned Tmp1 =
              DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
          Tmp0 = std::min(Tmp0, Tmp1);
        }
        return Tmp0;
      }
    }
  }

  // Fallback case.
  return 1;
}

SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
  if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
    return N->getOperand(0);
  return N;
}

// Attempt to match a combined shuffle mask against supported unary shuffle
// instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
                              bool AllowFloatDomain, bool AllowIntDomain,
                              SDValue &V1, const SDLoc &DL, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget, unsigned &Shuffle,
                              MVT &SrcVT, MVT &DstVT) {
  unsigned NumMaskElts = Mask.size();
  unsigned MaskEltSize = MaskVT.getScalarSizeInBits();

  // Match against a VZEXT_MOVL vXi32 zero-extending instruction.
  if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
      isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
    Shuffle = X86ISD::VZEXT_MOVL;
    SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
    return true;
  }

  // Match against a ANY/ZERO_EXTEND_VECTOR_INREG instruction.
  // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
                         (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
    unsigned MaxScale = 64 / MaskEltSize;
    for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
      bool MatchAny = true;
      bool MatchZero = true;
      unsigned NumDstElts = NumMaskElts / Scale;
      for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
        if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
          MatchAny = MatchZero = false;
          break;
        }
        MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
        MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
      }
      if (MatchAny || MatchZero) {
        assert(MatchZero && "Failed to match zext but matched aext?");
        unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
        MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
                                            MVT::getIntegerVT(MaskEltSize);
        SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);

        if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
          V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);

        Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
        if (SrcVT.getVectorNumElements() != NumDstElts)
          Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);

        DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
        DstVT = MVT::getVectorVT(DstVT, NumDstElts);
        return true;
      }
    }
  }

  // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
  if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
      isUndefOrEqual(Mask[0], 0) &&
      isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
    Shuffle = X86ISD::VZEXT_MOVL;
    SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
    return true;
  }

  // Check if we have SSE3, which lets us use MOVDDUP etc. These instructions
  // are no slower than UNPCKLPD but have the option of folding the input
  // operand into even an unaligned memory load.
  if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
    if (isTargetShuffleEquivalent(Mask, {0, 0})) {
      Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v2f64;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
      Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
      Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
  }

  if (MaskVT.is256BitVector() && AllowFloatDomain) {
    assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
      Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v4f64;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
      Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v8f32;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
      Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v8f32;
      return true;
    }
  }

  if (MaskVT.is512BitVector() && AllowFloatDomain) {
    assert(Subtarget.hasAVX512() &&
           "AVX512 required for 512-bit vector shuffles");
    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
      Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v8f64;
      return true;
    }
    if (isTargetShuffleEquivalent(
            Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
      Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v16f32;
      return true;
    }
    if (isTargetShuffleEquivalent(
            Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
      Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v16f32;
      return true;
    }
  }

  return false;
}

// Attempt to match a combined shuffle mask against supported unary immediate
// permute instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
                                     const APInt &Zeroable,
                                     bool AllowFloatDomain, bool AllowIntDomain,
                                     const X86Subtarget &Subtarget,
                                     unsigned &Shuffle, MVT &ShuffleVT,
                                     unsigned &PermuteImm) {
  unsigned NumMaskElts = Mask.size();
  unsigned InputSizeInBits = MaskVT.getSizeInBits();
  unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
  MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);

  bool ContainsZeros =
      llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });

  // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
  if (!ContainsZeros && MaskScalarSizeInBits == 64) {
    // Check for lane crossing permutes.
    if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
      // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
      if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
        Shuffle = X86ISD::VPERMI;
        ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
        PermuteImm = getV4X86ShuffleImm(Mask);
        return true;
      }
      if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
        SmallVector<int, 4> RepeatedMask;
        if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
          Shuffle = X86ISD::VPERMI;
          ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
          PermuteImm = getV4X86ShuffleImm(RepeatedMask);
          return true;
        }
      }
    } else if (AllowFloatDomain && Subtarget.hasAVX()) {
      // VPERMILPD can permute with a non-repeating shuffle.
      Shuffle = X86ISD::VPERMILPI;
      ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
      PermuteImm = 0;
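      // VPERMILPD takes one immediate bit per element, selecting the low or
      // high double within that element's 128-bit lane.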
      for (int i = 0, e = Mask.size(); i != e; ++i) {
        int M = Mask[i];
        if (M == SM_SentinelUndef)
          continue;
        assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
        PermuteImm |= (M & 1) << i;
      }
      return true;
    }
  }

  // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
  // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
  // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
  if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
      !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
    SmallVector<int, 4> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
      // Narrow the repeated mask to create 32-bit element permutes.
      SmallVector<int, 4> WordMask = RepeatedMask;
      if (MaskScalarSizeInBits == 64)
        scaleShuffleMask<int>(2, RepeatedMask, WordMask);

      Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
      ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
      ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
      PermuteImm = getV4X86ShuffleImm(WordMask);
      return true;
    }
  }

  // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
  if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
    SmallVector<int, 4> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
      ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
      ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);

      // PSHUFLW: permute lower 4 elements only.
      if (isUndefOrInRange(LoMask, 0, 4) &&
          isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
        Shuffle = X86ISD::PSHUFLW;
        ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
        PermuteImm = getV4X86ShuffleImm(LoMask);
        return true;
      }

      // PSHUFHW: permute upper 4 elements only.
      if (isUndefOrInRange(HiMask, 4, 8) &&
          isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
        // Offset the HiMask so that we can create the shuffle immediate.
        int OffsetHiMask[4];
        for (int i = 0; i != 4; ++i)
          OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);

        Shuffle = X86ISD::PSHUFHW;
        ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
        PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
        return true;
      }
    }
  }

  // Attempt to match against byte/bit shifts.
  // FIXME: Add 512-bit support.
  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
                         (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
    int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
                                       Mask, 0, Zeroable, Subtarget);
    if (0 < ShiftAmt) {
      PermuteImm = (unsigned)ShiftAmt;
      return true;
    }
  }

  return false;
}

// Attempt to match a combined unary shuffle mask against supported binary
// shuffle instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
                               bool AllowFloatDomain, bool AllowIntDomain,
                               SDValue &V1, SDValue &V2, const SDLoc &DL,
                               SelectionDAG &DAG, const X86Subtarget &Subtarget,
                               unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
                               bool IsUnary) {
  unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();

  if (MaskVT.is128BitVector()) {
    if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
      V2 = V1;
      V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
      Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
      SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
      V2 = V1;
      Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
      SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
        (AllowFloatDomain || !Subtarget.hasSSE41())) {
      std::swap(V1, V2);
      Shuffle = X86ISD::MOVSD;
      SrcVT = DstVT = MVT::v2f64;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
        (AllowFloatDomain || !Subtarget.hasSSE41())) {
      Shuffle = X86ISD::MOVSS;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
  }

  // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
  if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
      ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
      ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
    if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
                                   Subtarget)) {
      DstVT = MaskVT;
      return true;
    }
  }

  // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
  if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
      (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
      (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
      (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
      (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
    if (matchVectorShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL,
                                    DAG, Subtarget)) {
      SrcVT = DstVT = MaskVT;
      if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
        SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
      return true;
    }
  }

  return false;
}

static bool matchBinaryPermuteShuffle(
    MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
    bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
    const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
    unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
  unsigned NumMaskElts = Mask.size();
  unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();

  // Attempt to match against PALIGNR byte rotate.
  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
                         (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
    int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
    if (0 < ByteRotation) {
      Shuffle = X86ISD::PALIGNR;
      ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
      PermuteImm = ByteRotation;
      return true;
    }
  }

  // Attempt to combine to X86ISD::BLENDI.
  if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
                            (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
      (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
    uint64_t BlendMask = 0;
    bool ForceV1Zero = false, ForceV2Zero = false;
    SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
    if (matchVectorShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
                                  ForceV2Zero, BlendMask)) {
      if (MaskVT == MVT::v16i16) {
        // We can only use v16i16 PBLENDW if the lanes are repeated.
        SmallVector<int, 8> RepeatedMask;
        if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
                                        RepeatedMask)) {
          assert(RepeatedMask.size() == 8 &&
                 "Repeated mask size doesn't match!");
          PermuteImm = 0;
          for (int i = 0; i < 8; ++i)
            if (RepeatedMask[i] >= 8)
              PermuteImm |= 1 << i;
          V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
          V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
          Shuffle = X86ISD::BLENDI;
          ShuffleVT = MaskVT;
          return true;
        }
      } else {
        V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
        V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
        PermuteImm = (unsigned)BlendMask;
        Shuffle = X86ISD::BLENDI;
        ShuffleVT = MaskVT;
        return true;
      }
    }
  }

  // Attempt to combine to INSERTPS, but only if it has elements that need to
  // be set to zero.
  if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
      MaskVT.is128BitVector() &&
      llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }) &&
      matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
    Shuffle = X86ISD::INSERTPS;
    ShuffleVT = MVT::v4f32;
    return true;
  }

  // Attempt to combine to SHUFPD.
  if (AllowFloatDomain && EltSizeInBits == 64 &&
      ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
       (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
    bool ForceV1Zero = false, ForceV2Zero = false;
    if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
                               PermuteImm, Mask, Zeroable)) {
      V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
      V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
      Shuffle = X86ISD::SHUFP;
      ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
      return true;
    }
  }

  // Attempt to combine to SHUFPS.
  if (AllowFloatDomain && EltSizeInBits == 32 &&
      ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
       (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
    SmallVector<int, 4> RepeatedMask;
    if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
      // Match each half of the repeated mask to determine whether it just
      // references one of the vectors, is zeroable or is entirely undef.
      auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
        int M0 = RepeatedMask[Offset];
        int M1 = RepeatedMask[Offset + 1];

        if (isUndefInRange(RepeatedMask, Offset, 2)) {
          return DAG.getUNDEF(MaskVT);
        } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
          S0 = (SM_SentinelUndef == M0 ? -1 : 0);
          S1 = (SM_SentinelUndef == M1 ? -1 : 1);
          return getZeroVector(MaskVT, Subtarget, DAG, DL);
        } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
          S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
          S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
          return V1;
        } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
          S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
          S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
          return V2;
        }

        return SDValue();
      };

      int ShufMask[4] = {-1, -1, -1, -1};
      SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
      SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);

      if (Lo && Hi) {
        V1 = Lo;
        V2 = Hi;
        Shuffle = X86ISD::SHUFP;
        ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
        PermuteImm = getV4X86ShuffleImm(ShufMask);
        return true;
      }
    }
  }

  // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
  if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
      MaskVT.is128BitVector() &&
      matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
    Shuffle = X86ISD::INSERTPS;
    ShuffleVT = MVT::v4f32;
    return true;
  }

  return false;
}

static SDValue combineX86ShuffleChainWithExtract(
    ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
    const X86Subtarget &Subtarget);

/// Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combine below. When we have found some
/// chain of single-use x86 shuffle instructions and accumulated the combined
/// shuffle mask represented by them, this will try to pattern match that mask
/// into either a single instruction if there is a special purpose instruction
/// for this operation, or into a PSHUFB instruction which is a fully general
/// instruction but should only be used to replace chains over a certain depth.
static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
                                      ArrayRef<int> BaseMask, int Depth,
                                      bool HasVariableMask,
                                      bool AllowVariableMask, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
  assert((Inputs.size() == 1 || Inputs.size() == 2) &&
         "Unexpected number of shuffle inputs!");

  // Find the inputs that enter the chain. Note that multiple uses are OK
  // here; we're not going to remove the operands we find.
  bool UnaryShuffle = (Inputs.size() == 1);
  SDValue V1 = peekThroughBitcasts(Inputs[0]);
  SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
                             : peekThroughBitcasts(Inputs[1]));

  MVT VT1 = V1.getSimpleValueType();
  MVT VT2 = V2.getSimpleValueType();
  MVT RootVT = Root.getSimpleValueType();
  assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
         VT2.getSizeInBits() == RootVT.getSizeInBits() &&
         "Vector size mismatch");

  SDLoc DL(Root);
  SDValue Res;

  unsigned NumBaseMaskElts = BaseMask.size();
  if (NumBaseMaskElts == 1) {
    assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
    return DAG.getBitcast(RootVT, V1);
  }

  unsigned RootSizeInBits = RootVT.getSizeInBits();
  unsigned NumRootElts = RootVT.getVectorNumElements();
  unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
  bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
                     (RootVT.isFloatingPoint() && Depth >= 1) ||
                     (RootVT.is256BitVector() && !Subtarget.hasAVX2());

  // Don't combine if we are an AVX512/EVEX target and the mask element size
  // is different from the root element size - this would prevent writemasks
  // from being reused.
  // TODO - this currently prevents all lane shuffles from occurring.
  // TODO - check for writemasks usage instead of always preventing combining.
  // TODO - attempt to narrow Mask back to writemask size.
  bool IsEVEXShuffle =
      RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);

  // Attempt to match a subvector broadcast.
  // shuffle(insert_subvector(undef, sub, 0), undef, 0, 0, 0, 0)
  if (UnaryShuffle &&
      (BaseMaskEltSizeInBits == 128 || BaseMaskEltSizeInBits == 256)) {
    SmallVector<int, 64> BroadcastMask(NumBaseMaskElts, 0);
    if (isTargetShuffleEquivalent(BaseMask, BroadcastMask)) {
      SDValue Src = Inputs[0];
      if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
          Src.getOperand(0).isUndef() &&
          Src.getOperand(1).getValueSizeInBits() == BaseMaskEltSizeInBits &&
          MayFoldLoad(Src.getOperand(1)) && isNullConstant(Src.getOperand(2))) {
        return DAG.getBitcast(RootVT, DAG.getNode(X86ISD::SUBV_BROADCAST, DL,
                                                  Src.getValueType(),
                                                  Src.getOperand(1)));
      }
    }
  }

  // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.

  // Handle 128-bit lane shuffles of 256-bit vectors.
  // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
  // we need to use the zeroing feature.
  // TODO - this should support binary shuffles.
  if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
      !(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) &&
      !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
    if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
      return SDValue(); // Nothing to do!
    MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
    unsigned PermMask = 0;
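    // Each nibble of the VPERM2X128 immediate selects the source 128-bit lane
    // for one half of the result; bit 3 (0x8) zeroes that half, which we use
    // for zeroable (negative) mask elements.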
    PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
    PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);

    Res = DAG.getBitcast(ShuffleVT, V1);
    Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
                      DAG.getUNDEF(ShuffleVT),
                      DAG.getTargetConstant(PermMask, DL, MVT::i8));
    return DAG.getBitcast(RootVT, Res);
  }

  // For masks that have been widened to 128-bit elements or more,
  // narrow back down to 64-bit elements.
  SmallVector<int, 64> Mask;
  if (BaseMaskEltSizeInBits > 64) {
    assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
    int MaskScale = BaseMaskEltSizeInBits / 64;
    scaleShuffleMask<int>(MaskScale, BaseMask, Mask);
  } else {
    Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
  }

  unsigned NumMaskElts = Mask.size();
  unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;

  // Determine the effective mask value type.
  FloatDomain &= (32 <= MaskEltSizeInBits);
  MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
                           : MVT::getIntegerVT(MaskEltSizeInBits);
  MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);

  // Only allow legal mask types.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
    return SDValue();

  // Attempt to match the mask against known shuffle patterns.
  MVT ShuffleSrcVT, ShuffleVT;
  unsigned Shuffle, PermuteImm;

  // Which shuffle domains are permitted?
  // Permit domain crossing at higher combine depths.
  // TODO: Should we indicate which domain is preferred if both are allowed?
  bool AllowFloatDomain = FloatDomain || (Depth >= 3);
  bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
                        (!MaskVT.is256BitVector() || Subtarget.hasAVX2());

  // Determine zeroable mask elements.
  APInt Zeroable(NumMaskElts, 0);
  for (unsigned i = 0; i != NumMaskElts; ++i)
    if (isUndefOrZero(Mask[i]))
      Zeroable.setBit(i);

  if (UnaryShuffle) {
    // If we are shuffling an X86ISD::VZEXT_LOAD then we can use the load
    // directly if we don't shuffle the lower element and we shuffle the upper
    // (zero) elements within themselves.
    if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
        (cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() %
         MaskEltSizeInBits) == 0) {
      unsigned Scale =
          cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() /
          MaskEltSizeInBits;
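      // Each loaded element spans Scale mask elements. The load can be reused
      // as-is if the low Scale elements are the identity and the remaining
      // elements only reference the (zero) upper part.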
      ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
      if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
          isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
        return DAG.getBitcast(RootVT, V1);
      }
    }

    // Attempt to match against broadcast-from-vector.
    // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
    if ((Subtarget.hasAVX2() || (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits))
        && (!IsEVEXShuffle || NumRootElts == NumMaskElts)) {
      SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
      if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
        if (V1.getValueType() == MaskVT &&
            V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
            MayFoldLoad(V1.getOperand(0))) {
          if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
            return SDValue(); // Nothing to do!
          Res = V1.getOperand(0);
          Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
          return DAG.getBitcast(RootVT, Res);
        }
        if (Subtarget.hasAVX2()) {
          if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
            return SDValue(); // Nothing to do!
          Res = DAG.getBitcast(MaskVT, V1);
          Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
          return DAG.getBitcast(RootVT, Res);
        }
      }
    }

    SDValue NewV1 = V1; // Save operand in case early exit happens.
    if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
                          DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
                          ShuffleVT) &&
        (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
      if (Depth == 0 && Root.getOpcode() == Shuffle)
        return SDValue(); // Nothing to do!
      Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
      Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
      return DAG.getBitcast(RootVT, Res);
    }

    if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
                                 AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
                                 PermuteImm) &&
        (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
      if (Depth == 0 && Root.getOpcode() == Shuffle)
        return SDValue(); // Nothing to do!
      Res = DAG.getBitcast(ShuffleVT, V1);
      Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
                        DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }
  }

  SDValue NewV1 = V1; // Save operands in case early exit happens.
  SDValue NewV2 = V2;
  if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
                         NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
                         ShuffleVT, UnaryShuffle) &&
      (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
    if (Depth == 0 && Root.getOpcode() == Shuffle)
      return SDValue(); // Nothing to do!
    NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
    NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
    return DAG.getBitcast(RootVT, Res);
  }

  NewV1 = V1; // Save operands in case early exit happens.
  NewV2 = V2;
  if (matchBinaryPermuteShuffle(
          MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
          NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
      (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
    if (Depth == 0 && Root.getOpcode() == Shuffle)
      return SDValue(); // Nothing to do!
    NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
    NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
                      DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
    return DAG.getBitcast(RootVT, Res);
  }

  // Typically from here on, we need an integer version of MaskVT.
  MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
  IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);

  // Annoyingly, SSE4A instructions don't map into the above match helpers.
  if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
    uint64_t BitLen, BitIdx;
    if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
                            Zeroable)) {
      if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
        return SDValue(); // Nothing to do!
      V1 = DAG.getBitcast(IntMaskVT, V1);
      Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }

    if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
      if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
        return SDValue(); // Nothing to do!
      V1 = DAG.getBitcast(IntMaskVT, V1);
      V2 = DAG.getBitcast(IntMaskVT, V2);
      Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }
  }

  // Don't try to re-form single instruction chains under any circumstances now
  // that we've done encoding canonicalization for them.
  if (Depth < 1)
    return SDValue();

  // Depth threshold above which we can efficiently use variable mask shuffles.
  int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 1 : 2;
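  // Only use variable mask shuffles once the combine depth justifies it, or
  // if the chain already contains a variable mask shuffle.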
  AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;

  bool MaskContainsZeros =
      any_of(Mask, [](int M) { return M == SM_SentinelZero; });

  if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
    // If we have a single input lane-crossing shuffle then lower to VPERMV.
    if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
        ((Subtarget.hasAVX2() &&
          (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
         (Subtarget.hasAVX512() &&
          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
      Res = DAG.getBitcast(MaskVT, V1);
      Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
      return DAG.getBitcast(RootVT, Res);
    }

    // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
    // vector as the second source.
    if (UnaryShuffle && AllowVariableMask &&
        ((Subtarget.hasAVX512() &&
          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
         (Subtarget.hasVLX() &&
          (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
           MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
      // Adjust shuffle mask - replace SM_SentinelZero with second source index.
      for (unsigned i = 0; i != NumMaskElts; ++i)
        if (Mask[i] == SM_SentinelZero)
          Mask[i] = NumMaskElts + i;

      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
      Res = DAG.getBitcast(MaskVT, V1);
      SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
      Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
      return DAG.getBitcast(RootVT, Res);
    }

    // If that failed and either input is extracted then try to combine as a
    // shuffle with the larger type.
    if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
            Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
            DAG, Subtarget))
      return WideShuffle;

    // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
    if (AllowVariableMask && !MaskContainsZeros &&
        ((Subtarget.hasAVX512() &&
          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
         (Subtarget.hasVLX() &&
          (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
           MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
      V1 = DAG.getBitcast(MaskVT, V1);
      V2 = DAG.getBitcast(MaskVT, V2);
      Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
      return DAG.getBitcast(RootVT, Res);
    }
    return SDValue();
  }

  // See if we can combine a single input shuffle with zeros to a bit-mask,
  // which is much simpler than any shuffle.
  if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
      isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
      DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
    APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
    APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
    APInt UndefElts(NumMaskElts, 0);
    SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
    for (unsigned i = 0; i != NumMaskElts; ++i) {
      int M = Mask[i];
      if (M == SM_SentinelUndef) {
        UndefElts.setBit(i);
        continue;
      }
      if (M == SM_SentinelZero)
        continue;
      EltBits[i] = AllOnes;
    }
    SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
    Res = DAG.getBitcast(MaskVT, V1);
    unsigned AndOpcode =
        FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
    Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
    return DAG.getBitcast(RootVT, Res);
  }

  // If we have a single input shuffle with different shuffle patterns in the
  // 128-bit lanes, use the variable mask form of VPERMILPS.
  // TODO: Combine other mask types at higher depths.
  if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
      ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
       (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
    SmallVector<SDValue, 16> VPermIdx;
    for (int M : Mask) {
      SDValue Idx =
          M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
      VPermIdx.push_back(Idx);
    }
    SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
    Res = DAG.getBitcast(MaskVT, V1);
    Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
    return DAG.getBitcast(RootVT, Res);
  }

  // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
  // to VPERMIL2PD/VPERMIL2PS.
  if (AllowVariableMask && Subtarget.hasXOP() &&
      (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
       MaskVT == MVT::v8f32)) {
    // VPERMIL2 Operation.
    // Bits[3] - Match Bit.
    // Bits[2:1] - (Per Lane) PD Shuffle Mask.
    // Bits[2:0] - (Per Lane) PS Shuffle Mask.
    unsigned NumLanes = MaskVT.getSizeInBits() / 128;
    unsigned NumEltsPerLane = NumMaskElts / NumLanes;
    SmallVector<int, 8> VPerm2Idx;
    unsigned M2ZImm = 0;
    for (int M : Mask) {
      if (M == SM_SentinelUndef) {
        VPerm2Idx.push_back(-1);
        continue;
      }
      if (M == SM_SentinelZero) {
        M2ZImm = 2;
        VPerm2Idx.push_back(8);
        continue;
      }
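      // Build the per-lane source index, adding a lane's worth of elements
      // when the element comes from V2; 64-bit selectors sit in bits[2:1],
      // hence the extra shift.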
      int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
      Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
      VPerm2Idx.push_back(Index);
    }
    V1 = DAG.getBitcast(MaskVT, V1);
    V2 = DAG.getBitcast(MaskVT, V2);
    SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
    Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
                      DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
    return DAG.getBitcast(RootVT, Res);
  }

  // If we have 3 or more shuffle instructions or a chain involving a variable
  // mask, we can replace them with a single PSHUFB instruction profitably.
  // Intel's manuals suggest only using PSHUFB if doing so replaces 5
  // instructions, but in practice PSHUFB tends to be *very* fast so we're
  // more aggressive.
  if (UnaryShuffle && AllowVariableMask &&
      ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
       (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
       (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
    SmallVector<SDValue, 16> PSHUFBMask;
    int NumBytes = RootVT.getSizeInBits() / 8;
    int Ratio = NumBytes / NumMaskElts;
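    // Expand the mask to per-byte shuffle indices; each mask element covers
    // Ratio bytes, and a control byte with the top bit set (255) zeroes the
    // destination byte.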
    for (int i = 0; i < NumBytes; ++i) {
      int M = Mask[i / Ratio];
      if (M == SM_SentinelUndef) {
        PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
        continue;
      }
      if (M == SM_SentinelZero) {
        PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
        continue;
      }
      M = Ratio * M + i % Ratio;
      assert((M / 16) == (i / 16) && "Lane crossing detected");
      PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
    }
    MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
    Res = DAG.getBitcast(ByteVT, V1);
    SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
    Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
    return DAG.getBitcast(RootVT, Res);
  }

  // With XOP, if we have a 128-bit binary input shuffle we can always combine
  // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
  // slower than PSHUFB on targets that support both.
  if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
    // VPPERM Mask Operation
    // Bits[4:0] - Byte Index (0 - 31)
    // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
    SmallVector<SDValue, 16> VPPERMMask;
    int NumBytes = 16;
    int Ratio = NumBytes / NumMaskElts;
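    // Expand to per-byte indices as for PSHUFB; 128 encodes the ZERO
    // operation (bits[7:5] == 4) described above.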
    for (int i = 0; i < NumBytes; ++i) {
      int M = Mask[i / Ratio];
      if (M == SM_SentinelUndef) {
        VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
        continue;
      }
      if (M == SM_SentinelZero) {
        VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
        continue;
      }
      M = Ratio * M + i % Ratio;
      VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
    }
    MVT ByteVT = MVT::v16i8;
    V1 = DAG.getBitcast(ByteVT, V1);
    V2 = DAG.getBitcast(ByteVT, V2);
    SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
    Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
    return DAG.getBitcast(RootVT, Res);
  }

  // If that failed and either input is extracted then try to combine as a
  // shuffle with the larger type.
  if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
          Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
          DAG, Subtarget))
    return WideShuffle;

  // If we have a dual input shuffle then lower to VPERMV3.
  if (!UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
      ((Subtarget.hasAVX512() &&
        (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
         MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
       (Subtarget.hasVLX() &&
        (MaskVT == MVT::v2f64 || MaskVT == MVT::v2i64 || MaskVT == MVT::v4f64 ||
         MaskVT == MVT::v4i64 || MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 ||
         MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
       (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
       (Subtarget.hasBWI() && Subtarget.hasVLX() &&
        (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16)) ||
       (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
       (Subtarget.hasVBMI() && Subtarget.hasVLX() &&
        (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8)))) {
    SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
    V1 = DAG.getBitcast(MaskVT, V1);
    V2 = DAG.getBitcast(MaskVT, V2);
    Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
    return DAG.getBitcast(RootVT, Res);
  }

  // Failed to find any combines.
  return SDValue();
}

// Combine an arbitrary chain of shuffles + extract_subvectors into a single
// instruction if possible.
//
// Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
// type size to attempt to combine:
// shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
// -->
// extract_subvector(shuffle(x,y,m2),0)
static SDValue combineX86ShuffleChainWithExtract(
    ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
    const X86Subtarget &Subtarget) {
  unsigned NumMaskElts = BaseMask.size();
  unsigned NumInputs = Inputs.size();
  if (NumInputs == 0)
    return SDValue();

  SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
  SmallVector<unsigned, 4> Offsets(NumInputs, 0);

  // Peek through subvectors.
  // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
  unsigned WideSizeInBits = WideInputs[0].getValueSizeInBits();
  for (unsigned i = 0; i != NumInputs; ++i) {
    SDValue &Src = WideInputs[i];
    unsigned &Offset = Offsets[i];
    Src = peekThroughBitcasts(Src);
    EVT BaseVT = Src.getValueType();
    while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
           isa<ConstantSDNode>(Src.getOperand(1))) {
      Offset += Src.getConstantOperandVal(1);
      Src = Src.getOperand(0);
    }
    WideSizeInBits = std::max(WideSizeInBits, Src.getValueSizeInBits());
    assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
           "Unexpected subvector extraction");
    Offset /= BaseVT.getVectorNumElements();
    Offset *= NumMaskElts;
  }

  // Bail if we're always extracting from the lowest subvectors;
  // combineX86ShuffleChain should match this for the current width.
  if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
    return SDValue();

  EVT RootVT = Root.getValueType();
  unsigned RootSizeInBits = RootVT.getSizeInBits();
  unsigned Scale = WideSizeInBits / RootSizeInBits;
  assert((WideSizeInBits % RootSizeInBits) == 0 &&
         "Unexpected subvector extraction");

  // If the src vector types aren't the same, see if we can extend
  // them to match each other.
  // TODO: Support different scalar types?
  EVT WideSVT = WideInputs[0].getValueType().getScalarType();
  if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
        return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
               Op.getValueType().getScalarType() != WideSVT;
      }))
    return SDValue();

  for (SDValue &NewInput : WideInputs) {
    assert((WideSizeInBits % NewInput.getValueSizeInBits()) == 0 &&
           "Shuffle vector size mismatch");
    if (WideSizeInBits > NewInput.getValueSizeInBits())
      NewInput = widenSubVector(NewInput, false, Subtarget, DAG,
                                SDLoc(NewInput), WideSizeInBits);
    assert(WideSizeInBits == NewInput.getValueSizeInBits() &&
           "Unexpected subvector extraction");
  }

  // Create new mask for larger type.
  for (unsigned i = 1; i != NumInputs; ++i)
    Offsets[i] += i * Scale * NumMaskElts;

  SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
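  // Remap each mask element: keep its offset within the original subvector and
  // add that input's subvector offset within the concatenated wide inputs.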
  for (int &M : WideMask) {
    if (M < 0)
      continue;
    M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
  }
  WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);

  // Remove unused/repeated shuffle source ops.
  resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
  assert(!WideInputs.empty() && "Shuffle with no inputs detected");

  if (WideInputs.size() > 2)
    return SDValue();

  // Increase depth for every upper subvector we've peeked through.
  Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });

  // Attempt to combine wider chain.
  // TODO: Can we use a better Root?
  SDValue WideRoot = WideInputs[0];
  if (SDValue WideShuffle = combineX86ShuffleChain(
          WideInputs, WideRoot, WideMask, Depth, HasVariableMask,
          AllowVariableMask, DAG, Subtarget)) {
    WideShuffle =
        extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
    return DAG.getBitcast(RootVT, WideShuffle);
  }
  return SDValue();
}

// Attempt to constant fold all of the constant source ops.
// Returns the folded node if the entire shuffle is folded to a constant.
// TODO: Extend this to merge multiple constant Ops and update the mask.
static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
                                           ArrayRef<int> Mask, SDValue Root,
                                           bool HasVariableMask,
                                           SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  MVT VT = Root.getSimpleValueType();

  unsigned SizeInBits = VT.getSizeInBits();
  unsigned NumMaskElts = Mask.size();
  unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
  unsigned NumOps = Ops.size();

  // Extract constant bits from each source op.
  bool OneUseConstantOp = false;
  SmallVector<APInt, 16> UndefEltsOps(NumOps);
  SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue SrcOp = Ops[i];
    OneUseConstantOp |= SrcOp.hasOneUse();
    if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
                                       RawBitsOps[i]))
      return SDValue();
  }

  // Only fold if at least one of the constants is only used once or
  // the combined shuffle has included a variable mask shuffle, this
  // is to avoid constant pool bloat.
  if (!OneUseConstantOp && !HasVariableMask)
    return SDValue();

  // Shuffle the constant bits according to the mask.
  APInt UndefElts(NumMaskElts, 0);
  APInt ZeroElts(NumMaskElts, 0);
  APInt ConstantElts(NumMaskElts, 0);
  SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
                                        APInt::getNullValue(MaskSizeInBits));
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef) {
      UndefElts.setBit(i);
      continue;
    } else if (M == SM_SentinelZero) {
      ZeroElts.setBit(i);
      continue;
    }
    assert(0 <= M && M < (int)(NumMaskElts * NumOps));

    unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
    unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;

    auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
    if (SrcUndefElts[SrcMaskIdx]) {
      UndefElts.setBit(i);
      continue;
    }

    auto &SrcEltBits = RawBitsOps[SrcOpIdx];
    APInt &Bits = SrcEltBits[SrcMaskIdx];
    if (!Bits) {
      ZeroElts.setBit(i);
      continue;
    }

    ConstantElts.setBit(i);
    ConstantBitData[i] = Bits;
  }
  assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());

  // Create the constant data.
  MVT MaskSVT;
  if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
    MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
  else
    MaskSVT = MVT::getIntegerVT(MaskSizeInBits);

  MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);

  SDLoc DL(Root);
  SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
  return DAG.getBitcast(VT, CstOp);
}

/// Fully generic combining of x86 shuffle instructions.
///
/// This should be the last combine run over the x86 shuffle instructions. Once
/// they have been fully optimized, this will recursively consider all chains
/// of single-use shuffle instructions, build a generic model of the cumulative
/// shuffle operation, and check for simpler instructions which implement this
/// operation. We use this primarily for two purposes:
///
/// 1) Collapse generic shuffles to specialized single instructions when
///    equivalent. In most cases, this is just an encoding size win, but
///    sometimes we will collapse multiple generic shuffles into a single
///    special-purpose shuffle.
/// 2) Look for sequences of shuffle instructions with 3 or more total
///    instructions, and replace them with the slightly more expensive SSSE3
///    PSHUFB instruction if available. We do this as the last combining step
///    to ensure we avoid using PSHUFB if we can implement the shuffle with
///    a suitable short sequence of other instructions. The PSHUFB will either
///    use a register or have to read from memory and so is slightly (but only
///    slightly) more expensive than the other shuffle instructions.
///
/// Because this is inherently a quadratic operation (for each shuffle in
/// a chain, we recurse up the chain), the depth is limited to 8 instructions.
/// This should never be an issue in practice as the shuffle lowering doesn't
/// produce sequences of more than 8 instructions.
///
/// FIXME: We will currently miss some cases where the redundant shuffling
/// would simplify under the threshold for PSHUFB formation because of
/// combine-ordering. To fix this, we should do the redundant instruction
/// combining in this recursive walk.
static SDValue combineX86ShufflesRecursively(
    ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
    ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
    const X86Subtarget &Subtarget) {
  assert(RootMask.size() > 0 && (RootMask.size() > 1 || RootMask[0] == 0) &&
         "Illegal shuffle root mask");

  // Bound the depth of our recursive combine because this is ultimately
  // quadratic in nature.
  const unsigned MaxRecursionDepth = 8;
  if (Depth >= MaxRecursionDepth)
    return SDValue();

  // Directly rip through bitcasts to find the underlying operand.
  SDValue Op = SrcOps[SrcOpIndex];
  Op = peekThroughOneUseBitcasts(Op);

  MVT VT = Op.getSimpleValueType();
  if (!VT.isVector())
    return SDValue(); // Bail if we hit a non-vector.

  assert(Root.getSimpleValueType().isVector() &&
         "Shuffles operate on vector types!");
  assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
         "Can only combine shuffles of the same vector register size.");

  // Extract target shuffle mask and resolve sentinels and inputs.
  // TODO - determine Op's demanded elts from RootMask.
  SmallVector<int, 64> OpMask;
  SmallVector<SDValue, 2> OpInputs;
  APInt OpUndef, OpZero;
  APInt OpDemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
  if (!getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
                              OpZero, DAG, Depth, false))
    return SDValue();

  resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);

  // Add the inputs to the Ops list, avoiding duplicates.
  SmallVector<SDValue, 16> Ops(SrcOps.begin(), SrcOps.end());

  auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
    // Attempt to find an existing match.
    SDValue InputBC = peekThroughBitcasts(Input);
    for (int i = 0, e = Ops.size(); i < e; ++i)
      if (InputBC == peekThroughBitcasts(Ops[i]))
        return i;
    // Match failed - should we replace an existing Op?
    if (InsertionPoint >= 0) {
      Ops[InsertionPoint] = Input;
      return InsertionPoint;
    }
    // Add to the end of the Ops list.
    Ops.push_back(Input);
    return Ops.size() - 1;
  };

  SmallVector<int, 2> OpInputIdx;
  for (SDValue OpInput : OpInputs)
    OpInputIdx.push_back(AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));

  assert(((RootMask.size() > OpMask.size() &&
           RootMask.size() % OpMask.size() == 0) ||
          (OpMask.size() > RootMask.size() &&
           OpMask.size() % RootMask.size() == 0) ||
          OpMask.size() == RootMask.size()) &&
         "The smaller number of elements must divide the larger.");

  // This function can be performance-critical, so we rely on the power-of-2
  // knowledge that we have about the mask sizes to replace div/rem ops with
  // bit-masks and shifts.
  assert(isPowerOf2_32(RootMask.size()) && "Non-power-of-2 shuffle mask sizes");
  assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
  unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
  unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());

  unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
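  // RootRatio/OpRatio give how many widened mask elements each root/op mask
  // element covers; at most one of them can be greater than one.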
  unsigned RootRatio = std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
  unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
  assert((RootRatio == 1 || OpRatio == 1) &&
         "Must not have a ratio for both incoming and op masks!");

  assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
  assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
  assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
  unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
  unsigned OpRatioLog2 = countTrailingZeros(OpRatio);

  SmallVector<int, 64> Mask(MaskWidth, SM_SentinelUndef);

  // Merge this shuffle operation's mask into our accumulated mask. Note that
  // this shuffle's mask will be the first applied to the input, followed by the
  // root mask to get us all the way to the root value arrangement. The reason
  // for this order is that we are recursing up the operation chain.
  for (unsigned i = 0; i < MaskWidth; ++i) {
    unsigned RootIdx = i >> RootRatioLog2;
    if (RootMask[RootIdx] < 0) {
      // This is a zero or undef lane, we're done.
      Mask[i] = RootMask[RootIdx];
      continue;
    }

    unsigned RootMaskedIdx =
        RootRatio == 1
            ? RootMask[RootIdx]
            : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));

    // Just insert the scaled root mask value if it references an input other
    // than the SrcOp we're currently inserting.
    if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
        (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
      Mask[i] = RootMaskedIdx;
      continue;
    }

    RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
    unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
    if (OpMask[OpIdx] < 0) {
      // The incoming lanes are zero or undef, it doesn't matter which ones we
      // are using.
      Mask[i] = OpMask[OpIdx];
      continue;
    }

    // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
    unsigned OpMaskedIdx =
        OpRatio == 1
            ? OpMask[OpIdx]
            : (OpMask[OpIdx] << OpRatioLog2) + (RootMaskedIdx & (OpRatio - 1));

    OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
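    // Rebase onto the combined Ops list: work out which of this op's inputs
    // the element refers to and offset by that input's slot in Ops.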
    int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
    assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
    OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;

    Mask[i] = OpMaskedIdx;
  }

  // Remove unused/repeated shuffle source ops.
  resolveTargetShuffleInputsAndMask(Ops, Mask);

  // Handle the all undef/zero cases early.
  if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
    return DAG.getUNDEF(Root.getValueType());

  // TODO - should we handle the mixed zero/undef case as well? Just returning
  // a zero mask will lose information on undef elements possibly reducing
  // future combine possibilities.
  if (all_of(Mask, [](int Idx) { return Idx < 0; }))
    return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
                         SDLoc(Root));

  assert(!Ops.empty() && "Shuffle with no inputs detected");
  HasVariableMask |= IsOpVariableMask;

  // Update the list of shuffle nodes that have been combined so far.
  SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
                                                SrcNodes.end());
  CombinedNodes.push_back(Op.getNode());

  // See if we can recurse into each shuffle source op (if it's a target
  // shuffle). The source op should generally only be combined if it either has
  // a single use (i.e. current Op) or all its users have already been combined.
  // If not, we can still combine but should prevent generation of variable
  // shuffles to avoid constant pool bloat.
  // Don't recurse if we already have more source ops than we can combine in
  // the remaining recursion depth.
  if (Ops.size() < (MaxRecursionDepth - Depth)) {
    for (int i = 0, e = Ops.size(); i < e; ++i) {
      bool AllowVar = false;
      if (Ops[i].getNode()->hasOneUse() ||
          SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
        AllowVar = AllowVariableMask;
      if (SDValue Res = combineX86ShufflesRecursively(
              Ops, i, Root, Mask, CombinedNodes, Depth + 1, HasVariableMask,
              AllowVar, DAG, Subtarget))
        return Res;
    }
  }

  // Attempt to constant fold all of the constant source ops.
  if (SDValue Cst = combineX86ShufflesConstants(
          Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
    return Cst;

  // We can only combine unary and binary shuffle mask cases.
  if (Ops.size() <= 2) {
    // Minor canonicalization of the accumulated shuffle mask to make it easier
    // to match below. All this does is detect masks with sequential pairs of
    // elements, and shrink them to the half-width mask. It does this in a loop
    // so it will reduce the size of the mask to the minimal width mask which
    // performs an equivalent shuffle.
    SmallVector<int, 64> WidenedMask;
    while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
      Mask = std::move(WidenedMask);
    }

    // Canonicalization of binary shuffle masks to improve pattern matching by
    // commuting the inputs.
    if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
      ShuffleVectorSDNode::commuteMask(Mask);
      std::swap(Ops[0], Ops[1]);
    }

    // Finally, try to combine into a single shuffle instruction.
    return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
                                  AllowVariableMask, DAG, Subtarget);
  }

  // If that failed and any input is extracted then try to combine as a
  // shuffle with the larger type.
  return combineX86ShuffleChainWithExtract(Ops, Root, Mask, Depth,
                                           HasVariableMask, AllowVariableMask,
                                           DAG, Subtarget);
}

/// Helper entry wrapper to combineX86ShufflesRecursively.
static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
                                             const X86Subtarget &Subtarget) {
  return combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 0,
                                       /*HasVarMask*/ false,
                                       /*AllowVarMask*/ true, DAG, Subtarget);
}

/// Get the PSHUF-style mask from PSHUF node.
///
/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
/// PSHUF-style masks that can be reused with such instructions.
static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
  MVT VT = N.getSimpleValueType();
  SmallVector<int, 4> Mask;
  SmallVector<SDValue, 2> Ops;
  bool IsUnary;
  bool HaveMask =
      getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
  (void)HaveMask;
  assert(HaveMask);

  // If we have more than 128 bits, only the low 128 bits of the shuffle mask
  // matter. Check that the upper masks are repeats and remove them.
  if (VT.getSizeInBits() > 128) {
    int LaneElts = 128 / VT.getScalarSizeInBits();
#ifndef NDEBUG
    for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
      for (int j = 0; j < LaneElts; ++j)
        assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
               "Mask doesn't repeat in high 128-bit lanes!");
#endif
    Mask.resize(LaneElts);
  }

  switch (N.getOpcode()) {
  case X86ISD::PSHUFD:
    return Mask;
  case X86ISD::PSHUFLW:
    Mask.resize(4);
    return Mask;
  case X86ISD::PSHUFHW:
    Mask.erase(Mask.begin(), Mask.begin() + 4);
    for (int &M : Mask)
      M -= 4;
    return Mask;
  default:
    llvm_unreachable("No valid shuffle instruction found!");
  }
}

/// Search for a combinable shuffle across a chain ending in pshufd.
///
/// We walk up the chain and look for a combinable shuffle, skipping over
/// shuffles that we could hoist this shuffle's transformation past without
/// altering anything.
static SDValue
combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
                             SelectionDAG &DAG) {
  assert(N.getOpcode() == X86ISD::PSHUFD &&
         "Called with something other than an x86 128-bit half shuffle!");
  SDLoc DL(N);

  // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
  // of the shuffles in the chain so that we can form a fresh chain to replace
  // this one.
  SmallVector<SDValue, 8> Chain;
  SDValue V = N.getOperand(0);
  for (; V.hasOneUse(); V = V.getOperand(0)) {
    switch (V.getOpcode()) {
    default:
      return SDValue(); // Nothing combined!

    case ISD::BITCAST:
      // Skip bitcasts as we always know the type for the target specific
      // instructions.
      continue;

    case X86ISD::PSHUFD:
      // Found another dword shuffle.
      break;

    case X86ISD::PSHUFLW:
      // Check that the low words (being shuffled) are the identity in the
      // dword shuffle, and the high words are self-contained.
      if (Mask[0] != 0 || Mask[1] != 1 ||
          !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
        return SDValue();

      Chain.push_back(V);
      continue;

    case X86ISD::PSHUFHW:
      // Check that the high words (being shuffled) are the identity in the
      // dword shuffle, and the low words are self-contained.
      if (Mask[2] != 2 || Mask[3] != 3 ||
          !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
        return SDValue();

      Chain.push_back(V);
      continue;

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
      // shuffle into a preceding word shuffle.
      if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
          V.getSimpleValueType().getVectorElementType() != MVT::i16)
        return SDValue();

      // Search for a half-shuffle which we can combine with.
      unsigned CombineOp =
          V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
      if (V.getOperand(0) != V.getOperand(1) ||
          !V->isOnlyUserOf(V.getOperand(0).getNode()))
        return SDValue();
      Chain.push_back(V);
      V = V.getOperand(0);
      do {
        switch (V.getOpcode()) {
        default:
          return SDValue(); // Nothing to combine.

        case X86ISD::PSHUFLW:
        case X86ISD::PSHUFHW:
          if (V.getOpcode() == CombineOp)
            break;

          Chain.push_back(V);

          LLVM_FALLTHROUGH;
        case ISD::BITCAST:
          V = V.getOperand(0);
          continue;
        }
        break;
      } while (V.hasOneUse());
      break;
    }
    // Break out of the loop if we break out of the switch.
    break;
  }

  if (!V.hasOneUse())
    // We fell out of the loop without finding a viable combining instruction.
    return SDValue();

  // Merge this node's mask and our incoming mask.
  SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
  for (int &M : Mask)
    M = VMask[M];
  V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
                  getV4X86ShuffleImm8ForMask(Mask, DL, DAG));

  // Rebuild the chain around this new shuffle.
  while (!Chain.empty()) {
    SDValue W = Chain.pop_back_val();

    if (V.getValueType() != W.getOperand(0).getValueType())
      V = DAG.getBitcast(W.getOperand(0).getValueType(), V);

    switch (W.getOpcode()) {
    default:
      llvm_unreachable("Only PSHUF and UNPCK instructions get here!");

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
      break;

    case X86ISD::PSHUFD:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFHW:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
      break;
    }
  }
  if (V.getValueType() != N.getValueType())
    V = DAG.getBitcast(N.getValueType(), V);

  // Return the new chain to replace N.
  return V;
}

/// Try to combine x86 target specific shuffles.
static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();
  SmallVector<int, 4> Mask;
  unsigned Opcode = N.getOpcode();

  // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
  // single instruction.
  if (VT.getScalarSizeInBits() == 64 &&
      (Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH ||
       Opcode == X86ISD::UNPCKL)) {
    auto BC0 = peekThroughBitcasts(N.getOperand(0));
    auto BC1 = peekThroughBitcasts(N.getOperand(1));
    EVT VT0 = BC0.getValueType();
    EVT VT1 = BC1.getValueType();
    unsigned Opcode0 = BC0.getOpcode();
    unsigned Opcode1 = BC1.getOpcode();
    if (Opcode0 == Opcode1 && VT0 == VT1 &&
        (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
         Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
         Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
      SDValue Lo, Hi;
      if (Opcode == X86ISD::MOVSD) {
        Lo = BC1.getOperand(0);
        Hi = BC0.getOperand(1);
      } else {
        Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
        Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
      }
      SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
      return DAG.getBitcast(VT, Horiz);
    }
  }

  switch (Opcode) {
  case X86ISD::VBROADCAST: {
    SDValue Src = N.getOperand(0);
    SDValue BC = peekThroughBitcasts(Src);
    EVT SrcVT = Src.getValueType();
    EVT BCVT = BC.getValueType();

    // If broadcasting from another shuffle, attempt to simplify it.
    // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
    if (isTargetShuffle(BC.getOpcode()) &&
        VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
      unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
      SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
                                        SM_SentinelUndef);
      for (unsigned i = 0; i != Scale; ++i)
        DemandedMask[i] = i;
      if (SDValue Res = combineX86ShufflesRecursively(
              {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
              /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
        return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                           DAG.getBitcast(SrcVT, Res));
    }

    // broadcast(bitcast(src)) -> bitcast(broadcast(src))
    // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
    if (Src.getOpcode() == ISD::BITCAST &&
        SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits()) {
      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
                                   VT.getVectorNumElements());
      return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
    }

    // Reduce broadcast source vector to lowest 128-bits.
    if (SrcVT.getSizeInBits() > 128)
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                         extract128BitVector(Src, 0, DAG, DL));

    // broadcast(scalar_to_vector(x)) -> broadcast(x).
    if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));

    // Share broadcast with the longest vector and extract low subvector (free).
    for (SDNode *User : Src->uses())
      if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
                                VT.getSizeInBits());
      }

    // vbroadcast(scalarload X) -> vbroadcast_load X
    // For float loads, extract other uses of the scalar from the broadcast.
    if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
        ISD::isNormalLoad(Src.getNode())) {
      LoadSDNode *LN = cast<LoadSDNode>(Src);
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue BcastLd =
          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
                                  LN->getMemoryVT(), LN->getMemOperand());
      // If the load value is used only by N, replace it via CombineTo N.
      bool NoReplaceExtract = Src.hasOneUse();
      DCI.CombineTo(N.getNode(), BcastLd);
      if (NoReplaceExtract) {
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
      } else {
        SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
                                  DAG.getIntPtrConstant(0, DL));
        DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
      }
      return N; // Return N so it doesn't get rechecked!
    }

    return SDValue();
  }
  case X86ISD::BLENDI: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);

    // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
    // TODO: Handle MVT::v16i16 repeated blend mask.
    if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
        N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
      MVT SrcVT = N0.getOperand(0).getSimpleValueType();
      if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
          SrcVT.getScalarSizeInBits() >= 32) {
        unsigned BlendMask = N.getConstantOperandVal(2);
        unsigned Size = VT.getVectorNumElements();
        unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
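        // The blend mask is defined per wide element; repeat each mask bit
        // Scale times so it covers the corresponding narrow elements.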
        BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
        return DAG.getBitcast(
            VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
                            N1.getOperand(0),
                            DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
      }
    }
    return SDValue();
  }
  case X86ISD::VPERMI: {
    // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
    // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    unsigned EltSizeInBits = VT.getScalarSizeInBits();
    if (N0.getOpcode() == ISD::BITCAST &&
        N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
      SDValue Src = N0.getOperand(0);
      EVT SrcVT = Src.getValueType();
      SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
      return DAG.getBitcast(VT, Res);
    }
    return SDValue();
  }
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    Mask = getPSHUFShuffleMask(N);
    assert(Mask.size() == 4);
    break;
  case X86ISD::MOVSD:
  case X86ISD::MOVSS: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);

    // Canonicalize scalar FPOps:
    // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
    // If commutable, allow OP(N1[0], N0[0]).
    unsigned Opcode1 = N1.getOpcode();
    if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
        Opcode1 == ISD::FDIV) {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      if (N10 == N0 ||
          (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
        if (N10 != N0)
          std::swap(N10, N11);
        MVT SVT = VT.getVectorElementType();
        SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
        N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
        N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
        SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
        SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
        return DAG.getNode(Opcode, DL, VT, N0, SclVec);
      }
    }

    return SDValue();
  }
  case X86ISD::INSERTPS: {
    assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
    SDValue Op0 = N.getOperand(0);
    SDValue Op1 = N.getOperand(1);
    SDValue Op2 = N.getOperand(2);
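    // The INSERTPS immediate encodes the source lane in bits [7:6], the
    // destination lane in bits [5:4] and the zero mask in bits [3:0].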
    unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
    unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
    unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
    unsigned ZeroMask = InsertPSMask & 0xF;

    // If we zero out all elements from Op0 then we don't need to reference it.
    if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));

    // If we zero out the element from Op1 then we don't need to reference it.
    if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));

    // Attempt to merge insertps Op1 with an inner target shuffle node.
    SmallVector<int, 8> TargetMask1;
    SmallVector<SDValue, 2> Ops1;
    APInt KnownUndef1, KnownZero1;
    if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
                                     KnownZero1)) {
      if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
        // Zero/UNDEF insertion - zero out element and remove dependency.
        InsertPSMask |= (1u << DstIdx);
        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
      }
      // Update insertps mask srcidx and reference the source input directly.
      int M = TargetMask1[SrcIdx];
      assert(0 <= M && M < 8 && "Shuffle index out of range");
      InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
      Op1 = Ops1[M < 4 ? 0 : 1];
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
    }

    // Attempt to merge insertps Op0 with an inner target shuffle node.
    SmallVector<int, 8> TargetMask0;
    SmallVector<SDValue, 2> Ops0;
    APInt KnownUndef0, KnownZero0;
    if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
                                     KnownZero0)) {
      bool Updated = false;
      bool UseInput00 = false;
      bool UseInput01 = false;
      for (int i = 0; i != 4; ++i) {
        if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
          // No change if element is already zero or the inserted element.
          continue;
        } else if (KnownUndef0[i] || KnownZero0[i]) {
          // If the target mask is undef/zero then we must zero the element.
          InsertPSMask |= (1u << i);
          Updated = true;
          continue;
        }

        // The input vector element must be inline.
        int M = TargetMask0[i];
        if (M != i && M != (i + 4))
          return SDValue();

        // Determine which inputs of the target shuffle we're using.
        UseInput00 |= (0 <= M && M < 4);
        UseInput01 |= (4 <= M);
      }

      // If we're not using both inputs of the target shuffle then use the
      // referenced input directly.
      if (UseInput00 && !UseInput01) {
        Updated = true;
        Op0 = Ops0[0];
      } else if (!UseInput00 && UseInput01) {
        Updated = true;
        Op0 = Ops0[1];
      }

      if (Updated)
        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
    }

    // If we're inserting an element from a vbroadcast load, fold the
    // load into the X86insertps instruction. We need to convert the scalar
    // load to a vector and clear the source lane of the INSERTPS control.
    if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
      auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
      if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
        SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
                                   MemIntr->getBasePtr(),
                                   MemIntr->getMemOperand());
        SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
                           DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
                                       Load),
                           DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
        DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
        return Insert;
      }
    }

    return SDValue();
  }
  default:
    return SDValue();
  }

  // Nuke no-op shuffles that show up after combining.
  if (isNoopShuffleMask(Mask))
    return N.getOperand(0);

  // Look for simplifications involving one or two shuffle instructions.
  SDValue V = N.getOperand(0);
  switch (N.getOpcode()) {
  default:
    break;
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");

    // See if this reduces to a PSHUFD which is no more expensive and can
    // combine with more operations. Note that it has to at least flip the
    // dwords as otherwise it would have been removed as a no-op.
    if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
      int DMask[] = {0, 1, 2, 3};
      int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
      DMask[DOffset + 0] = DOffset + 1;
      DMask[DOffset + 1] = DOffset + 0;
      MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
      V = DAG.getBitcast(DVT, V);
      V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
                      getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
      return DAG.getBitcast(VT, V);
    }

    // Look for shuffle patterns which can be implemented as a single unpack.
    // FIXME: This doesn't handle the location of the PSHUFD generically, and
    // only works when we have a PSHUFD followed by two half-shuffles.
    if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
        (V.getOpcode() == X86ISD::PSHUFLW ||
         V.getOpcode() == X86ISD::PSHUFHW) &&
        V.getOpcode() != N.getOpcode() &&
        V.hasOneUse()) {
      SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
      if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
        SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
        SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
        int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int WordMask[8];
        for (int i = 0; i < 4; ++i) {
          WordMask[i + NOffset] = Mask[i] + NOffset;
          WordMask[i + VOffset] = VMask[i] + VOffset;
        }
        // Map the word mask through the DWord mask.
        int MappedMask[8];
        for (int i = 0; i < 8; ++i)
          MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
        if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
            makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
          // We can replace all three shuffles with an unpack.
          V = DAG.getBitcast(VT, D.getOperand(0));
          return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
                                                : X86ISD::UNPCKH,
                             DL, VT, V, V);
        }
      }
    }

    break;

  case X86ISD::PSHUFD:
    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
      return NewN;

    break;
  }

  return SDValue();
}

/// Checks if the shuffle mask takes subsequent elements
/// alternately from two vectors.
/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {

  int ParitySrc[2] = {-1, -1};
  unsigned Size = Mask.size();
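  // ParitySrc[0] tracks the input used by the even lanes and ParitySrc[1] the
  // input used by the odd lanes.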
  for (unsigned i = 0; i != Size; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;

    // Make sure we are using the matching element from the input.
    if ((M % Size) != i)
      return false;

    // Make sure we use the same input for all elements of the same parity.
    int Src = M / Size;
    if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
      return false;
    ParitySrc[i % 2] = Src;
  }

  // Make sure each input is used.
  if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
    return false;

  Op0Even = ParitySrc[0] == 0;
  return true;
}

/// Returns true iff the shuffle node \p N can be replaced with an ADDSUB
/// (SUBADD) operation. If true is returned, the operands of the ADDSUB
/// (SUBADD) operation are written to the parameters \p Opnd0 and \p Opnd1.
///
/// We combine shuffles to ADDSUB (SUBADD) directly on the abstract vector
/// shuffle nodes so it is easier to match generically. We also insert dummy
/// vector shuffle nodes for the operands which explicitly discard the lanes
/// unused by this operation, so that the rest of the combiner can see that
/// they're unused.
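///
/// For example, a v4f32 shuffle taking its even lanes from (fsub A, B) and
/// its odd lanes from (fadd A, B) matches ADDSUB(A, B).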
static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
                             SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
                             bool &IsSubAdd) {

  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
      !VT.getSimpleVT().isFloatingPoint())
    return false;

  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);

  // Make sure we have an FADD and an FSUB.
  if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
      (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
      V1.getOpcode() == V2.getOpcode())
    return false;

  // If there are other uses of these operations we can't fold them.
  if (!V1->hasOneUse() || !V2->hasOneUse())
    return false;

  // Ensure that both operations have the same operands. Note that we can
  // commute the FADD operands.
  SDValue LHS, RHS;
  if (V1.getOpcode() == ISD::FSUB) {
    LHS = V1->getOperand(0); RHS = V1->getOperand(1);
    if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
        (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
      return false;
  } else {
    assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
    LHS = V2->getOperand(0); RHS = V2->getOperand(1);
    if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
        (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
      return false;
  }

  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
  bool Op0Even;
  if (!isAddSubOrSubAddMask(Mask, Op0Even))
    return false;

  // It's a subadd if the input supplying the even lanes is an FADD.
  IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
                     : V2->getOpcode() == ISD::FADD;

  Opnd0 = LHS;
  Opnd1 = RHS;
  return true;
}

/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
static SDValue combineShuffleToFMAddSub(SDNode *N,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
    return SDValue();

  // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue FMAdd = Op0, FMSub = Op1;
  if (FMSub.getOpcode() != X86ISD::FMSUB)
    std::swap(FMAdd, FMSub);

  if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
      FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
      FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
      FMAdd.getOperand(2) != FMSub.getOperand(2))
    return SDValue();

  // Check for correct shuffle mask.
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
  bool Op0Even;
  if (!isAddSubOrSubAddMask(Mask, Op0Even))
    return SDValue();

  // FMAddSub takes zeroth operand from FMSub node.
  SDLoc DL(N);
  bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
  unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
  return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
                     FMAdd.getOperand(2));
}

/// Try to combine a shuffle into a target-specific add-sub or
/// mul-add-sub node.
static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
                                                const X86Subtarget &Subtarget,
                                                SelectionDAG &DAG) {
  if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
    return V;

  SDValue Opnd0, Opnd1;
  bool IsSubAdd;
  if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  SDLoc DL(N);

  // Try to generate X86ISD::FMADDSUB node here.
  SDValue Opnd2;
  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
    unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
    return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
  }

  if (IsSubAdd)
    return SDValue();

  // Do not generate X86ISD::ADDSUB node for 512-bit types even though
  // the ADDSUB idiom has been successfully recognized. There are no known
  // X86 targets with 512-bit ADDSUB instructions!
  if (VT.is512BitVector())
    return SDValue();

  return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}

// We are looking for a shuffle where both sources are a concatenation of a
// half-width vector with undef. AVX2 has VPERMD/Q, so if we can express this
// as a single-source shuffle, that's preferable.
static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
    return SDValue();

  EVT VT = N->getValueType(0);

  // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
  if (!VT.is128BitVector() && !VT.is256BitVector())
    return SDValue();

  if (VT.getVectorElementType() != MVT::i32 &&
      VT.getVectorElementType() != MVT::i64 &&
      VT.getVectorElementType() != MVT::f32 &&
      VT.getVectorElementType() != MVT::f64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Check that both sources are concats with undef.
  if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
      N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
      N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
      !N1.getOperand(1).isUndef())
    return SDValue();

  // Construct the new shuffle mask. Elements from the first source retain their
  // index, but elements from the second source no longer need to skip an undef.
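  // e.g. for v4i32: shuffle (concat t1, undef), (concat t2, undef), <0,1,4,5>
  // becomes shuffle (concat t1, t2), undef, <0,1,2,3>.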
  SmallVector<int, 8> Mask;
  int NumElts = VT.getVectorNumElements();

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  for (int Elt : SVOp->getMask())
    Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));

  SDLoc DL(N);
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
                               N1.getOperand(0));
  return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
}

/// Eliminate a redundant shuffle of a horizontal math op.
static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
    if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
      return SDValue();

  // For a broadcast, peek through an extract element of index 0 to find the
  // horizontal op: broadcast (ext_vec_elt HOp, 0)
  EVT VT = N->getValueType(0);
  if (Opcode == X86ISD::VBROADCAST) {
    SDValue SrcOp = N->getOperand(0);
    if (SrcOp.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        SrcOp.getValueType() == MVT::f64 &&
        SrcOp.getOperand(0).getValueType() == VT &&
        isNullConstant(SrcOp.getOperand(1)))
      N = SrcOp.getNode();
  }

  SDValue HOp = N->getOperand(0);
  if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
      HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
    return SDValue();

  // 128-bit horizontal math instructions are defined to operate on adjacent
  // lanes of each operand as:
  // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
  // ...similarly for v2f64 and v8i16.
  if (!HOp.getOperand(0).isUndef() && !HOp.getOperand(1).isUndef() &&
      HOp.getOperand(0) != HOp.getOperand(1))
    return SDValue();

  // The shuffle that we are eliminating may have allowed the horizontal op to
  // have an undemanded (undefined) operand. Duplicate the other (defined)
  // operand to ensure that the results are defined across all lanes without the
  // shuffle.
  auto updateHOp = [](SDValue HorizOp, SelectionDAG &DAG) {
    SDValue X;
    if (HorizOp.getOperand(0).isUndef()) {
      assert(!HorizOp.getOperand(1).isUndef() && "Not expecting foldable h-op");
      X = HorizOp.getOperand(1);
    } else if (HorizOp.getOperand(1).isUndef()) {
      assert(!HorizOp.getOperand(0).isUndef() && "Not expecting foldable h-op");
      X = HorizOp.getOperand(0);
    } else {
      return HorizOp;
    }
    return DAG.getNode(HorizOp.getOpcode(), SDLoc(HorizOp),
                       HorizOp.getValueType(), X, X);
  };

  // When the operands of a horizontal math op are identical, the low half of
  // the result is the same as the high half. If a target shuffle is also
  // replicating low and high halves (and without changing the type/length of
  // the vector), we don't need the shuffle.
  if (Opcode == X86ISD::MOVDDUP || Opcode == X86ISD::VBROADCAST) {
    if (HOp.getScalarValueSizeInBits() == 64 && HOp.getValueType() == VT) {
      // movddup (hadd X, X) --> hadd X, X
      // broadcast (extract_vec_elt (hadd X, X), 0) --> hadd X, X
      assert((HOp.getValueType() == MVT::v2f64 ||
              HOp.getValueType() == MVT::v4f64) && "Unexpected type for h-op");
      return updateHOp(HOp, DAG);
    }
    return SDValue();
  }

  // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
  // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
  // but this should be tied to whatever horizontal op matching and shuffle
  // canonicalization are producing.
  if (HOp.getValueSizeInBits() == 128 &&
      (isTargetShuffleEquivalent(Mask, {0, 0}) ||
       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
       isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
    return updateHOp(HOp, DAG);

  if (HOp.getValueSizeInBits() == 256 &&
      (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
       isTargetShuffleEquivalent(
           Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
    return updateHOp(HOp, DAG);

  return SDValue();
}

/// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
/// low half of each source vector and does not set any high half elements in
/// the destination vector, narrow the shuffle to half its original size.
static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
  if (!Shuf->getValueType(0).isSimple())
    return SDValue();
  MVT VT = Shuf->getSimpleValueType(0);
  if (!VT.is256BitVector() && !VT.is512BitVector())
    return SDValue();

  // See if we can ignore all of the high elements of the shuffle.
  ArrayRef<int> Mask = Shuf->getMask();
  if (!isUndefUpperHalf(Mask))
    return SDValue();

  // Check if the shuffle mask accesses only the low half of each input vector
  // (half-index output is 0 or 2).
  int HalfIdx1, HalfIdx2;
  SmallVector<int, 8> HalfMask(Mask.size() / 2);
  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
      (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
    return SDValue();

  // Create a half-width shuffle to replace the unnecessarily wide shuffle.
  // The trick is knowing that all of the insert/extract are actually free
  // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
  // of narrow inputs into a narrow output, and that is always cheaper than
  // the wide shuffle that we started with.
  return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
                               Shuf->getOperand(1), HalfMask, HalfIdx1,
                               HalfIdx2, false, DAG, /*UseConcat*/true);
}

static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI,
                              const X86Subtarget &Subtarget) {
  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
    if (SDValue V = narrowShuffle(Shuf, DAG))
      return V;

  // If we have legalized the vector types, look for blends of FADD and FSUB
  // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isTypeLegal(VT)) {
    if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
      return AddSub;

    if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
      return HAddSub;
  }

  // Attempt to combine into a vector load/broadcast.
  if (SDValue LD = combineToConsecutiveLoads(VT, N, dl, DAG, Subtarget, true))
    return LD;

  // For AVX2, we sometimes want to combine
  // (vector_shuffle <mask> (concat_vectors t1, undef)
  //                        (concat_vectors t2, undef))
  // Into:
  // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
  // Since the latter can be efficiently lowered with VPERMD/VPERMQ.
  if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
    return ShufConcat;

  if (isTargetShuffle(N->getOpcode())) {
    SDValue Op(N, 0);
    if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
      return Shuffle;

    // Try recursively combining arbitrary sequences of x86 shuffle
    // instructions into higher-order shuffles. We do this after combining
    // specific PSHUF instruction sequences into their minimal form so that we
    // can evaluate how many specialized shuffle instructions are involved in
    // a particular chain.
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;

    // Simplify source operands based on shuffle mask.
    // TODO - merge this into combineX86ShufflesRecursively.
    APInt KnownUndef, KnownZero;
    APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
    if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, DCI))
      return SDValue(N, 0);
  }

  // Look for a v2i64/v2f64 VZEXT_MOVL of a node that already produces zeros
  // in the upper 64 bits.
  // TODO: Can we generalize this using computeKnownBits?
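  // Each conversion below, when fed a 2-element source, defines only the low
  // two result lanes and zeroes the upper 64 bits, which is what makes the
  // VZEXT_MOVL redundant.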
  if (N->getOpcode() == X86ISD::VZEXT_MOVL &&
      (VT == MVT::v2f64 || VT == MVT::v2i64) &&
      N->getOperand(0).getOpcode() == ISD::BITCAST &&
      (N->getOperand(0).getOperand(0).getValueType() == MVT::v4f32 ||
       N->getOperand(0).getOperand(0).getValueType() == MVT::v4i32)) {
    SDValue In = N->getOperand(0).getOperand(0);
    switch (In.getOpcode()) {
    default:
      break;
    case X86ISD::CVTP2SI:   case X86ISD::CVTP2UI:
    case X86ISD::MCVTP2SI:  case X86ISD::MCVTP2UI:
    case X86ISD::CVTTP2SI:  case X86ISD::CVTTP2UI:
    case X86ISD::MCVTTP2SI: case X86ISD::MCVTTP2UI:
    case X86ISD::CVTSI2P:   case X86ISD::CVTUI2P:
    case X86ISD::MCVTSI2P:  case X86ISD::MCVTUI2P:
    case X86ISD::VFPROUND:  case X86ISD::VMFPROUND:
      if (In.getOperand(0).getValueType() == MVT::v2f64 ||
          In.getOperand(0).getValueType() == MVT::v2i64)
        return N->getOperand(0); // return the bitcast
      break;
    }
  }

  // Pull subvector inserts into undef through VZEXT_MOVL by making it an
  // insert into a zero vector. This helps get VZEXT_MOVL closer to
  // scalar_to_vectors where 256/512 are canonicalized to an insert and a
  // 128-bit scalar_to_vector. This reduces the number of isel patterns.
  if (N->getOpcode() == X86ISD::VZEXT_MOVL && !DCI.isBeforeLegalizeOps() &&
      N->getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR &&
      N->getOperand(0).hasOneUse() &&
      N->getOperand(0).getOperand(0).isUndef() &&
      isNullConstant(N->getOperand(0).getOperand(2))) {
    SDValue In = N->getOperand(0).getOperand(1);
    SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, dl, In.getValueType(), In);
    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT,
                       getZeroVector(VT.getSimpleVT(), Subtarget, DAG, dl),
                       Movl, N->getOperand(0).getOperand(2));
  }

  // If this is a vzmovl of a full vector load, replace it with a vzload, unless
  // the load is volatile.
  if (N->getOpcode() == X86ISD::VZEXT_MOVL && N->getOperand(0).hasOneUse() &&
      ISD::isNormalLoad(N->getOperand(0).getNode())) {
    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
    if (LN->isSimple()) {
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue VZLoad =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                  VT.getVectorElementType(),
                                  LN->getPointerInfo(),
                                  LN->getAlignment(),
                                  MachineMemOperand::MOLoad);
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
      return VZLoad;
    }
  }

  return SDValue();
}

bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
    SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
    TargetLoweringOpt &TLO, unsigned Depth) const {
  int NumElts = DemandedElts.getBitWidth();
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();

  // Handle special case opcodes.
  switch (Opc) {
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ: {
    APInt LHSUndef, LHSZero;
    APInt RHSUndef, RHSZero;
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);
    if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
                                   Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
                                   Depth + 1))
      return true;
    // Multiply by zero.
    KnownZero = LHSZero | RHSZero;
    break;
  }
  case X86ISD::VSHL:
  case X86ISD::VSRL:
  case X86ISD::VSRA: {
    // We only need the bottom 64-bits of the (128-bit) shift amount.
    SDValue Amt = Op.getOperand(1);
    MVT AmtVT = Amt.getSimpleValueType();
    assert(AmtVT.is128BitVector() && "Unexpected value type");

    // If the shift amount is only ever used as an SSE vector shift amount,
    // then we know that only the bottom 64-bits are ever used.
    bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
      unsigned UseOpc = Use->getOpcode();
      return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
              UseOpc == X86ISD::VSRA) &&
             Use->getOperand(0) != Amt;
    });

    APInt AmtUndef, AmtZero;
    unsigned NumAmtElts = AmtVT.getVectorNumElements();
    APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
    if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
                                   Depth + 1, AssumeSingleUse))
      return true;
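    // Fall through to also simplify the value being shifted, as in the
    // immediate-shift cases below.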
    LLVM_FALLTHROUGH;
  }
  case X86ISD::VSHLI:
  case X86ISD::VSRLI:
  case X86ISD::VSRAI: {
    SDValue Src = Op.getOperand(0);
    APInt SrcUndef;
    if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;
    // TODO convert SrcUndef to KnownUndef.
    break;
  }
  case X86ISD::KSHIFTL: {
    SDValue Src = Op.getOperand(0);
    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
    unsigned ShiftAmt = Amt->getZExtValue();

    if (ShiftAmt == 0)
      return TLO.CombineTo(Op, Src);

    // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
    // single shift.  We can do this if the bottom bits (which are shifted
    // out) are never demanded.
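    // e.g. (kshiftl (kshiftr X, 3), 1) --> (kshiftr X, 2) when the low
    // element of the result is not demanded.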
    if (Src.getOpcode() == X86ISD::KSHIFTR) {
      if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
        unsigned C1 = Src.getConstantOperandVal(1);
        unsigned NewOpc = X86ISD::KSHIFTL;
        int Diff = ShiftAmt - C1;
        if (Diff < 0) {
          Diff = -Diff;
          NewOpc = X86ISD::KSHIFTR;
        }

        SDLoc dl(Op);
        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
      }
    }

    APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;

    KnownUndef <<= ShiftAmt;
    KnownZero <<= ShiftAmt;
    KnownZero.setLowBits(ShiftAmt);
    break;
  }
  case X86ISD::KSHIFTR: {
    SDValue Src = Op.getOperand(0);
    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
    unsigned ShiftAmt = Amt->getZExtValue();

    if (ShiftAmt == 0)
      return TLO.CombineTo(Op, Src);

    // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
    // single shift.  We can do this if the top bits (which are shifted
    // out) are never demanded.
    if (Src.getOpcode() == X86ISD::KSHIFTL) {
      if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
        unsigned C1 = Src.getConstantOperandVal(1);
        unsigned NewOpc = X86ISD::KSHIFTR;
        int Diff = ShiftAmt - C1;
        if (Diff < 0) {
          Diff = -Diff;
          NewOpc = X86ISD::KSHIFTL;
        }

        SDLoc dl(Op);
        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
      }
    }

    APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;

    KnownUndef.lshrInPlace(ShiftAmt);
    KnownZero.lshrInPlace(ShiftAmt);
    KnownZero.setHighBits(ShiftAmt);
    break;
  }
  case X86ISD::CVTSI2P:
  case X86ISD::CVTUI2P: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    APInt SrcUndef, SrcZero;
    APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::PACKSS:
  case X86ISD::PACKUS: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);

    APInt DemandedLHS, DemandedRHS;
    getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);

    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(N0, DemandedLHS, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(N1, DemandedRHS, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;

    // Aggressively peek through ops to get at the demanded elts.
    // TODO - we should do this for all target/faux shuffle ops.
    if (!DemandedElts.isAllOnesValue()) {
      APInt DemandedSrcBits =
          APInt::getAllOnesValue(N0.getScalarValueSizeInBits());
      SDValue NewN0 = SimplifyMultipleUseDemandedBits(
          N0, DemandedSrcBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue NewN1 = SimplifyMultipleUseDemandedBits(
          N1, DemandedSrcBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (NewN0 || NewN1) {
        NewN0 = NewN0 ? NewN0 : N0;
        NewN1 = NewN1 ? NewN1 : N1;
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
      }
    }
    break;
  }
  case X86ISD::HADD:
  case X86ISD::HSUB:
  case X86ISD::FHADD:
  case X86ISD::FHSUB: {
    APInt DemandedLHS, DemandedRHS;
    getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);

    APInt LHSUndef, LHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
                                   LHSZero, TLO, Depth + 1))
      return true;
    APInt RHSUndef, RHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
                                   RHSZero, TLO, Depth + 1))
      return true;
    break;
  }
  case X86ISD::VTRUNC:
  case X86ISD::VTRUNCS:
  case X86ISD::VTRUNCUS: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    KnownZero = SrcZero.zextOrTrunc(NumElts);
    KnownUndef = SrcUndef.zextOrTrunc(NumElts);
    break;
  }
  case X86ISD::BLENDV: {
    APInt SelUndef, SelZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
                                   SelZero, TLO, Depth + 1))
      return true;

    // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
    APInt LHSUndef, LHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
                                   LHSZero, TLO, Depth + 1))
      return true;

    APInt RHSUndef, RHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
                                   RHSZero, TLO, Depth + 1))
      return true;

    KnownZero = LHSZero & RHSZero;
    KnownUndef = LHSUndef & RHSUndef;
    break;
  }
  case X86ISD::VBROADCAST: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    if (!SrcVT.isVector())
      return false;
    // Don't bother broadcasting if we just need the 0'th element.
    if (DemandedElts == 1) {
      if (Src.getValueType() != VT)
        Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
                             SDLoc(Op));
      return TLO.CombineTo(Op, Src);
    }
    APInt SrcUndef, SrcZero;
    APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::VPERMV: {
    SDValue Mask = Op.getOperand(0);
    APInt MaskUndef, MaskZero;
    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::PSHUFB:
  case X86ISD::VPERMV3:
  case X86ISD::VPERMILPV: {
    SDValue Mask = Op.getOperand(1);
    APInt MaskUndef, MaskZero;
    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::VPPERM:
  case X86ISD::VPERMIL2: {
    SDValue Mask = Op.getOperand(2);
    APInt MaskUndef, MaskZero;
    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  }

  // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
  // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
  // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
  if ((VT.is256BitVector() || VT.is512BitVector()) &&
      DemandedElts.lshr(NumElts / 2) == 0) {
    unsigned SizeInBits = VT.getSizeInBits();
    unsigned ExtSizeInBits = SizeInBits / 2;

    // See if 512-bit ops only use the bottom 128-bits.
    if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
      ExtSizeInBits = SizeInBits / 4;

    switch (Opc) {
      // Zero upper elements.
    case X86ISD::VZEXT_MOVL: {
      SDLoc DL(Op);
      SDValue Ext0 =
          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
      SDValue ExtOp =
          TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0);
      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
      // Subvector broadcast.
    case X86ISD::SUBV_BROADCAST: {
      SDLoc DL(Op);
      SDValue Src = Op.getOperand(0);
      if (Src.getValueSizeInBits() > ExtSizeInBits)
        Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
      else if (Src.getValueSizeInBits() < ExtSizeInBits) {
        MVT SrcSVT = Src.getSimpleValueType().getScalarType();
        MVT SrcVT =
            MVT::getVectorVT(SrcSVT, ExtSizeInBits / SrcSVT.getSizeInBits());
        Src = TLO.DAG.getNode(X86ISD::SUBV_BROADCAST, DL, SrcVT, Src);
      }
      return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Src, 0,
                                               TLO.DAG, DL, ExtSizeInBits));
    }
      // Byte shifts by immediate.
    case X86ISD::VSHLDQ:
    case X86ISD::VSRLDQ:
      // Shift by uniform.
    case X86ISD::VSHL:
    case X86ISD::VSRL:
    case X86ISD::VSRA:
      // Shift by immediate.
    case X86ISD::VSHLI:
    case X86ISD::VSRLI:
    case X86ISD::VSRAI: {
      SDLoc DL(Op);
      SDValue Ext0 =
          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
      SDValue ExtOp =
          TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
    case X86ISD::VPERMI: {
      // Simplify PERMPD/PERMQ to extract_subvector.
      // TODO: This should be done in shuffle combining.
      if (VT == MVT::v4f64 || VT == MVT::v4i64) {
        SmallVector<int, 4> Mask;
        DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
        if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
          SDLoc DL(Op);
          SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
          SDValue UndefVec = TLO.DAG.getUNDEF(VT);
          SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
          return TLO.CombineTo(Op, Insert);
        }
      }
      break;
    }
      // Target Shuffles.
    case X86ISD::PSHUFB:
    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      // Saturated Packs.
    case X86ISD::PACKSS:
    case X86ISD::PACKUS:
      // Horizontal Ops.
    case X86ISD::HADD:
    case X86ISD::HSUB:
    case X86ISD::FHADD:
    case X86ISD::FHSUB: {
      SDLoc DL(Op);
      MVT ExtVT = VT.getSimpleVT();
      ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
                               ExtSizeInBits / ExtVT.getScalarSizeInBits());
      SDValue Ext0 =
          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
      SDValue Ext1 =
          extractSubVector(Op.getOperand(1), 0, TLO.DAG, DL, ExtSizeInBits);
      SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ext0, Ext1);
      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
    }
  }

  // Get target/faux shuffle mask.
  APInt OpUndef, OpZero;
  SmallVector<int, 64> OpMask;
  SmallVector<SDValue, 2> OpInputs;
  if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
                              OpZero, TLO.DAG, Depth, false))
    return false;

  // Shuffle inputs must be the same size as the result.
  if (OpMask.size() != (unsigned)NumElts ||
      llvm::any_of(OpInputs, [VT](SDValue V) {
        return VT.getSizeInBits() != V.getValueSizeInBits() ||
               !V.getValueType().isVector();
      }))
    return false;

  KnownZero = OpZero;
  KnownUndef = OpUndef;

  // Check if shuffle mask can be simplified to undef/zero/identity.
  int NumSrcs = OpInputs.size();
  for (int i = 0; i != NumElts; ++i)
    if (!DemandedElts[i])
      OpMask[i] = SM_SentinelUndef;

  if (isUndefInRange(OpMask, 0, NumElts)) {
    KnownUndef.setAllBits();
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  }
  if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
    KnownZero.setAllBits();
    return TLO.CombineTo(
        Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
  }
  for (int Src = 0; Src != NumSrcs; ++Src)
    if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));

  // Attempt to simplify inputs.
  for (int Src = 0; Src != NumSrcs; ++Src) {
    // TODO: Support inputs of different types.
    if (OpInputs[Src].getValueType() != VT)
      continue;

    int Lo = Src * NumElts;
    APInt SrcElts = APInt::getNullValue(NumElts);
    for (int i = 0; i != NumElts; ++i)
      if (DemandedElts[i]) {
        int M = OpMask[i] - Lo;
        if (0 <= M && M < NumElts)
          SrcElts.setBit(M);
      }

    // TODO - Propagate input undef/zero elts.
    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
                                   TLO, Depth + 1))
      return true;
  }

  return false;
}

bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  unsigned Opc = Op.getOpcode();
  switch(Opc) {
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ: {
    // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
    KnownBits KnownOp;
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);
    // FIXME: Can we bound this better?
    APInt DemandedMask = APInt::getLowBitsSet(64, 32);
    if (SimplifyDemandedBits(LHS, DemandedMask, OriginalDemandedElts, KnownOp,
                             TLO, Depth + 1))
      return true;
    if (SimplifyDemandedBits(RHS, DemandedMask, OriginalDemandedElts, KnownOp,
                             TLO, Depth + 1))
      return true;

    // Aggressively peek through ops to get at the demanded low bits.
    SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
        LHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
    SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
        RHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
    if (DemandedLHS || DemandedRHS) {
      DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
      DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
      return TLO.CombineTo(
          Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
    }
    break;
  }
  case X86ISD::VSHLI: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
      if (ShiftImm->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = ShiftImm->getZExtValue();
      APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift.  We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      if (Op0.getOpcode() == X86ISD::VSRLI &&
          OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
        if (auto *Shift2Imm = dyn_cast<ConstantSDNode>(Op0.getOperand(1))) {
          if (Shift2Imm->getAPIntValue().ult(BitWidth)) {
            int Diff = ShAmt - Shift2Imm->getZExtValue();
            if (Diff == 0)
              return TLO.CombineTo(Op, Op0.getOperand(0));

            unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
            SDValue NewShift = TLO.DAG.getNode(
                NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
                TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
            return TLO.CombineTo(Op, NewShift);
          }
        }
      }

      if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
                               TLO, Depth + 1))
        return true;

      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;

      // Low bits known zero.
      Known.Zero.setLowBits(ShAmt);
    }
    break;
  }
  case X86ISD::VSRLI: {
    if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (ShiftImm->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = ShiftImm->getZExtValue();
      APInt DemandedMask = OriginalDemandedBits << ShAmt;

      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
                               OriginalDemandedElts, Known, TLO, Depth + 1))
        return true;

      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);

      // High bits known zero.
      Known.Zero.setHighBits(ShAmt);
    }
    break;
  }
  case X86ISD::VSRAI: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
      if (ShiftImm->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = ShiftImm->getZExtValue();
      APInt DemandedMask = OriginalDemandedBits << ShAmt;

      // If we just want the sign bit then we don't need to shift it.
      if (OriginalDemandedBits.isSignMask())
        return TLO.CombineTo(Op, Op0);

      // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
      if (Op0.getOpcode() == X86ISD::VSHLI && Op1 == Op0.getOperand(1)) {
        SDValue Op00 = Op0.getOperand(0);
        unsigned NumSignBits =
            TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
        if (ShAmt < NumSignBits)
          return TLO.CombineTo(Op, Op00);
      }

      // If any of the demanded bits are produced by the sign extension, we also
      // demand the input sign bit.
      if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
        DemandedMask.setSignBit();

      if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
                               TLO, Depth + 1))
        return true;

      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (Known.Zero[BitWidth - ShAmt - 1] ||
          OriginalDemandedBits.countLeadingZeros() >= ShAmt)
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));

      // High bits are known one.
      if (Known.One[BitWidth - ShAmt - 1])
        Known.One.setHighBits(ShAmt);
    }
    break;
  }
  case X86ISD::PEXTRB:
  case X86ISD::PEXTRW: {
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    MVT VecVT = Vec.getSimpleValueType();
    unsigned NumVecElts = VecVT.getVectorNumElements();

    if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
      unsigned Idx = CIdx->getZExtValue();
      unsigned VecBitWidth = VecVT.getScalarSizeInBits();

      // If we demand no bits from the vector then we must have demanded
      // bits from the implicit zext - simplify to zero.
      APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
      if (DemandedVecBits == 0)
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));

      APInt KnownUndef, KnownZero;
      APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
      if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
                                     KnownZero, TLO, Depth + 1))
        return true;

      KnownBits KnownVec;
      if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
                               KnownVec, TLO, Depth + 1))
        return true;

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));

      Known = KnownVec.zext(BitWidth, true);
      return false;
    }
    break;
  }
  case X86ISD::PINSRB:
  case X86ISD::PINSRW: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    MVT VecVT = Vec.getSimpleValueType();

    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      if (!OriginalDemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);

      KnownBits KnownVec;
      APInt DemandedVecElts(OriginalDemandedElts);
      DemandedVecElts.clearBit(Idx);
      if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
                               KnownVec, TLO, Depth + 1))
        return true;

      KnownBits KnownScl;
      unsigned NumSclBits = Scl.getScalarValueSizeInBits();
      APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
      if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
        return true;

      KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
      Known.One = KnownVec.One & KnownScl.One;
      Known.Zero = KnownVec.Zero & KnownScl.Zero;
      return false;
    }
    break;
  }
  case X86ISD::PACKSS:
    // PACKSS saturates to MIN/MAX integer values. So if we just want the
    // sign bit then we can just ask for the source operands' sign bit.
    // TODO - add known bits handling.
    if (OriginalDemandedBits.isSignMask()) {
      APInt DemandedLHS, DemandedRHS;
      getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);

      KnownBits KnownLHS, KnownRHS;
      APInt SignMask = APInt::getSignMask(BitWidth * 2);
      if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
                               KnownLHS, TLO, Depth + 1))
        return true;
      if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
                               KnownRHS, TLO, Depth + 1))
        return true;
    }
    // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
    break;
  case X86ISD::PCMPGT:
    // icmp sgt(0, R) == ashr(R, BitWidth-1).
    // If we only need the sign bit then we can use R directly.
    if (OriginalDemandedBits.isSignMask() &&
        ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
      return TLO.CombineTo(Op, Op.getOperand(1));
    break;
  case X86ISD::MOVMSK: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    unsigned SrcBits = SrcVT.getScalarSizeInBits();
    unsigned NumElts = SrcVT.getVectorNumElements();
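    // MOVMSK packs the sign bit of each vector element into the low NumElts
    // bits of the scalar result; all higher result bits are zero.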

    // If we don't need the sign bits at all just return zero.
    if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));

    // Only demand the vector elements of the sign bits we need.
    APInt KnownUndef, KnownZero;
    APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
    if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
                                   TLO, Depth + 1))
      return true;

    Known.Zero = KnownZero.zextOrSelf(BitWidth);
    Known.Zero.setHighBits(BitWidth - NumElts);

    // MOVMSK only uses the MSB from each vector element.
    KnownBits KnownSrc;
    if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts,
                             KnownSrc, TLO, Depth + 1))
      return true;

    if (KnownSrc.One[SrcBits - 1])
      Known.One.setLowBits(NumElts);
    else if (KnownSrc.Zero[SrcBits - 1])
      Known.Zero.setLowBits(NumElts);
    return false;
  }
  }

  return TargetLowering::SimplifyDemandedBitsForTargetNode(
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}

SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  int NumElts = DemandedElts.getBitWidth();
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();

  switch (Opc) {
  case X86ISD::PINSRB:
  case X86ISD::PINSRW: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    MVT VecVT = Vec.getSimpleValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  }

  APInt ShuffleUndef, ShuffleZero;
  SmallVector<int, 16> ShuffleMask;
  SmallVector<SDValue, 2> ShuffleOps;
  if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
                             ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    int NumOps = ShuffleOps.size();
    if (ShuffleMask.size() == (unsigned)NumElts &&
        llvm::all_of(ShuffleOps, [VT](SDValue V) {
          return VT.getSizeInBits() == V.getValueSizeInBits();
        })) {

      if (DemandedElts.isSubsetOf(ShuffleUndef))
        return DAG.getUNDEF(VT);
      if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
        return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));

      // Bitmask that indicates which ops have only been accessed 'inline'.
      APInt IdentityOp = APInt::getAllOnesValue(NumOps);
      for (int i = 0; i != NumElts; ++i) {
        int M = ShuffleMask[i];
        if (!DemandedElts[i] || ShuffleUndef[i])
          continue;
        int Op = M / NumElts;
        int Index = M % NumElts;
        if (M < 0 || Index != i) {
          IdentityOp.clearAllBits();
          break;
        }
        IdentityOp &= APInt::getOneBitSet(NumOps, Op);
        if (IdentityOp == 0)
          break;
      }
      assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
             "Multiple identity shuffles detected");

      if (IdentityOp != 0)
        return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
    }
  }

  return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
      Op, DemandedBits, DemandedElts, DAG, Depth);
}

/// Check if a vector extract from a target-specific shuffle of a load can be
/// folded into a single element load.
/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
/// shuffles have been custom lowered so we need to handle those here.
static SDValue
XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue InVec = N->getOperand(0);
  SDValue EltNo = N->getOperand(1);
  EVT EltVT = N->getValueType(0);

  if (!isa<ConstantSDNode>(EltNo))
    return SDValue();

  EVT OriginalVT = InVec.getValueType();
  unsigned NumOriginalElts = OriginalVT.getVectorNumElements();

  // Peek through bitcasts, don't duplicate a load with other uses.
  InVec = peekThroughOneUseBitcasts(InVec);

  EVT CurrentVT = InVec.getValueType();
  if (!CurrentVT.isVector())
    return SDValue();

  unsigned NumCurrentElts = CurrentVT.getVectorNumElements();
  if ((NumOriginalElts % NumCurrentElts) != 0)
    return SDValue();

  if (!isTargetShuffle(InVec.getOpcode()))
    return SDValue();

  // Don't duplicate a load with other uses.
  if (!InVec.hasOneUse())
    return SDValue();

  SmallVector<int, 16> ShuffleMask;
  SmallVector<SDValue, 2> ShuffleOps;
  bool UnaryShuffle;
  if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
                            ShuffleOps, ShuffleMask, UnaryShuffle))
    return SDValue();

  unsigned Scale = NumOriginalElts / NumCurrentElts;
  if (Scale > 1) {
    SmallVector<int, 16> ScaledMask;
    scaleShuffleMask<int>(Scale, ShuffleMask, ScaledMask);
    ShuffleMask = std::move(ScaledMask);
  }
  assert(ShuffleMask.size() == NumOriginalElts && "Shuffle mask size mismatch");

  // Select the input vector, guarding against an out of range extract element
  // index.
  int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
  int Idx = (Elt >= (int)NumOriginalElts) ? SM_SentinelUndef : ShuffleMask[Elt];

  if (Idx == SM_SentinelZero)
    return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT)
                             : DAG.getConstantFP(+0.0, SDLoc(N), EltVT);
  if (Idx == SM_SentinelUndef)
    return DAG.getUNDEF(EltVT);

  // Bail if any mask element is SM_SentinelZero - getVectorShuffle below
  // won't handle it.
  if (llvm::any_of(ShuffleMask, [](int M) { return M == SM_SentinelZero; }))
    return SDValue();

  assert(0 <= Idx && Idx < (int)(2 * NumOriginalElts) &&
         "Shuffle index out of range");
  SDValue LdNode = (Idx < (int)NumOriginalElts) ? ShuffleOps[0] : ShuffleOps[1];

  // If inputs to shuffle are the same for both ops, then allow 2 uses
  unsigned AllowedUses =
      (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;

  if (LdNode.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
      return SDValue();

    AllowedUses = 1; // only allow 1 load use if we have a bitcast
    LdNode = LdNode.getOperand(0);
  }

  if (!ISD::isNormalLoad(LdNode.getNode()))
    return SDValue();

  LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);

  if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || !LN0->isSimple())
    return SDValue();

  // If there's a bitcast before the shuffle, check if the load type and
  // alignment are valid.
  unsigned Align = LN0->getAlignment();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
      EltVT.getTypeForEVT(*DAG.getContext()));

  if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
    return SDValue();

  // All checks match so transform back to vector_shuffle so that DAG combiner
  // can finish the job
  SDLoc dl(N);

  // Create a shuffle node, taking into account the case that it's a unary shuffle.
  SDValue Shuffle = UnaryShuffle ? DAG.getUNDEF(OriginalVT)
                                 : DAG.getBitcast(OriginalVT, ShuffleOps[1]);
  Shuffle = DAG.getVectorShuffle(OriginalVT, dl,
                                 DAG.getBitcast(OriginalVT, ShuffleOps[0]),
                                 Shuffle, ShuffleMask);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
                     EltNo);
}

// Helper to peek through bitops/setcc to determine size of source vector.
// Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
  switch (Src.getOpcode()) {
  case ISD::SETCC:
    return Src.getOperand(0).getValueSizeInBits() == Size;
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR:
    return checkBitcastSrcVectorSize(Src.getOperand(0), Size) &&
           checkBitcastSrcVectorSize(Src.getOperand(1), Size);
  }
  return false;
}

// Helper to push sign extension of vXi1 SETCC result through bitops.
static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
                                          SDValue Src, const SDLoc &DL) {
  switch (Src.getOpcode()) {
  case ISD::SETCC:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR:
    return DAG.getNode(
        Src.getOpcode(), DL, SExtVT,
        signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
        signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
  }
  llvm_unreachable("Unexpected node type for vXi1 sign extension");
}

// Try to match patterns such as
// (i16 bitcast (v16i1 x))
// ->
// (i16 movmsk (16i8 sext (v16i1 x)))
// before the illegal vector is scalarized on subtargets that don't have legal
// vxi1 types.
static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
                                  const SDLoc &DL,
                                  const X86Subtarget &Subtarget) {
  EVT SrcVT = Src.getValueType();
  if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
    return SDValue();

  // If the input is a truncate from v16i8, v32i8 or v64i8 go ahead and use a
  // movmskb even with avx512. This will be better than truncating to vXi1 and
  // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
  // vpcmpeqb/vpcmpgtb.
  bool IsTruncated = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
                     (Src.getOperand(0).getValueType() == MVT::v16i8 ||
                      Src.getOperand(0).getValueType() == MVT::v32i8 ||
                      Src.getOperand(0).getValueType() == MVT::v64i8);

  // With AVX512 vxi1 types are legal and we prefer using k-regs.
  // MOVMSK is supported in SSE2 or later.
  if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !IsTruncated))
    return SDValue();

  // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v2f64 and
  // v4f64. So all legal 128-bit and 256-bit vectors are covered except for
  // v8i16 and v16i16.
  // For these two cases, we can shuffle the upper element bytes to a
  // consecutive sequence at the start of the vector and treat the results as
  // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
  // for v16i16 this is not the case, because the shuffle is expensive, so we
  // avoid sign-extending to this type entirely.
  // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
  // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
  MVT SExtVT;
  bool PropagateSExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default:
    return SDValue();
  case MVT::v2i1:
    SExtVT = MVT::v2i64;
    break;
  case MVT::v4i1:
    SExtVT = MVT::v4i32;
    // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
    // sign-extend to a 256-bit operation to avoid truncation.
    if (Subtarget.hasAVX() && checkBitcastSrcVectorSize(Src, 256)) {
      SExtVT = MVT::v4i64;
      PropagateSExt = true;
    }
    break;
  case MVT::v8i1:
    SExtVT = MVT::v8i16;
    // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
    // sign-extend to a 256-bit operation to match the compare.
    // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
    // 256-bit because the shuffle is cheaper than sign extending the result of
    // the compare.
    if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256) ||
                               checkBitcastSrcVectorSize(Src, 512))) {
      SExtVT = MVT::v8i32;
      PropagateSExt = true;
    }
    break;
  case MVT::v16i1:
    SExtVT = MVT::v16i8;
    // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
    // it is not profitable to sign-extend to 256-bit because this will
    // require an extra cross-lane shuffle which is more expensive than
    // truncating the result of the compare to 128-bits.
    break;
  case MVT::v32i1:
    SExtVT = MVT::v32i8;
    break;
  case MVT::v64i1:
    // If we have AVX512F but not AVX512BW, the input must be a truncate from
    // v64i8 (checked earlier); split the input and emit two pmovmskbs.
    if (Subtarget.hasAVX512() && !Subtarget.hasBWI()) {
      SExtVT = MVT::v64i8;
      break;
    }
    return SDValue();
  }

  SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
                            : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);

  if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
    V = getPMOVMSKB(DL, V, DAG, Subtarget);
  } else {
    if (SExtVT == MVT::v8i16)
      V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
                      DAG.getUNDEF(MVT::v8i16));
    V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
  }

  EVT IntVT =
      EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
  V = DAG.getZExtOrTrunc(V, DL, IntVT);
  return DAG.getBitcast(VT, V);
}

// Convert a vXi1 constant build vector to the same width scalar integer.
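// e.g. a v4i1 constant <1,0,1,1> becomes the 4-bit integer 0b1101: element
// Idx maps to bit Idx of the result and undef elements are treated as zero.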
static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
  EVT SrcVT = Op.getValueType();
  assert(SrcVT.getVectorElementType() == MVT::i1 &&
         "Expected a vXi1 vector");
  assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
         "Expected a constant build vector");

  APInt Imm(SrcVT.getVectorNumElements(), 0);
  for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
    SDValue In = Op.getOperand(Idx);
    if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
      Imm.setBit(Idx);
  }
  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
  return DAG.getConstant(Imm, SDLoc(Op), IntVT);
}

static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");

  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  // Only do this if we have k-registers.
  if (!Subtarget.hasAVX512())
    return SDValue();

  EVT DstVT = N->getValueType(0);
  SDValue Op = N->getOperand(0);
  EVT SrcVT = Op.getValueType();

  if (!Op.hasOneUse())
    return SDValue();

  // Look for logic ops.
  if (Op.getOpcode() != ISD::AND &&
      Op.getOpcode() != ISD::OR &&
      Op.getOpcode() != ISD::XOR)
    return SDValue();

  // Make sure we have a bitcast between mask registers and a scalar type.
  if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
        DstVT.isScalarInteger()) &&
      !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
        SrcVT.isScalarInteger()))
    return SDValue();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
      LHS.getOperand(0).getValueType() == DstVT)
    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
                       DAG.getBitcast(DstVT, RHS));

  if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
      RHS.getOperand(0).getValueType() == DstVT)
    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
                       DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));

  // If the RHS is a vXi1 build vector, this is a good reason to flip too.
  // Most of these have to move a constant from the scalar domain anyway.
  if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
    RHS = combinevXi1ConstantToInteger(RHS, DAG);
    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
                       DAG.getBitcast(DstVT, LHS), RHS);
  }

  return SDValue();
}

static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  SDLoc DL(BV);
  unsigned NumElts = BV->getNumOperands();
  SDValue Splat = BV->getSplatValue();

  // Build MMX element from integer GPR or SSE float values.
  auto CreateMMXElement = [&](SDValue V) {
    if (V.isUndef())
      return DAG.getUNDEF(MVT::x86mmx);
    if (V.getValueType().isFloatingPoint()) {
      if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
        V = DAG.getBitcast(MVT::v2i64, V);
        return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
      }
      V = DAG.getBitcast(MVT::i32, V);
    } else {
      V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
    }
    return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
  };

  // Convert build vector ops to MMX data in the bottom elements.
  SmallVector<SDValue, 8> Ops;

  // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
  if (Splat) {
    if (Splat.isUndef())
      return DAG.getUNDEF(MVT::x86mmx);

    Splat = CreateMMXElement(Splat);

    if (Subtarget.hasSSE1()) {
      // Unpack v8i8 to splat i8 elements to lowest 16-bits.
      if (NumElts == 8)
        Splat = DAG.getNode(
            ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
            DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
            Splat);

      // Use PSHUFW to repeat 16-bit elements.
      unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
      return DAG.getNode(
          ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
          DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32),
          Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
    }
    Ops.append(NumElts, Splat);
  } else {
    for (unsigned i = 0; i != NumElts; ++i)
      Ops.push_back(CreateMMXElement(BV->getOperand(i)));
  }

  // Use tree of PUNPCKLs to build up general MMX vector.
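  // e.g. for 8 byte elements: punpcklbw pairs of bytes, punpcklwd the 4
  // results, then punpckldq the final 2, leaving the built vector in Ops[0].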
  while (Ops.size() > 1) {
    unsigned NumOps = Ops.size();
    unsigned IntrinOp =
        (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
                     : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
                                    : Intrinsic::x86_mmx_punpcklbw));
    SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
    for (unsigned i = 0; i != NumOps; i += 2)
      Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
                               Ops[i], Ops[i + 1]);
    Ops.resize(NumOps / 2);
  }

  return Ops[0];
}

static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI,
                              const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SrcVT = N0.getValueType();

  // Try to match patterns such as
  // (i16 bitcast (v16i1 x))
  // ->
  // (i16 movmsk (16i8 sext (v16i1 x)))
  // before the setcc result is scalarized on subtargets that don't have legal
  // vxi1 types.
  if (DCI.isBeforeLegalize()) {
    SDLoc dl(N);
    if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
      return V;

    // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
    // legalization destroys the v4i32 type.
    if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && SrcVT == MVT::v4i1 &&
        VT.isScalarInteger() && N0.getOpcode() == ISD::SETCC &&
        N0.getOperand(0).getValueType() == MVT::v4i32 &&
        ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode()) &&
        cast<CondCodeSDNode>(N0.getOperand(2))->get() == ISD::SETLT) {
      SDValue N00 = N0.getOperand(0);
      // Only do this if we can avoid scalarizing the input.
      if (ISD::isNormalLoad(N00.getNode()) ||
          (N00.getOpcode() == ISD::BITCAST &&
           N00.getOperand(0).getValueType() == MVT::v4f32)) {
        SDValue V = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32,
                                DAG.getBitcast(MVT::v4f32, N00));
        return DAG.getZExtOrTrunc(V, dl, VT);
      }
    }

    // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
    // type, widen both sides to avoid a trip through memory.
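    // e.g. (v4i1 (bitcast (i4 X))) is rebuilt as
    // (extract_subvector (v8i1 (bitcast (i8 (any_extend X)))), 0).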
    if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
        Subtarget.hasAVX512()) {
      N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
      N0 = DAG.getBitcast(MVT::v8i1, N0);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
                         DAG.getIntPtrConstant(0, dl));
    }

    // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
    // type, widen both sides to avoid a trip through memory.
    if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
        Subtarget.hasAVX512()) {
      // Use zeros for the widening if we already have some zeroes. This can
      // allow SimplifyDemandedBits to remove scalar ANDs that may be
      // downstream of this.
      // FIXME: It might make sense to detect a concat_vectors with a mix of
      // zeroes and undef and turn it into insert_subvector for i1 vectors as
      // a separate combine. What we can't do is canonicalize the operands of
      // such a concat or we'll get into a loop with SimplifyDemandedBits.
      if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
        SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
        if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
          SrcVT = LastOp.getValueType();
          unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
          SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
          Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
          N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
          N0 = DAG.getBitcast(MVT::i8, N0);
          return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
        }
      }

      unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
      SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
      Ops[0] = N0;
      N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
      N0 = DAG.getBitcast(MVT::i8, N0);
      return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
    }
  }

  // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
  // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
  // due to insert_subvector legalization on KNL. By promoting the copy to i16
  // we can help with known bits propagation from the vXi1 domain to the
  // scalar domain.
  if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
      !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      N0.getOperand(0).getValueType() == MVT::v16i1 &&
      isNullConstant(N0.getOperand(1)))
    return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
                       DAG.getBitcast(MVT::i16, N0.getOperand(0)));

  // Combine (bitcast (vbroadcast_load)) -> (vbroadcast_load). The memory VT
  // determines the number of bits loaded. Remaining bits are zero.
  if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
      VT.getScalarSizeInBits() == SrcVT.getScalarSizeInBits()) {
    auto *BCast = cast<MemIntrinsicSDNode>(N0);
    SDVTList Tys = DAG.getVTList(VT, MVT::Other);
    SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
    SDValue ResNode =
        DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
                                VT.getVectorElementType(),
                                BCast->getMemOperand());
    DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
    return ResNode;
  }

  // Since MMX types are special and don't usually play with other vector types,
  // it's better to handle them early to be sure we emit efficient code by
  // avoiding store-load conversions.
  if (VT == MVT::x86mmx) {
    // Detect MMX constant vectors.
    APInt UndefElts;
    SmallVector<APInt, 1> EltBits;
    if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
      SDLoc DL(N0);
      // Handle zero-extension of i32 with MOVD.
      if (EltBits[0].countLeadingZeros() >= 32)
        return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
                           DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
      // Else, bitcast to a double.
      // TODO - investigate supporting sext 32-bit immediates on x86_64.
      APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
      return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
    }

    // Detect bitcasts to x86mmx low word.
    if (N0.getOpcode() == ISD::BUILD_VECTOR &&
        (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
        N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
      bool LowUndef = true, AllUndefOrZero = true;
      for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
        SDValue Op = N0.getOperand(i);
        LowUndef &= Op.isUndef() || (i >= e/2);
        AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
      }
      if (AllUndefOrZero) {
        SDValue N00 = N0.getOperand(0);
        SDLoc dl(N00);
        N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
                       : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
        return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
      }
    }

    // Detect bitcasts of 64-bit build vectors and convert to a
    // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
    // lowest element.
    if (N0.getOpcode() == ISD::BUILD_VECTOR &&
        (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
         SrcVT == MVT::v8i8))
      return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);

    // Detect bitcasts between element or subvector extraction to x86mmx.
    if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
         N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
        isNullConstant(N0.getOperand(1))) {
      SDValue N00 = N0.getOperand(0);
      if (N00.getValueType().is128BitVector())
        return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
                           DAG.getBitcast(MVT::v2i64, N00));
    }

    // Detect bitcasts from FP_TO_SINT to x86mmx.
    if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
      SDLoc DL(N0);
      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
                                DAG.getUNDEF(MVT::v2i32));
      return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
                         DAG.getBitcast(MVT::v2i64, Res));
    }
  }

  // Try to remove a bitcast of constant vXi1 vector. We have to legalize
  // most of these to scalar anyway.
  if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
      SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
      ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
    return combinevXi1ConstantToInteger(N0, DAG);
  }

  if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
      VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
      isa<ConstantSDNode>(N0)) {
    auto *C = cast<ConstantSDNode>(N0);
    if (C->isAllOnesValue())
      return DAG.getConstant(1, SDLoc(N0), VT);
    if (C->isNullValue())
      return DAG.getConstant(0, SDLoc(N0), VT);
  }

  // Try to remove bitcasts from input and output of mask arithmetic to
  // remove GPR<->K-register crossings.
  if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
    return V;

  // Convert a bitcasted integer logic operation that has one bitcasted
  // floating-point operand into a floating-point logic operation. This may
  // create a load of a constant, but that is cheaper than materializing the
  // constant in an integer register and transferring it to an SSE register or
  // transferring the SSE operand to integer register and back.
  unsigned FPOpcode;
  switch (N0.getOpcode()) {
    case ISD::AND: FPOpcode = X86ISD::FAND; break;
    case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
    case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
    default: return SDValue();
  }

  if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
        (Subtarget.hasSSE2() && VT == MVT::f64)))
    return SDValue();

  SDValue LogicOp0 = N0.getOperand(0);
  SDValue LogicOp1 = N0.getOperand(1);
  SDLoc DL0(N0);

  // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
  if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
      LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
      !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
    SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
    return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
  }
  // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
  if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
      LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
      !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
    SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
    return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
  }

  return SDValue();
}

// Given an ABS node, detect the following pattern:
// (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
// This is useful as it is the input into a SAD pattern.
static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
  SDValue AbsOp1 = Abs->getOperand(0);
  if (AbsOp1.getOpcode() != ISD::SUB)
    return false;

  Op0 = AbsOp1.getOperand(0);
  Op1 = AbsOp1.getOperand(1);

  // Check if the operands of the sub are zero-extended from vectors of i8.
  if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
      Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
      Op1.getOpcode() != ISD::ZERO_EXTEND ||
      Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
    return false;

  return true;
}

// Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
// to these zexts.
static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
                            const SDValue &Zext1, const SDLoc &DL,
                            const X86Subtarget &Subtarget) {
  // Find the appropriate width for the PSADBW.
  EVT InVT = Zext0.getOperand(0).getValueType();
  unsigned RegSize = std::max(128u, InVT.getSizeInBits());

  // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
  // fill in the missing vector elements with 0.
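  // e.g. a v4i8 input is concatenated with three v4i8 zero vectors to form a
  // v16i8 operand whose low 4 bytes hold the original values.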
  unsigned NumConcat = RegSize / InVT.getSizeInBits();
  SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
  Ops[0] = Zext0.getOperand(0);
  MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
  SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
  Ops[0] = Zext1.getOperand(0);
  SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);

  // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
  auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                          ArrayRef<SDValue> Ops) {
    MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
    return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
  };
  MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
  return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
                          PSADBWBuilder);
}

// Attempt to replace an min/max v8i16/v16i8 horizontal reduction with
// PHMINPOSUW.
static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
                                             const X86Subtarget &Subtarget) {
  // Bail without SSE41.
  if (!Subtarget.hasSSE41())
    return SDValue();

  EVT ExtractVT = Extract->getValueType(0);
  if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
    return SDValue();

  // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
  ISD::NodeType BinOp;
  SDValue Src = DAG.matchBinOpReduction(
      Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
  if (!Src)
    return SDValue();

  EVT SrcVT = Src.getValueType();
  EVT SrcSVT = SrcVT.getScalarType();
  if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
    return SDValue();

  SDLoc DL(Extract);
  SDValue MinPos = Src;

  // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
  while (SrcVT.getSizeInBits() > 128) {
    unsigned NumElts = SrcVT.getVectorNumElements();
    unsigned NumSubElts = NumElts / 2;
    SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts);
    unsigned SubSizeInBits = SrcVT.getSizeInBits();
    SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits);
    SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits);
    MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
  }
  assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
          (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
         "Unexpected value type");

  // PHMINPOSUW applies to UMIN(v8i16), for SMIN/SMAX/UMAX we must apply a mask
  // to flip the value accordingly.
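  // e.g. for an i16 SMAX reduction the elements are XOR'ed with 0x7FFF, which
  // maps signed-greater values to unsigned-smaller ones, so PHMINPOSUW's
  // unsigned min finds the signed max; XOR'ing with the mask again afterwards
  // recovers the original value.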
  SDValue Mask;
  unsigned MaskEltsBits = ExtractVT.getSizeInBits();
  if (BinOp == ISD::SMAX)
    Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
  else if (BinOp == ISD::SMIN)
    Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
  else if (BinOp == ISD::UMAX)
    Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);

  if (Mask)
    MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);

  // For v16i8 cases we need to perform UMIN on pairs of byte elements,
  // shuffling each upper element down and insert zeros. This means that the
  // v16i8 UMIN will leave the upper element as zero, performing zero-extension
  // ready for the PHMINPOS.
  if (ExtractVT == MVT::i8) {
    SDValue Upper = DAG.getVectorShuffle(
        SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
        {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
    MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
  }

  // Perform the PHMINPOS on a v8i16 vector,
  MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
  MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
  MinPos = DAG.getBitcast(SrcVT, MinPos);

  if (Mask)
    MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
                     DAG.getIntPtrConstant(0, DL));
}

// Attempt to replace an all_of/any_of/parity style horizontal reduction with a
// MOVMSK.
static SDValue combineHorizontalPredicateResult(SDNode *Extract,
                                                SelectionDAG &DAG,
                                                const X86Subtarget &Subtarget) {
  // Bail without SSE2.
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT ExtractVT = Extract->getValueType(0);
  unsigned BitWidth = ExtractVT.getSizeInBits();
  if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
      ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
    return SDValue();

  // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
  ISD::NodeType BinOp;
  SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
  if (!Match && ExtractVT == MVT::i1)
    Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
  if (!Match)
    return SDValue();

  // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
  // which we can't support here for now.
  if (Match.getScalarValueSizeInBits() != BitWidth)
    return SDValue();

  SDValue Movmsk;
  SDLoc DL(Extract);
  EVT MatchVT = Match.getValueType();
  unsigned NumElts = MatchVT.getVectorNumElements();
  unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (ExtractVT == MVT::i1) {
    // Special case for (pre-legalization) vXi1 reductions.
    if (NumElts > 64 || !isPowerOf2_32(NumElts))
      return SDValue();
    if (TLI.isTypeLegal(MatchVT)) {
      // If this is a legal AVX512 predicate type then we can just bitcast.
      EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      Movmsk = DAG.getBitcast(MovmskVT, Match);
    } else {
      // Use combineBitcastvxi1 to create the MOVMSK.
      while (NumElts > MaxElts) {
        SDValue Lo, Hi;
        std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
        Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
        NumElts /= 2;
      }
      EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
    }
    if (!Movmsk)
      return SDValue();
    Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
  } else {
    // Bail with AVX512VL (which uses predicate registers).
    if (Subtarget.hasVLX())
      return SDValue();

    unsigned MatchSizeInBits = Match.getValueSizeInBits();
    if (!(MatchSizeInBits == 128 ||
          (MatchSizeInBits == 256 && Subtarget.hasAVX())))
      return SDValue();

    // Make sure this isn't a vector of 1 element. The perf win from using
    // MOVMSK diminishes with fewer elements in the reduction, but it is
    // generally better to get the comparison over to the GPRs as soon as
    // possible to reduce the number of vector ops.
    if (Match.getValueType().getVectorNumElements() < 2)
      return SDValue();

    // Check that we are extracting a reduction of all sign bits.
    if (DAG.ComputeNumSignBits(Match) != BitWidth)
      return SDValue();

    if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
      Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
      MatchSizeInBits = Match.getValueSizeInBits();
    }

    // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
    MVT MaskSrcVT;
    if (64 == BitWidth || 32 == BitWidth)
      MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
                                   MatchSizeInBits / BitWidth);
    else
      MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);

    SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
    Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
    NumElts = MaskSrcVT.getVectorNumElements();
  }
  assert((NumElts <= 32 || NumElts == 64) &&
         "Not expecting more than 64 elements");

  MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
  if (BinOp == ISD::XOR) {
    // parity -> (AND (CTPOP(MOVMSK X)), 1)
    SDValue Mask = DAG.getConstant(1, DL, CmpVT);
    SDValue Result = DAG.getNode(ISD::CTPOP, DL, CmpVT, Movmsk);
    Result = DAG.getNode(ISD::AND, DL, CmpVT, Result, Mask);
    return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
  }

  SDValue CmpC;
  ISD::CondCode CondCode;
  if (BinOp == ISD::OR) {
    // any_of -> MOVMSK != 0
    CmpC = DAG.getConstant(0, DL, CmpVT);
    CondCode = ISD::CondCode::SETNE;
  } else {
    // all_of -> MOVMSK == ((1 << NumElts) - 1)
    CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
                           DL, CmpVT);
    CondCode = ISD::CondCode::SETEQ;
  }

  // The setcc produces an i8 of 0/1, so extend that to the result width and
  // negate to get the final 0/-1 mask value.
  EVT SetccVT =
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
  SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
  SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
  SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
  return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
}

static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  // PSADBW is only supported on SSE2 and up.
  if (!Subtarget.hasSSE2())
    return SDValue();

  // Verify the type we're extracting from is any integer type above i16.
  EVT VT = Extract->getOperand(0).getValueType();
  if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
    return SDValue();

  unsigned RegSize = 128;
  if (Subtarget.useBWIRegs())
    RegSize = 512;
  else if (Subtarget.hasAVX())
    RegSize = 256;

  // We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512.
  // TODO: We should be able to handle larger vectors by splitting them before
  // feeding them into several SADs, and then reducing over those.
  if (RegSize / VT.getVectorNumElements() < 8)
    return SDValue();

  // Match shuffle + add pyramid.
  ISD::NodeType BinOp;
  SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});

  // The operand is expected to be zero extended from i8
  // (verified in detectZextAbsDiff).
  // In order to convert to i64 and above, additional any/zero/sign
  // extend is expected.
  // The zero extend from 32 bit has no mathematical effect on the result.
  // Also the sign extend is basically zero extend
  // (extends the sign bit which is zero).
  // So it is correct to skip the sign/zero extend instruction.
  if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
               Root.getOpcode() == ISD::ZERO_EXTEND ||
               Root.getOpcode() == ISD::ANY_EXTEND))
    Root = Root.getOperand(0);

  // If there was a match, we want Root to be an ABS node that is the root of
  // an abs-diff pattern.
  if (!Root || Root.getOpcode() != ISD::ABS)
    return SDValue();

  // Check whether we have an abs-diff pattern feeding into the ABS node.
  SDValue Zext0, Zext1;
  if (!detectZextAbsDiff(Root, Zext0, Zext1))
    return SDValue();

  // Create the SAD instruction.
  SDLoc DL(Extract);
  SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);

  // If the original vector was wider than 8 elements, sum over the results
  // in the SAD vector.
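  // Each iteration shuffles the upper half of the live partial sums down onto
  // the lower half and adds them, halving the number of live partial sums
  // until the total ends up in element 0.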
  unsigned Stages = Log2_32(VT.getVectorNumElements());
  MVT SadVT = SAD.getSimpleValueType();
  if (Stages > 3) {
    unsigned SadElems = SadVT.getVectorNumElements();

    for (unsigned i = Stages - 3; i > 0; --i) {
      SmallVector<int, 16> Mask(SadElems, -1);
      for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
        Mask[j] = MaskEnd + j;

      SDValue Shuffle =
          DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
      SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
    }
  }

  MVT Type = Extract->getSimpleValueType(0);
  unsigned TypeSizeInBits = Type.getSizeInBits();
  // Return the lowest TypeSizeInBits bits.
  MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
  SAD = DAG.getBitcast(ResVT, SAD);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
                     Extract->getOperand(1));
}

// Attempt to peek through a target shuffle and extract the scalar from the
// source.
static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDLoc dl(N);
  SDValue Src = N->getOperand(0);
  SDValue Idx = N->getOperand(1);

  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();
  EVT SrcSVT = SrcVT.getVectorElementType();
  unsigned NumSrcElts = SrcVT.getVectorNumElements();

  // Don't attempt this for boolean mask vectors or unknown extraction indices.
  if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
    return SDValue();

  SDValue SrcBC = peekThroughBitcasts(Src);

  // Handle extract(broadcast(scalar_value)), it doesn't matter what index is.
  if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
    SDValue SrcOp = SrcBC.getOperand(0);
    if (SrcOp.getValueSizeInBits() == VT.getSizeInBits())
      return DAG.getBitcast(VT, SrcOp);
  }

  // If we're extracting a single element from a broadcast load and there are
  // no other users, just create a single load.
  if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
    auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
    unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
    if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
        VT.getSizeInBits() == SrcBCWidth) {
      SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
                                 MemIntr->getBasePtr(),
                                 MemIntr->getPointerInfo(),
                                 MemIntr->getAlignment(),
                                 MemIntr->getMemOperand()->getFlags());
      DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
      return Load;
    }
  }

  // Handle extract(truncate(x)) for 0'th index.
  // TODO: Treat this as a faux shuffle?
  // TODO: When can we use this for general indices?
  if (ISD::TRUNCATE == Src.getOpcode() && SrcVT.is128BitVector() &&
      isNullConstant(Idx)) {
    Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
    Src = DAG.getBitcast(SrcVT, Src);
    return DAG.getNode(N->getOpcode(), dl, VT, Src, Idx);
  }

  // Resolve the target shuffle inputs and mask.
  SmallVector<int, 16> Mask;
  SmallVector<SDValue, 2> Ops;
  if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
    return SDValue();

  // Attempt to narrow/widen the shuffle mask to the correct size.
  if (Mask.size() != NumSrcElts) {
    if ((NumSrcElts % Mask.size()) == 0) {
      SmallVector<int, 16> ScaledMask;
      int Scale = NumSrcElts / Mask.size();
      scaleShuffleMask<int>(Scale, Mask, ScaledMask);
      Mask = std::move(ScaledMask);
    } else if ((Mask.size() % NumSrcElts) == 0) {
      // Simplify Mask based on demanded element.
      int ExtractIdx = (int)N->getConstantOperandVal(1);
      int Scale = Mask.size() / NumSrcElts;
      int Lo = Scale * ExtractIdx;
      int Hi = Scale * (ExtractIdx + 1);
      for (int i = 0, e = (int)Mask.size(); i != e; ++i)
        if (i < Lo || Hi <= i)
          Mask[i] = SM_SentinelUndef;

      SmallVector<int, 16> WidenedMask;
      while (Mask.size() > NumSrcElts &&
             canWidenShuffleElements(Mask, WidenedMask))
        Mask = std::move(WidenedMask);
      // TODO - investigate support for wider shuffle masks with known upper
      // undef/zero elements for implicit zero-extension.
    }
  }

  // Check if narrowing/widening failed.
  if (Mask.size() != NumSrcElts)
    return SDValue();

  int SrcIdx = Mask[N->getConstantOperandVal(1)];

  // If the shuffle source element is undef/zero then we can just accept it.
  if (SrcIdx == SM_SentinelUndef)
    return DAG.getUNDEF(VT);

  if (SrcIdx == SM_SentinelZero)
    return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
                                : DAG.getConstant(0, dl, VT);

  SDValue SrcOp = Ops[SrcIdx / Mask.size()];
  SrcIdx = SrcIdx % Mask.size();

  // We can only extract other elements from 128-bit vectors and in certain
  // circumstances, depending on SSE-level.
  // TODO: Investigate using extract_subvector for larger vectors.
  // TODO: Investigate float/double extraction if it will be just stored.
  if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
      ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
    assert(SrcSVT == VT && "Unexpected extraction type");
    SrcOp = DAG.getBitcast(SrcVT, SrcOp);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
                       DAG.getIntPtrConstant(SrcIdx, dl));
  }

  if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
      (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
    assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
           "Unexpected extraction type");
    unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
    SrcOp = DAG.getBitcast(SrcVT, SrcOp);
    SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
                                DAG.getIntPtrConstant(SrcIdx, dl));
    return DAG.getZExtOrTrunc(ExtOp, dl, VT);
  }

  return SDValue();
}

/// Extracting a scalar FP value from vector element 0 is free, so extract each
/// operand first, then perform the math as a scalar op.
static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG) {
  assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
  SDValue Vec = ExtElt->getOperand(0);
  SDValue Index = ExtElt->getOperand(1);
  EVT VT = ExtElt->getValueType(0);
  EVT VecVT = Vec.getValueType();

  // TODO: If this is a unary/expensive/expand op, allow extraction from a
  // non-zero element because the shuffle+scalar op will be cheaper?
  if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
    return SDValue();

  // Vector FP compares don't fit the pattern of FP math ops (propagate, not
  // extract, the condition code), so deal with those as a special-case.
  if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
    EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
    if (OpVT != MVT::f32 && OpVT != MVT::f64)
      return SDValue();

    // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
    SDLoc DL(ExtElt);
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
                               Vec.getOperand(0), Index);
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
                               Vec.getOperand(1), Index);
    return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
  }

  if (VT != MVT::f32 && VT != MVT::f64)
    return SDValue();

  // Vector FP selects don't fit the pattern of FP math ops (because the
  // condition has a different type and we have to change the opcode), so deal
  // with those here.
  // FIXME: This is restricted to pre type legalization by ensuring the setcc
  // has i1 elements. If we loosen this we need to convert vector bool to a
  // scalar bool.
  if (Vec.getOpcode() == ISD::VSELECT &&
      Vec.getOperand(0).getOpcode() == ISD::SETCC &&
      Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
      Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
    // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
    SDLoc DL(ExtElt);
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                               Vec.getOperand(0).getValueType().getScalarType(),
                               Vec.getOperand(0), Index);
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                               Vec.getOperand(1), Index);
    SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                               Vec.getOperand(2), Index);
    return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
  }

  // TODO: This switch could include FNEG and the x86-specific FP logic ops
  // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
  // missed load folding and fma+fneg combining.
  switch (Vec.getOpcode()) {
  case ISD::FMA: // Begin 3 operands
  case ISD::FMAD:
  case ISD::FADD: // Begin 2 operands
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FCOPYSIGN:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMAXIMUM:
  case ISD::FMINIMUM:
  case X86ISD::FMAX:
  case X86ISD::FMIN:
  case ISD::FABS: // Begin 1 operand
  case ISD::FSQRT:
  case ISD::FRINT:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case X86ISD::FRCP:
  case X86ISD::FRSQRT: {
    // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
    SDLoc DL(ExtElt);
    SmallVector<SDValue, 4> ExtOps;
    for (SDValue Op : Vec->ops())
      ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
    return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
  }
  default:
    return SDValue();
  }
  llvm_unreachable("All opcodes should return within switch");
}

/// Try to convert a vector reduction sequence composed of binops and shuffles
/// into horizontal ops.
static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
                                            const X86Subtarget &Subtarget) {
  assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");

  // We need at least SSE2 to do anything here.
  if (!Subtarget.hasSSE2())
    return SDValue();

  ISD::NodeType Opc;
  SDValue Rdx =
      DAG.matchBinOpReduction(ExtElt, Opc, {ISD::ADD, ISD::FADD}, true);
  if (!Rdx)
    return SDValue();

  SDValue Index = ExtElt->getOperand(1);
  assert(isNullConstant(Index) &&
         "Reduction doesn't end in an extract from index 0");

  EVT VT = ExtElt->getValueType(0);
  EVT VecVT = Rdx.getValueType();
  if (VecVT.getScalarType() != VT)
    return SDValue();

  SDLoc DL(ExtElt);

  // vXi8 reduction - sub 128-bit vector.
  if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
    if (VecVT == MVT::v4i8) {
      // Pad with zero.
      if (Subtarget.hasSSE41()) {
        Rdx = DAG.getBitcast(MVT::i32, Rdx);
        Rdx = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
                          DAG.getConstant(0, DL, MVT::v4i32), Rdx,
                          DAG.getIntPtrConstant(0, DL));
        Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
      } else {
        Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, Rdx,
                          DAG.getConstant(0, DL, VecVT));
      }
    }
    if (Rdx.getValueType() == MVT::v8i8) {
      // Pad with undef.
      Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Rdx,
                        DAG.getUNDEF(MVT::v8i8));
    }
    Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
                      DAG.getConstant(0, DL, MVT::v16i8));
    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
  }

  // Must be a >=128-bit vector with pow2 elements.
  if ((VecVT.getSizeInBits() % 128) != 0 ||
      !isPowerOf2_32(VecVT.getVectorNumElements()))
    return SDValue();

  // vXi8 reduction - sum lo/hi halves then use PSADBW.
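  // PSADBW against a zero vector sums each group of 8 bytes into a 64-bit
  // lane, so once the value has been reduced to v16i8 and its high 8 bytes
  // folded onto the low 8, a single PSADBW leaves the byte sum in the low
  // lane, from which only the low 8 bits are extracted.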
  if (VT == MVT::i8) {
    while (Rdx.getValueSizeInBits() > 128) {
      unsigned HalfSize = VecVT.getSizeInBits() / 2;
      unsigned HalfElts = VecVT.getVectorNumElements() / 2;
      SDValue Lo = extractSubVector(Rdx, 0, DAG, DL, HalfSize);
      SDValue Hi = extractSubVector(Rdx, HalfElts, DAG, DL, HalfSize);
      Rdx = DAG.getNode(ISD::ADD, DL, Lo.getValueType(), Lo, Hi);
      VecVT = Rdx.getValueType();
    }
    assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");

    SDValue Hi = DAG.getVectorShuffle(
        MVT::v16i8, DL, Rdx, Rdx,
        {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
    Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
    Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
                      getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
  }

  // Only use (F)HADD opcodes if they aren't microcoded or when we are
  // minimizing codesize.
  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
  if (!Subtarget.hasFastHorizontalOps() && !OptForSize)
    return SDValue();

  unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;

  // 256-bit horizontal instructions operate on 128-bit chunks rather than
  // across the whole vector, so we need an extract + hop preliminary stage.
  // This is the only step where the operands of the hop are not the same value.
  // TODO: We could extend this to handle 512-bit or even longer vectors.
  if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
      ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
    unsigned NumElts = VecVT.getVectorNumElements();
    SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
    SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
    Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
    VecVT = Rdx.getValueType();
  }
  if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
      !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
    return SDValue();

  // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
  unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
  for (unsigned i = 0; i != ReductionSteps; ++i)
    Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
}

/// Detect vector gather/scatter index generation and convert it from being a
/// bunch of shuffles and extracts into a somewhat faster sequence.
/// For i686, the best sequence is apparently storing the value and loading
/// scalars back, while for x64 we should use 64-bit extracts and shifts.
static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const X86Subtarget &Subtarget) {
  if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
    return NewOp;

  SDValue InputVector = N->getOperand(0);
  SDValue EltIdx = N->getOperand(1);
  auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);

  EVT SrcVT = InputVector.getValueType();
  EVT VT = N->getValueType(0);
  SDLoc dl(InputVector);
  bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;

  if (CIdx && CIdx->getAPIntValue().uge(SrcVT.getVectorNumElements()))
    return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);

  // Integer Constant Folding.
  if (CIdx && VT.isInteger()) {
    APInt UndefVecElts;
    SmallVector<APInt, 16> EltBits;
    unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
    if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
                                      EltBits, true, false)) {
      uint64_t Idx = CIdx->getZExtValue();
      if (UndefVecElts[Idx])
        return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
      return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
                             dl, VT);
    }
  }

  if (IsPextr) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLI.SimplifyDemandedBits(
            SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
      return SDValue(N, 0);

    // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
    if ((InputVector.getOpcode() == X86ISD::PINSRB ||
         InputVector.getOpcode() == X86ISD::PINSRW) &&
        InputVector.getOperand(2) == EltIdx) {
      assert(SrcVT == InputVector.getOperand(0).getValueType() &&
             "Vector type mismatch");
      SDValue Scl = InputVector.getOperand(1);
      Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
      return DAG.getZExtOrTrunc(Scl, dl, VT);
    }

    // TODO - Remove this once we can handle the implicit zero-extension of
    // X86ISD::PEXTRW/X86ISD::PEXTRB in XFormVExtractWithShuffleIntoLoad,
    // combineHorizontalPredicateResult and combineBasicSADPattern.
    return SDValue();
  }

  if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
    return NewOp;

  // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
  if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
      VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
    SDValue MMXSrc = InputVector.getOperand(0);

    // The bitcast source is a direct mmx result.
    if (MMXSrc.getValueType() == MVT::x86mmx)
      return DAG.getBitcast(VT, InputVector);
  }

  // Detect mmx to i32 conversion through a v2i32 elt extract.
  if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
      VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
    SDValue MMXSrc = InputVector.getOperand(0);

    // The bitcast source is a direct mmx result.
    if (MMXSrc.getValueType() == MVT::x86mmx)
      return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
  }

  // Check whether this extract is the root of a sum of absolute differences
  // pattern. This has to be done here because we really want it to happen
  // pre-legalization.
  if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
    return SAD;

  // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
  if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
    return Cmp;

  // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
  if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
    return MinMax;

  if (SDValue V = combineReductionToHorizontal(N, DAG, Subtarget))
    return V;

  if (SDValue V = scalarizeExtEltFP(N, DAG))
    return V;

  // Attempt to extract a i1 element by using MOVMSK to extract the signbits
  // and then testing the relevant element.
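  // e.g. two extracts of lanes 0 and 2 from a v4i1 compare result can share
  // a single MOVMSK: each extract becomes
  // ((movmsk X) & (1 << lane)) == (1 << lane).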
  if (CIdx && SrcVT.getScalarType() == MVT::i1) {
    SmallVector<SDNode *, 16> BoolExtracts;
    auto IsBoolExtract = [&BoolExtracts](SDNode *Use) {
      if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          isa<ConstantSDNode>(Use->getOperand(1)) &&
          Use->getValueType(0) == MVT::i1) {
        BoolExtracts.push_back(Use);
        return true;
      }
      return false;
    };
    if (all_of(InputVector->uses(), IsBoolExtract) &&
        BoolExtracts.size() > 1) {
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
      if (SDValue BC =
              combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
        for (SDNode *Use : BoolExtracts) {
          // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
          unsigned MaskIdx = Use->getConstantOperandVal(1);
          APInt MaskBit = APInt::getOneBitSet(NumSrcElts, MaskIdx);
          SDValue Mask = DAG.getConstant(MaskBit, dl, BCVT);
          SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
          Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
          DCI.CombineTo(Use, Res);
        }
        return SDValue(N, 0);
      }
    }
  }

  return SDValue();
}

/// If a vector select has an operand that is -1 or 0, try to simplify the
/// select to a bitwise logic operation.
/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
static SDValue
combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  EVT VT = LHS.getValueType();
  EVT CondVT = Cond.getValueType();
  SDLoc DL(N);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (N->getOpcode() != ISD::VSELECT)
    return SDValue();

  assert(CondVT.isVector() && "Vector select expects a vector selector!");

  // Check if the first operand is all zeros and Cond type is vXi1.
  // This situation only applies to avx512.
  // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
  // TODO: Can we assert that both operands are not zeros (because that should
  //       get simplified at node creation time)?
  bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
  bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());

  // If both inputs are 0/undef, create a complete zero vector.
  // FIXME: As noted above this should be handled by DAGCombiner/getNode.
  if (TValIsAllZeros && FValIsAllZeros) {
    if (VT.isFloatingPoint())
      return DAG.getConstantFP(0.0, DL, VT);
    return DAG.getConstant(0, DL, VT);
  }

  if (TValIsAllZeros && !FValIsAllZeros && Subtarget.hasAVX512() &&
      Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1) {
    // Invert the cond to not(cond) : xor(op,allones)=not(op)
    SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
    // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
    return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
  }

  // To use the condition operand as a bitwise mask, it must have elements that
  // are the same size as the select elements. I.e., the condition operand must
  // have already been promoted from the IR select condition type <N x i1>.
  // Don't check if the types themselves are equal because that excludes
  // vector floating-point selects.
  if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  // Try to invert the condition if true value is not all 1s and false value is
  // not all 0s. Only do this if the condition has one use.
  bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
  if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
      // Check if the selector will be produced by CMPP*/PCMP*.
      Cond.getOpcode() == ISD::SETCC &&
      // Check if SETCC has already been promoted.
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
          CondVT) {
    bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());

    if (TValIsAllZeros || FValIsAllOnes) {
      SDValue CC = Cond.getOperand(2);
      ISD::CondCode NewCC =
          ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                               Cond.getOperand(0).getValueType().isInteger());
      Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
                          NewCC);
      std::swap(LHS, RHS);
      TValIsAllOnes = FValIsAllOnes;
      FValIsAllZeros = TValIsAllZeros;
    }
  }

  // Cond value must be 'sign splat' to be converted to a logical op.
  if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
    return SDValue();

  // vselect Cond, 111..., 000... -> Cond
  if (TValIsAllOnes && FValIsAllZeros)
    return DAG.getBitcast(VT, Cond);

  if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
    return SDValue();

  // vselect Cond, 111..., X -> or Cond, X
  if (TValIsAllOnes) {
    SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
    SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
    return DAG.getBitcast(VT, Or);
  }

  // vselect Cond, X, 000... -> and Cond, X
  if (FValIsAllZeros) {
    SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
    SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
    return DAG.getBitcast(VT, And);
  }

  // vselect Cond, 000..., X -> andn Cond, X
  if (TValIsAllZeros) {
    MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
    SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
    SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
    SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
    return DAG.getBitcast(VT, AndN);
  }

  return SDValue();
}

/// If both arms of a vector select are concatenated vectors, split the select,
/// and concatenate the result to eliminate a wide (256-bit) vector instruction:
///   vselect Cond, (concat T0, T1), (concat F0, F1) -->
///   concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
    return SDValue();

  // TODO: Split 512-bit vectors too?
  EVT VT = N->getValueType(0);
  if (!VT.is256BitVector())
    return SDValue();

  // TODO: Split as long as any 2 of the 3 operands are concatenated?
  SDValue Cond = N->getOperand(0);
  SDValue TVal = N->getOperand(1);
  SDValue FVal = N->getOperand(2);
  SmallVector<SDValue, 4> CatOpsT, CatOpsF;
  if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
      !collectConcatOps(TVal.getNode(), CatOpsT) ||
      !collectConcatOps(FVal.getNode(), CatOpsF))
    return SDValue();

  auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
                            ArrayRef<SDValue> Ops) {
    return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
  };
  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
                          makeBlend, /*CheckBWI*/ false);
}

static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  SDLoc DL(N);

  auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
  auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
  if (!TrueC || !FalseC)
    return SDValue();

  // Don't do this for crazy integer types.
  EVT VT = N->getValueType(0);
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  // We're going to use the condition bit in math or logic ops. We could allow
  // this with a wider condition value (post-legalization it becomes an i8),
  // but if nothing is creating selects that late, it doesn't matter.
  if (Cond.getValueType() != MVT::i1)
    return SDValue();

  // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
  // 3, 5, or 9 with i32/i64, so those get transformed too.
  // TODO: For constants that overflow or do not differ by power-of-2 or small
  // multiplier, convert to 'and' + 'add'.
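  // e.g. select Cond, i32 6, i32 2 --> (zext(Cond) << 2) + 2, and an
  // absolute difference of 3, 5 or 9 maps onto a single LEA.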
  const APInt &TrueVal = TrueC->getAPIntValue();
  const APInt &FalseVal = FalseC->getAPIntValue();
  bool OV;
  APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
  if (OV)
    return SDValue();

  APInt AbsDiff = Diff.abs();
  if (AbsDiff.isPowerOf2() ||
      ((VT == MVT::i32 || VT == MVT::i64) &&
       (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {

    // We need a positive multiplier constant for shift/LEA codegen. The 'not'
    // of the condition can usually be folded into a compare predicate, but even
    // without that, the sequence should be cheaper than a CMOV alternative.
    if (TrueVal.slt(FalseVal)) {
      Cond = DAG.getNOT(DL, Cond, MVT::i1);
      std::swap(TrueC, FalseC);
    }

    // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
    SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);

    // Multiply condition by the difference if non-one.
    if (!AbsDiff.isOneValue())
      R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));

    // Add the base if non-zero.
    if (!FalseC->isNullValue())
      R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));

    return R;
  }

  return SDValue();
}

/// If this is a *dynamic* select (non-constant condition) and we can match
/// this node with one of the variable blend instructions, restructure the
/// condition so that blends can use the high (sign) bit of each element.
/// This function will also call SimplifyDemandedBits on already created
/// BLENDV to perform additional simplifications.
static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget &Subtarget) {
  SDValue Cond = N->getOperand(0);
  if ((N->getOpcode() != ISD::VSELECT &&
       N->getOpcode() != X86ISD::BLENDV) ||
      ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
    return SDValue();

  // Don't optimize before the condition has been transformed to a legal type
  // and don't ever optimize vector selects that map to AVX512 mask-registers.
  unsigned BitWidth = Cond.getScalarValueSizeInBits();
  if (BitWidth < 8 || BitWidth > 64)
    return SDValue();

  // We can only handle the cases where VSELECT is directly legal on the
  // subtarget. We custom lower VSELECT nodes with constant conditions and
  // this makes it hard to see whether a dynamic VSELECT will correctly
  // lower, so we both check the operation's status and explicitly handle the
  // cases where a *dynamic* blend will fail even though a constant-condition
  // blend could be custom lowered.
  // FIXME: We should find a better way to handle this class of problems.
  // Potentially, we should combine constant-condition vselect nodes
  // pre-legalization into shuffles and not mark as many types as custom
  // lowered.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = N->getValueType(0);
  if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
    return SDValue();
  // FIXME: We don't support i16-element blends currently. We could and
  // should support them by making *all* the bits in the condition be set
  // rather than just the high bit and using an i8-element blend.
  if (VT.getVectorElementType() == MVT::i16)
    return SDValue();
  // Dynamic blending was only available from SSE4.1 onward.
  if (VT.is128BitVector() && !Subtarget.hasSSE41())
    return SDValue();
  // Byte blends are only available in AVX2.
  if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
    return SDValue();
  // There are no 512-bit blend instructions that use sign bits.
  if (VT.is512BitVector())
    return SDValue();

  // TODO: Add other opcodes eventually lowered into BLEND.
  for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
       UI != UE; ++UI)
    if ((UI->getOpcode() != ISD::VSELECT &&
         UI->getOpcode() != X86ISD::BLENDV) ||
        UI.getOperandNo() != 0)
      return SDValue();

  APInt DemandedMask(APInt::getSignMask(BitWidth));
  KnownBits Known;
  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                        !DCI.isBeforeLegalizeOps());
  if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true))
    return SDValue();

  // If we changed the computation somewhere in the DAG, this change will
  // affect all users of Cond. Update all the nodes so that we do not use
  // the generic VSELECT anymore. Otherwise, we may perform wrong
  // optimizations as we messed with the actual expectation for the vector
  // boolean values.
  for (SDNode *U : Cond->uses()) {
    if (U->getOpcode() == X86ISD::BLENDV)
      continue;

    SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
                             Cond, U->getOperand(1), U->getOperand(2));
    DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
    DCI.AddToWorklist(U);
  }
  DCI.CommitTargetLoweringOpt(TLO);
  return SDValue(N, 0);
}

/// Do target-specific dag combines on SELECT and VSELECT nodes.
static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  // Try simplification again because we use this function to optimize
  // BLENDV nodes that are not handled by the generic combiner.
  if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
    return V;

  EVT VT = LHS.getValueType();
  EVT CondVT = Cond.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Convert vselects with constant condition into shuffles.
  if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
      DCI.isBeforeLegalizeOps()) {
    SmallVector<int, 64> Mask;
    if (createShuffleMaskFromVSELECT(Mask, Cond))
      return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
  }

  // If we have SSE[12] support, try to form min/max nodes. SSE min/max
  // instructions match the semantics of the common C idiom x<y?x:y but not
  // x<=y?x:y, because of how they handle negative zero (which can be
  // ignored in unsafe-math mode).
  // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
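  // Note: the SSE MIN/MAX instructions return the second source operand when
  // the inputs are unordered (NaN) or compare equal (+0.0 vs -0.0), which is
  // why the cases below swap or reject operands.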
  if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
      VT != MVT::f80 && VT != MVT::f128 &&
      (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
      (Subtarget.hasSSE2() ||
       (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    unsigned Opcode = 0;
    // Check for x CC y ? x : y.
    if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
        DAG.isEqualTo(RHS, Cond.getOperand(1))) {
      switch (CC) {
      default: break;
      case ISD::SETULT:
        // Converting this to a min would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.UnsafeFPMath &&
              !(DAG.isKnownNeverZeroFloat(LHS) ||
                DAG.isKnownNeverZeroFloat(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETOLE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETULE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        LLVM_FALLTHROUGH;
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETOGE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGT:
        // Converting this to a max would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.UnsafeFPMath &&
              !(DAG.isKnownNeverZeroFloat(LHS) ||
                DAG.isKnownNeverZeroFloat(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        LLVM_FALLTHROUGH;
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMAX;
        break;
      }
    // Check for x CC y ? y : x -- a min/max with reversed arms.
    } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
               DAG.isEqualTo(RHS, Cond.getOperand(0))) {
      switch (CC) {
      default: break;
      case ISD::SETOGE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !(DAG.isKnownNeverZeroFloat(LHS) ||
              DAG.isKnownNeverZeroFloat(RHS))) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGT:
        // Converting this to a min would handle NaNs incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        LLVM_FALLTHROUGH;
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETULT:
        // Converting this to a max would handle NaNs incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETOLE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !DAG.isKnownNeverZeroFloat(LHS) &&
            !DAG.isKnownNeverZeroFloat(RHS)) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETULE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        LLVM_FALLTHROUGH;
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMAX;
        break;
      }
    }

    if (Opcode)
      return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
  }

  // Some mask scalar intrinsics rely on checking if only one bit is set
  // and implement it in C code like this:
  // A[0] = (U & 1) ? A[0] : W[0];
  // This creates some redundant instructions that break pattern matching.
  // fold (select (setcc (and X, 1), 0, seteq), Y, Z) -> (select (and X, 1), Z, Y)
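  // e.g. (select (seteq (and U, 1), 0), Y, Z) becomes
  // (select (trunc (and U, 1) to i8), Z, Y): the compare against zero is
  // dropped and the arms are swapped to compensate.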
  if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
      Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    SDValue AndNode = Cond.getOperand(0);
    if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
        isNullConstant(Cond.getOperand(1)) &&
        isOneConstant(AndNode.getOperand(1))) {
      // LHS and RHS swapped due to
      // setcc outputting 1 when AND resulted in 0 and vice versa.
      AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
      return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
    }
  }

  // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
  // lowering on KNL. In this case we convert it to
  // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
  // The same situation applies to all vectors of i8 and i16 without BWI.
  // Make sure we extend these even before type legalization gets a chance to
  // split wide vectors.
  // Since SKX, these selects have a proper lowering.
  if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
      CondVT.getVectorElementType() == MVT::i1 &&
      (VT.getVectorElementType() == MVT::i8 ||
       VT.getVectorElementType() == MVT::i16)) {
    Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
    return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
  }

  // AVX512 - Extend select with zero to merge with target shuffle.
  // select(mask, extract_subvector(shuffle(x)), zero) -->
  // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
  // TODO - support non target shuffles as well.
  if (Subtarget.hasAVX512() && CondVT.isVector() &&
      CondVT.getVectorElementType() == MVT::i1) {
    auto SelectableOp = [&TLI](SDValue Op) {
      return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
             isTargetShuffle(Op.getOperand(0).getOpcode()) &&
             isNullConstant(Op.getOperand(1)) &&
             TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
             Op.hasOneUse() && Op.getOperand(0).hasOneUse();
    };

    bool SelectableLHS = SelectableOp(LHS);
    bool SelectableRHS = SelectableOp(RHS);
    bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
    bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());

    if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
      EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
                                : RHS.getOperand(0).getValueType();
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
      LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
                            VT.getSizeInBits());
      RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
                            VT.getSizeInBits());
      Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
                         DAG.getUNDEF(SrcCondVT), Cond,
                         DAG.getIntPtrConstant(0, DL));
      SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
      return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
    }
  }

  if (SDValue V = combineSelectOfTwoConstants(N, DAG))
    return V;

  // Canonicalize max and min:
  // (x > y) ? x : y -> (x >= y) ? x : y
  // (x < y) ? x : y -> (x <= y) ? x : y
  // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
  // the need for an extra compare
  // against zero. e.g.
  // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
  // subl   %esi, %edi
  // testl  %edi, %edi
  // movl   $0, %eax
  // cmovgl %edi, %eax
  // =>
  // xorl   %eax, %eax
  // subl   %esi, %edi
  // cmovsl %eax, %edi
  if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
      Cond.hasOneUse() &&
      DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    switch (CC) {
    default: break;
    case ISD::SETLT:
    case ISD::SETGT: {
      ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
      Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
                          Cond.getOperand(0), Cond.getOperand(1), NewCC);
      return DAG.getSelect(DL, VT, Cond, LHS, RHS);
    }
    }
  }

  // Match VSELECTs into subs with unsigned saturation.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
      // psubus is available in SSE2 for i8 and i16 vectors.
      Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
      isPowerOf2_32(VT.getVectorNumElements()) &&
      (VT.getVectorElementType() == MVT::i8 ||
       VT.getVectorElementType() == MVT::i16)) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    // Check if one of the arms of the VSELECT is a zero vector. If it's on the
    // left side invert the predicate to simplify logic below.
    SDValue Other;
    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
      Other = RHS;
      CC = ISD::getSetCCInverse(CC, true);
    } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
      Other = LHS;
    }

    if (Other.getNode() && Other->getNumOperands() == 2 &&
        Other->getOperand(0) == Cond.getOperand(0)) {
      SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
      SDValue CondRHS = Cond->getOperand(1);

      // Look for a general sub with unsigned saturation first.
      // x >= y ? x-y : 0 --> subus x, y
      // x >  y ? x-y : 0 --> subus x, y
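      // e.g. with unsigned i8 lanes x=10, y=200: x-y wraps to 66, but the
      // select picks 0, which is exactly what the saturating subtract gives.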
      if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
          Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
        return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);

      if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
        if (isa<BuildVectorSDNode>(CondRHS)) {
          // If the RHS is a constant we have to reverse the const
          // canonicalization.
          // x > C-1 ? x+(-C) : 0 --> subus x, C
          auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
            return (!Op && !Cond) ||
                   (Op && Cond &&
                    Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
          };
          if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
              ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
                                        /*AllowUndefs*/ true)) {
            OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                OpRHS);
            return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
          }

          // Another special case: If C was a sign bit, the sub has been
          // canonicalized into a xor.
          // FIXME: Would it be better to use computeKnownBits to determine
          //        whether it's safe to decanonicalize the xor?
          // x s< 0 ? x^C : 0 --> subus x, C
          if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
            if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
                ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
                OpRHSConst->getAPIntValue().isSignMask()) {
              // Note that we have to rebuild the RHS constant here to ensure we
              // don't rely on particular values of undef lanes.
              OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
              return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
            }
          }
        }
      }
    }
  }

  // Match VSELECTs into add with unsigned saturation.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
      // paddus is available in SSE2 for i8 and i16 vectors.
      Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
      isPowerOf2_32(VT.getVectorNumElements()) &&
      (VT.getVectorElementType() == MVT::i8 ||
       VT.getVectorElementType() == MVT::i16)) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    SDValue CondLHS = Cond->getOperand(0);
    SDValue CondRHS = Cond->getOperand(1);

    // Check if one of the arms of the VSELECT is vector with all bits set.
    // If it's on the left side invert the predicate to simplify logic below.
    SDValue Other;
    if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
      Other = RHS;
      CC = ISD::getSetCCInverse(CC, true);
    } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
      Other = LHS;
    }

    if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
      SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);

      // Canonicalize condition operands.
      if (CC == ISD::SETUGE) {
        std::swap(CondLHS, CondRHS);
        CC = ISD::SETULE;
      }

      // We can test against either of the addition operands.
      // x <= x+y ? x+y : ~0 --> addus x, y
      // x+y >= x ? x+y : ~0 --> addus x, y
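      // e.g. with unsigned i8 lanes x=200, y=100: x+y wraps to 44 (< x), so
      // the select yields ~0 (255), matching the saturating add result.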
      if (CC == ISD::SETULE && Other == CondRHS &&
          (OpLHS == CondLHS || OpRHS == CondLHS))
        return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);

      if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
          CondLHS == OpLHS) {
        // If the RHS is a constant we have to reverse the const
        // canonicalization.
        // x > ~C ? x+C : ~0 --> addus x, C
        auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
          return Cond->getAPIntValue() == ~Op->getAPIntValue();
        };
        if (CC == ISD::SETULE &&
            ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
          return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
      }
    }
  }

  // Early exit check
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
    return V;

  if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
    return V;

  if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
    return V;

  // select(~Cond, X, Y) -> select(Cond, Y, X)
  if (CondVT.getScalarType() != MVT::i1)
    if (SDValue CondNot = IsNOT(Cond, DAG))
      return DAG.getNode(N->getOpcode(), DL, VT,
                         DAG.getBitcast(CondVT, CondNot), RHS, LHS);

  // Custom action for SELECT MMX
  if (VT == MVT::x86mmx) {
    LHS = DAG.getBitcast(MVT::i64, LHS);
    RHS = DAG.getBitcast(MVT::i64, RHS);
    SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
    return DAG.getBitcast(VT, newSelect);
  }

  return SDValue();
}

/// Combine:
///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
/// to:
///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
/// Note that this is only legal for some op/cc combinations.
static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
  // This combine only operates on CMP-like nodes.
  if (!(Cmp.getOpcode() == X86ISD::CMP ||
        (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
    return SDValue();

  // Can't replace the cmp if it has more uses than the one we're looking at.
  // FIXME: We would like to be able to handle this, but would need to make sure
  // all uses were updated.
  if (!Cmp.hasOneUse())
    return SDValue();

  // This only applies to variations of the common case:
  //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
  //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
  //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
  //   (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
  // Using the proper condcodes (see below), overflow is checked for.
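  // e.g. "if (atomic_fetch_add(&v, 1) < 0)": old < 0 is equivalent to
  // old + 1 <= 0, so COND_S on the compare becomes COND_LE on the EFLAGS of
  // the LOCK ADD itself and the separate compare disappears.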

  // FIXME: We can generalize both constraints:
  // - XOR/OR/AND (if they were made to survive AtomicExpand)
  // - LHS != 1
  // if the result is compared.

  SDValue CmpLHS = Cmp.getOperand(0);
  SDValue CmpRHS = Cmp.getOperand(1);

  if (!CmpLHS.hasOneUse())
    return SDValue();

  unsigned Opc = CmpLHS.getOpcode();
  if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
    return SDValue();

  SDValue OpRHS = CmpLHS.getOperand(2);
  auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
  if (!OpRHSC)
    return SDValue();

  APInt Addend = OpRHSC->getAPIntValue();
  if (Opc == ISD::ATOMIC_LOAD_SUB)
    Addend = -Addend;

  auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
  if (!CmpRHSC)
    return SDValue();

  APInt Comparison = CmpRHSC->getAPIntValue();

  // If the addend is the negation of the comparison value, then we can do
  // a full comparison by emitting the atomic arithmetic as a locked sub.
  if (Comparison == -Addend) {
    // The CC is fine, but we need to rewrite the LHS of the comparison as an
    // atomic sub.
    auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
    auto AtomicSub = DAG.getAtomic(
        ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
        /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
        /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
        AN->getMemOperand());
    auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
                                  DAG.getUNDEF(CmpLHS.getValueType()));
    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
    return LockOp;
  }

  // We can handle comparisons with zero in a number of cases by manipulating
  // the CC used.
  if (!Comparison.isNullValue())
    return SDValue();

  if (CC == X86::COND_S && Addend == 1)
    CC = X86::COND_LE;
  else if (CC == X86::COND_NS && Addend == 1)
    CC = X86::COND_G;
  else if (CC == X86::COND_G && Addend == -1)
    CC = X86::COND_GE;
  else if (CC == X86::COND_LE && Addend == -1)
    CC = X86::COND_L;
  else
    return SDValue();

  SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
  DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
                                DAG.getUNDEF(CmpLHS.getValueType()));
  DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
  return LockOp;
}

// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
// code.
//
// Simplify the following patterns:
// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
// to (Op EFLAGS Cond)
//
// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
// to (Op EFLAGS !Cond)
//
// where Op could be BRCOND or CMOV.
//
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
  // This combine only operates on CMP-like nodes.
  if (!(Cmp.getOpcode() == X86ISD::CMP ||
        (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
    return SDValue();

  // Quit if not used as a boolean value.
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  // Check CMP operands. One of them should be 0 or 1 and the other should be
  // an SetCC or extended from it.
  SDValue Op1 = Cmp.getOperand(0);
  SDValue Op2 = Cmp.getOperand(1);

  SDValue SetCC;
  const ConstantSDNode* C = nullptr;
  bool needOppositeCond = (CC == X86::COND_E);
  bool checkAgainstTrue = false; // Is it a comparison against 1?

  if ((C = dyn_cast<ConstantSDNode>(Op1)))
    SetCC = Op2;
  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
    SetCC = Op1;
  else // Quit if neither operand is a constant.
    return SDValue();

  if (C->getZExtValue() == 1) {
    needOppositeCond = !needOppositeCond;
    checkAgainstTrue = true;
  } else if (C->getZExtValue() != 0)
    // Quit if the constant is neither 0 nor 1.
    return SDValue();

  bool truncatedToBoolWithAnd = false;
  // Skip (zext $x), (trunc $x), or (and $x, 1) node.
  while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
         SetCC.getOpcode() == ISD::TRUNCATE ||
         SetCC.getOpcode() == ISD::AND) {
    if (SetCC.getOpcode() == ISD::AND) {
      int OpIdx = -1;
      if (isOneConstant(SetCC.getOperand(0)))
        OpIdx = 1;
      if (isOneConstant(SetCC.getOperand(1)))
        OpIdx = 0;
      if (OpIdx < 0)
        break;
      SetCC = SetCC.getOperand(OpIdx);
      truncatedToBoolWithAnd = true;
    } else
      SetCC = SetCC.getOperand(0);
  }

  switch (SetCC.getOpcode()) {
  case X86ISD::SETCC_CARRY:
    // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
    // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
    // i.e. it's a comparison against true but the result of SETCC_CARRY is not
    // truncated to i1 using 'and'.
    if (checkAgainstTrue && !truncatedToBoolWithAnd)
      break;
    assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
           "Invalid use of SETCC_CARRY!");
    LLVM_FALLTHROUGH;
  case X86ISD::SETCC:
    // Set the condition code or opposite one if necessary.
    CC = X86::CondCode(SetCC.getConstantOperandVal(0));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(1);
  case X86ISD::CMOV: {
    // Check whether false/true value has canonical one, i.e. 0 or 1.
    ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
    ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
    // Quit if true value is not a constant.
    if (!TVal)
      return SDValue();
    // Quit if false value is not a constant.
    if (!FVal) {
      SDValue Op = SetCC.getOperand(0);
      // Skip 'zext' or 'trunc' node.
      if (Op.getOpcode() == ISD::ZERO_EXTEND ||
          Op.getOpcode() == ISD::TRUNCATE)
        Op = Op.getOperand(0);
      // A special case for rdrand/rdseed, where 0 is set if false cond is
      // found.
      if ((Op.getOpcode() != X86ISD::RDRAND &&
           Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
        return SDValue();
    }
    // Quit if false value is not the constant 0 or 1.
    bool FValIsFalse = true;
    if (FVal && FVal->getZExtValue() != 0) {
      if (FVal->getZExtValue() != 1)
        return SDValue();
      // If FVal is 1, opposite cond is needed.
      needOppositeCond = !needOppositeCond;
      FValIsFalse = false;
    }
    // Quit if TVal is not the constant opposite of FVal.
    if (FValIsFalse && TVal->getZExtValue() != 1)
      return SDValue();
    if (!FValIsFalse && TVal->getZExtValue() != 0)
      return SDValue();
    CC = X86::CondCode(SetCC.getConstantOperandVal(2));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(3);
  }
  }

  return SDValue();
}

/// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
/// Match:
///   (X86or (X86setcc) (X86setcc))
///   (X86cmp (and (X86setcc) (X86setcc)), 0)
static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
                                           X86::CondCode &CC1, SDValue &Flags,
                                           bool &isAnd) {
  if (Cond->getOpcode() == X86ISD::CMP) {
    if (!isNullConstant(Cond->getOperand(1)))
      return false;

    Cond = Cond->getOperand(0);
  }

  isAnd = false;

  SDValue SetCC0, SetCC1;
  switch (Cond->getOpcode()) {
  default: return false;
  case ISD::AND:
  case X86ISD::AND:
    isAnd = true;
    LLVM_FALLTHROUGH;
  case ISD::OR:
  case X86ISD::OR:
    SetCC0 = Cond->getOperand(0);
    SetCC1 = Cond->getOperand(1);
    break;
  }

  // Make sure we have SETCC nodes, using the same flags value.
  if (SetCC0.getOpcode() != X86ISD::SETCC ||
      SetCC1.getOpcode() != X86ISD::SETCC ||
      SetCC0->getOperand(1) != SetCC1->getOperand(1))
    return false;

  CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
  CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
  Flags = SetCC0->getOperand(1);
  return true;
}

// When legalizing carry, we create carries via add X, -1
// If that comes from an actual carry, via setcc, we use the
// carry directly.
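// e.g. (X86ISD::ADD (setcc COND_B, Flags), -1) recreates CF equal to the
// setcc result (0 + ~0 produces no carry, 1 + ~0 does), so the original
// Flags value can be consumed directly by the carry user.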
static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
  if (EFLAGS.getOpcode() == X86ISD::ADD) {
    if (isAllOnesConstant(EFLAGS.getOperand(1))) {
      SDValue Carry = EFLAGS.getOperand(0);
      while (Carry.getOpcode() == ISD::TRUNCATE ||
             Carry.getOpcode() == ISD::ZERO_EXTEND ||
             Carry.getOpcode() == ISD::SIGN_EXTEND ||
             Carry.getOpcode() == ISD::ANY_EXTEND ||
             (Carry.getOpcode() == ISD::AND &&
              isOneConstant(Carry.getOperand(1))))
        Carry = Carry.getOperand(0);
      if (Carry.getOpcode() == X86ISD::SETCC ||
          Carry.getOpcode() == X86ISD::SETCC_CARRY) {
        // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
        uint64_t CarryCC = Carry.getConstantOperandVal(0);
        SDValue CarryOp1 = Carry.getOperand(1);
        if (CarryCC == X86::COND_B)
          return CarryOp1;
        if (CarryCC == X86::COND_A) {
          // Try to convert COND_A into COND_B in an attempt to facilitate
          // materializing "setb reg".
          //
          // Do not flip "e > c", where "c" is a constant, because Cmp
          // instruction cannot take an immediate as its first operand.
          //
          if (CarryOp1.getOpcode() == X86ISD::SUB &&
              CarryOp1.getNode()->hasOneUse() &&
              CarryOp1.getValueType().isInteger() &&
              !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
            SDValue SubCommute =
                DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
                            CarryOp1.getOperand(1), CarryOp1.getOperand(0));
            return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
          }
        }
        // If this is a check of the z flag of an add with 1, switch to the
        // C flag.
        if (CarryCC == X86::COND_E &&
            CarryOp1.getOpcode() == X86ISD::ADD &&
            isOneConstant(CarryOp1.getOperand(1)))
          return CarryOp1;
      }
    }
  }

  return SDValue();
}

/// Optimize an EFLAGS definition used according to the condition code \p CC
/// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
/// uses of chain values.
static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
                                  SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  if (CC == X86::COND_B)
    if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
      return Flags;

  if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
    return R;
  return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
}

/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  SDLoc DL(N);

  SDValue FalseOp = N->getOperand(0);
  SDValue TrueOp = N->getOperand(1);
  X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
  SDValue Cond = N->getOperand(3);

  // cmov X, X, ?, ? --> X
  if (TrueOp == FalseOp)
    return TrueOp;

  // Try to simplify the EFLAGS and condition code operands.
  // We can't always do this as FCMOV only supports a subset of X86 cond.
  if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
    if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
      SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
                       Flags};
      return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
    }
  }

  // If this is a select between two integer constants, try to do some
  // optimizations.  Note that the operands are ordered the opposite of SELECT
  // operands.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
      // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
      // larger than FalseC (the false value).
      if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueC, FalseC);
        std::swap(TrueOp, FalseOp);
      }

      // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3.  Likewise for any pow2/0.
      // This is efficient for any integer data type (including i8/i16) and
      // shift amount.
      if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
        Cond = getSETCC(CC, Cond, DL, DAG);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);

        unsigned ShAmt = TrueC->getAPIntValue().logBase2();
        Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
                           DAG.getConstant(ShAmt, DL, MVT::i8));
        return Cond;
      }

      // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.  This is efficient
      // for any integer data type, including i8/i16.
      if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
        Cond = getSETCC(CC, Cond, DL, DAG);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                           FalseC->getValueType(0), Cond);
        Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                           SDValue(FalseC, 0));
        return Cond;
      }

      // Optimize cases that will turn into an LEA instruction.  This requires
      // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
        APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
        assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
               "Implicit constant truncation");

        bool isFastMultiplier = false;
        if (Diff.ult(10)) {
          switch (Diff.getZExtValue()) {
          default: break;
          case 1:  // result = add base, cond
          case 2:  // result = lea base(    , cond*2)
          case 3:  // result = lea base(cond, cond*2)
          case 4:  // result = lea base(    , cond*4)
          case 5:  // result = lea base(cond, cond*4)
          case 8:  // result = lea base(    , cond*8)
          case 9:  // result = lea base(cond, cond*8)
            isFastMultiplier = true;
            break;
          }
        }

        if (isFastMultiplier) {
          Cond = getSETCC(CC, Cond, DL, DAG);
          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                             Cond);
          // Scale the condition by the difference.
          if (Diff != 1)
            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(Diff, DL, Cond.getValueType()));

          // Add the base if non-zero.
          if (FalseC->getAPIntValue() != 0)
            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                               SDValue(FalseC, 0));
          return Cond;
        }
      }
    }
  }

  // Handle these cases:
  //   (select (x != c), e, c) -> select (x != c), e, x),
  //   (select (x == c), c, e) -> select (x == c), x, e)
  // where the c is an integer constant, and the "select" is the combination
  // of CMOV and CMP.
  //
  // The rationale for this change is that the conditional-move from a constant
  // needs two instructions, however, conditional-move from a register needs
  // only one instruction.
  //
  // CAVEAT: By replacing a constant with a symbolic value, it may obscure
  //  some instruction-combining opportunities. This opt needs to be
  //  postponed as late as possible.
  //
  if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
    // the DCI.xxxx conditions are provided to postpone the optimization as
    // late as possible.

    ConstantSDNode *CmpAgainst = nullptr;
    if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
        (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
        !isa<ConstantSDNode>(Cond.getOperand(0))) {

      if (CC == X86::COND_NE &&
          CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueOp, FalseOp);
      }

      if (CC == X86::COND_E &&
          CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
        SDValue Ops[] = {FalseOp, Cond.getOperand(0),
                         DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
        return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
      }
    }
  }

  // Fold and/or of setcc's to double CMOV:
  //   (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
  //   (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
  //
  // This combine lets us generate:
  //   cmovcc1 (jcc1 if we don't have CMOV)
  //   cmovcc2 (same)
  // instead of:
  //   setcc1
  //   setcc2
  //   and/or
  //   cmovne (jne if we don't have CMOV)
  // When we can't use the CMOV instruction, it might increase branch
  // mispredicts.
  // When we can use CMOV, or when there is no mispredict, this improves
  // throughput and reduces register pressure.
  //
  if (CC == X86::COND_NE) {
    SDValue Flags;
    X86::CondCode CC0, CC1;
    bool isAndSetCC;
    if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
      if (isAndSetCC) {
        std::swap(FalseOp, TrueOp);
        CC0 = X86::GetOppositeBranchCondition(CC0);
        CC1 = X86::GetOppositeBranchCondition(CC1);
      }

      SDValue LOps[] = {FalseOp, TrueOp,
                        DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
      SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
      SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
                       Flags};
      SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
      return CMOV;
    }
  }

  // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
  //      (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
  // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
  //    (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
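  // e.g. the ffs-style idiom "X ? cttz(X) + 1 : 0" becomes
  // (ADD (CMOV -1, (CTTZ X), (X != 0)), 1), keeping the add off the
  // conditional path.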
  if ((CC == X86::COND_NE || CC == X86::COND_E) &&
      Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
    SDValue Add = TrueOp;
    SDValue Const = FalseOp;
    // Canonicalize the condition code for easier matching and output.
    if (CC == X86::COND_E)
      std::swap(Add, Const);

    // We might have replaced the constant in the cmov with the LHS of the
    // compare. If so change it to the RHS of the compare.
    if (Const == Cond.getOperand(0))
      Const = Cond.getOperand(1);

    // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
    if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
        Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
        (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
         Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
        Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
      EVT VT = N->getValueType(0);
      // This should constant fold.
      SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
      SDValue CMov =
          DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
                      DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
      return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
    }
  }

  return SDValue();
}

/// Different mul shrinking modes.
enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };

static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
  EVT VT = N->getOperand(0).getValueType();
  if (VT.getScalarSizeInBits() != 32)
    return false;

  assert(N->getNumOperands() == 2 && "NumOperands of Mul must be 2");
  unsigned SignBits[2] = {1, 1};
  bool IsPositive[2] = {false, false};
  for (unsigned i = 0; i < 2; i++) {
    SDValue Opd = N->getOperand(i);

    SignBits[i] = DAG.ComputeNumSignBits(Opd);
    IsPositive[i] = DAG.SignBitIsZero(Opd);
  }

  bool AllPositive = IsPositive[0] && IsPositive[1];
  unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
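  // A 32-bit value with >= 25 sign bits has at most 8 significant bits
  // (including the sign), i.e. it fits in [-128, 127]; likewise >= 17 sign
  // bits means it fits in i16.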
  // When ranges are from -128 ~ 127, use MULS8 mode.
  if (MinSignBits >= 25)
    Mode = MULS8;
  // When ranges are from 0 ~ 255, use MULU8 mode.
  else if (AllPositive && MinSignBits >= 24)
    Mode = MULU8;
  // When ranges are from -32768 ~ 32767, use MULS16 mode.
  else if (MinSignBits >= 17)
    Mode = MULS16;
  // When ranges are from 0 ~ 65535, use MULU16 mode.
  else if (AllPositive && MinSignBits >= 16)
    Mode = MULU16;
  else
    return false;
  return true;
}

/// When the operands of a vector mul are extended from smaller-sized values,
/// like i8 and i16, the type of the mul may be shrunk to generate more
/// efficient code. Two typical patterns are handled:
/// Pattern1:
///     %2 = sext/zext <N x i8> %1 to <N x i32>
///     %4 = sext/zext <N x i8> %3 to <N x i32>
///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
///     %5 = mul <N x i32> %2, %4
///
/// Pattern2:
///     %2 = zext/sext <N x i16> %1 to <N x i32>
///     %4 = zext/sext <N x i16> %3 to <N x i32>
///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
///     %5 = mul <N x i32> %2, %4
///
/// There are four mul shrinking modes:
/// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
/// -128 to 127, and the scalar value range of %4 is also -128 to 127,
/// generate pmullw+sext32 for it (MULS8 mode).
/// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
/// 0 to 255, and the scalar value range of %4 is also 0 to 255,
/// generate pmullw+zext32 for it (MULU8 mode).
/// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
/// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
/// generate pmullw+pmulhw for it (MULS16 mode).
/// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
/// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
/// generate pmullw+pmulhuw for it (MULU16 mode).
static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  // Check for legality
  // pmullw/pmulhw are not available without SSE2.
  if (!Subtarget.hasSSE2())
    return SDValue();

  // Check for profitability
  // pmulld is available since SSE4.1. It is better to use pmulld
  // instead of pmullw+pmulhw, except for subtargets where pmulld is slower
  // than the expansion.
  bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
  if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
    return SDValue();

  ShrinkMode Mode;
  if (!canReduceVMulWidth(N, DAG, Mode))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getOperand(0).getValueType();
  unsigned NumElts = VT.getVectorNumElements();
  if ((NumElts % 2) != 0)
    return SDValue();

  EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);

  // Shrink the operands of mul.
  SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
  SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);

  // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
  // lower part is needed.
  SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
  if (Mode == MULU8 || Mode == MULS8)
    return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
                       DL, VT, MulLo);

  MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
  // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
  // the higher part is also needed.
  SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
                              ReducedVT, NewN0, NewN1);

  // Repack the lower part and higher part result of mul into a wider
  // result.
  // Generate shuffle functioning as punpcklwd.
  SmallVector<int, 16> ShuffleMask(NumElts);
  for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
    ShuffleMask[2 * i] = i;
    ShuffleMask[2 * i + 1] = i + NumElts;
  }
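  // E.g. for NumElts == 8 this builds the mask <0, 8, 1, 9, 2, 10, 3, 11>,
  // interleaving the low halves of MulLo and MulHi.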
  SDValue ResLo =
      DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
  ResLo = DAG.getBitcast(ResVT, ResLo);
  // Generate shuffle functioning as punpckhwd.
  for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
    ShuffleMask[2 * i] = i + NumElts / 2;
    ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
  }
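  // E.g. for NumElts == 8 this builds the mask <4, 12, 5, 13, 6, 14, 7, 15>,
  // interleaving the high halves of MulLo and MulHi.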
  SDValue ResHi =
      DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
  ResHi = DAG.getBitcast(ResVT, ResHi);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
}

static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
                                 EVT VT, const SDLoc &DL) {

  auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
    SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                                 DAG.getConstant(Mult, DL, VT));
    Result = DAG.getNode(ISD::SHL, DL, VT, Result,
                         DAG.getConstant(Shift, DL, MVT::i8));
    Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
                         N->getOperand(0));
    return Result;
  };

  auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
    SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                                 DAG.getConstant(Mul1, DL, VT));
    Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
                         DAG.getConstant(Mul2, DL, VT));
    Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
                         N->getOperand(0));
    return Result;
  };

  switch (MulAmt) {
  default:
    break;
  case 11:
    // mul x, 11 => add ((shl (mul x, 5), 1), x)
    return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
  case 21:
    // mul x, 21 => add ((shl (mul x, 5), 2), x)
    return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
  case 41:
    // mul x, 41 => add ((shl (mul x, 5), 3), x)
    return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
  case 22:
    // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
    return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
                       combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
  case 19:
    // mul x, 19 => add ((shl (mul x, 9), 1), x)
    return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
  case 37:
    // mul x, 37 => add ((shl (mul x, 9), 2), x)
    return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
  case 73:
    // mul x, 73 => add ((shl (mul x, 9), 3), x)
    return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
  case 13:
    // mul x, 13 => add ((shl (mul x, 3), 2), x)
    return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
  case 23:
    // mul x, 23 => sub ((shl (mul x, 3), 3), x)
    return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
  case 26:
    // mul x, 26 => add ((mul (mul x, 5), 5), x)
    return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
  case 28:
    // mul x, 28 => add ((mul (mul x, 9), 3), x)
    return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
  case 29:
    // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
    return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
                       combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
  }

  // Another trick: if this is a power of 2 plus 2/4/8, we can use a shift
  // followed by a single LEA.
  // First check whether this is a sum of two powers of 2, since that is easy
  // to detect. Then count the trailing zeros to find the smaller power of 2.
  // TODO: We can do this even without LEA at a cost of two shifts and an add.
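  // E.g. MulAmt == 34 (0b100010 == 32 + 2): ScaleShift == 1 and ShiftAmt == 5,
  // so we emit (add (shl x, 5), (shl x, 1)).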
  if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
    unsigned ScaleShift = countTrailingZeros(MulAmt);
    if (ScaleShift >= 1 && ScaleShift < 4) {
      unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
      SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                                   DAG.getConstant(ShiftAmt, DL, MVT::i8));
      SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                                   DAG.getConstant(ScaleShift, DL, MVT::i8));
      return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
    }
  }

  return SDValue();
}

// If the upper 17 bits of each element are zero then we can use PMADDWD,
// which is always at least as quick as PMULLD, except on KNL.
static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (Subtarget.isPMADDWDSlow())
    return SDValue();

  EVT VT = N->getValueType(0);

  // Only support vXi32 vectors.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
    return SDValue();

  // Make sure the vXi16 type is legal. This covers the AVX512 without BWI case.
  // Also allow v2i32 if it will be widened.
  MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
  if (VT != MVT::v2i32 && !DAG.getTargetLoweringInfo().isTypeLegal(WVT))
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // If we are zero extending in two steps (from 8 bits or less) without
  // SSE4.1, it's better to reduce the vmul width instead.
  if (!Subtarget.hasSSE41() &&
      (N0.getOpcode() == ISD::ZERO_EXTEND &&
       N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
      (N1.getOpcode() == ISD::ZERO_EXTEND &&
       N1.getOperand(0).getScalarValueSizeInBits() <= 8))
    return SDValue();

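  // After the bitcast to vXi16, each i32 element becomes a (lo16, hi16) pair.
  // With the upper 17 bits known zero, hi16 is 0 and lo16 is a non-negative
  // 15-bit value, so VPMADDWD computes lo0*lo1 + 0, which cannot overflow and
  // equals the original i32 multiply.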
  APInt Mask17 = APInt::getHighBitsSet(32, 17);
  if (!DAG.MaskedValueIsZero(N1, Mask17) ||
      !DAG.MaskedValueIsZero(N0, Mask17))
    return SDValue();

  // Use SplitOpsAndApply to handle AVX splitting.
  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
    MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
    return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
  };
  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
                          { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
                          PMADDWDBuilder);
}

static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT VT = N->getValueType(0);

  // Only support vXi64 vectors.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
      VT.getVectorNumElements() < 2 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // PMULDQ returns the 64-bit result of the signed multiplication of the lower
  // 32 bits. We can lower with this if the sign bits stretch that far.
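  // If both operands have more than 32 sign bits, their upper 32 bits are
  // copies of bit 31, so the full 64-bit product equals the sign-extended
  // 32x32->64 multiply of the low halves that PMULDQ performs.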
  if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
      DAG.ComputeNumSignBits(N1) > 32) {
    auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                            ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
                            PMULDQBuilder, /*CheckBWI*/false);
  }

  // If the upper bits are zero we can use a single pmuludq.
  APInt Mask = APInt::getHighBitsSet(64, 32);
  if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
    auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                             ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
                            PMULUDQBuilder, /*CheckBWI*/false);
  }

  return SDValue();
}

/// Optimize a single multiply with constant into two operations in order to
/// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);

  if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
    return V;

  if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
    return V;

  if (DCI.isBeforeLegalize() && VT.isVector())
    return reduceVMULWidth(N, DAG, Subtarget);

  if (!MulConstantOptimization)
    return SDValue();
  // An imul is usually smaller than the alternative sequence.
  if (DAG.getMachineFunction().getFunction().hasMinSize())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (VT != MVT::i64 && VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();
  if (isPowerOf2_64(C->getZExtValue()))
    return SDValue();

  int64_t SignMulAmt = C->getSExtValue();
  assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
  uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;

  SDLoc DL(N);
  if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
    SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                                 DAG.getConstant(AbsMulAmt, DL, VT));
    if (SignMulAmt < 0)
      NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                           NewMul);

    return NewMul;
  }

  uint64_t MulAmt1 = 0;
  uint64_t MulAmt2 = 0;
  if ((AbsMulAmt % 9) == 0) {
    MulAmt1 = 9;
    MulAmt2 = AbsMulAmt / 9;
  } else if ((AbsMulAmt % 5) == 0) {
    MulAmt1 = 5;
    MulAmt2 = AbsMulAmt / 5;
  } else if ((AbsMulAmt % 3) == 0) {
    MulAmt1 = 3;
    MulAmt2 = AbsMulAmt / 3;
  }
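  // E.g. AbsMulAmt == 45 decomposes as 9 * 5, so the multiply can be lowered
  // as two LEAs (or an LEA and a shift when one factor is a power of 2).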

  SDValue NewMul;
  // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
  if (MulAmt2 &&
      (isPowerOf2_64(MulAmt2) ||
       (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {

    if (isPowerOf2_64(MulAmt2) &&
        !(SignMulAmt >= 0 && N->hasOneUse() &&
          N->use_begin()->getOpcode() == ISD::ADD))
      // If the second multiplier is a power of 2, issue it first. We want the
      // multiply by 3, 5, or 9 to be folded into the addressing mode unless
      // the lone use is an add. Only do this for positive multiply amounts
      // since the negate would prevent it from being used as an address mode
      // anyway.
      std::swap(MulAmt1, MulAmt2);

    if (isPowerOf2_64(MulAmt1))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(MulAmt1, DL, VT));

    if (isPowerOf2_64(MulAmt2))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                           DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                           DAG.getConstant(MulAmt2, DL, VT));

    // Negate the result.
    if (SignMulAmt < 0)
      NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                           NewMul);
  } else if (!Subtarget.slowLEA())
    NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);

  if (!NewMul) {
    assert(C->getZExtValue() != 0 &&
           C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
           "Both cases that could cause potential overflows should have "
           "already been handled.");
    if (isPowerOf2_64(AbsMulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
      NewMul = DAG.getNode(
          ISD::ADD, DL, VT, N->getOperand(0),
          DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                      DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
                                      MVT::i8)));
      // To negate, subtract the number from zero
      if (SignMulAmt < 0)
        NewMul = DAG.getNode(ISD::SUB, DL, VT,
                             DAG.getConstant(0, DL, VT), NewMul);
    } else if (isPowerOf2_64(AbsMulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(AbsMulAmt + 1),
                                           DL, MVT::i8));
      // To negate, reverse the operands of the subtract.
      if (SignMulAmt < 0)
        NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
      else
        NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
      // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(AbsMulAmt - 2),
                                           DL, MVT::i8));
      NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
      NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
      // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(AbsMulAmt + 2),
                                           DL, MVT::i8));
      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
    }
  }

  return NewMul;
}

static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zeros or all ones.
  if (VT.isInteger() && !VT.isVector() &&
      N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    APInt Mask = N0.getConstantOperandAPInt(1);
    Mask <<= N1C->getAPIntValue();
    bool MaskOK = false;
    // We can handle cases concerning bit-widening nodes containing setcc_c if
    // we carefully interrogate the mask to make sure we are semantics
    // preserving.
    // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
    // of the underlying setcc_c operation if the setcc_c was zero extended.
    // Consider the following example:
    //   zext(setcc_c)                 -> i32 0x0000FFFF
    //   c1                            -> i32 0x0000FFFF
    //   c2                            -> i32 0x00000001
    //   (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
    //   (and setcc_c, (c1 << c2))     -> i32 0x0000FFFE
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = true;
    } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = true;
    } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
                N00.getOpcode() == ISD::ANY_EXTEND) &&
               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
    }
    if (MaskOK && Mask != 0) {
      SDLoc DL(N);
      return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
    }
  }

  // Hardware support for vector shifts is sparse, which makes us scalarize the
  // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
  // shl.
  // (shl V, 1) -> add V,V
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
      assert(N0.getValueType().isVector() && "Invalid vector shift type");
      // We shift all of the values by one. In many cases we do not have
      // hardware support for this operation. This is better expressed as an ADD
      // of two values.
      if (N1SplatC->getAPIntValue() == 1)
        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
    }

  return SDValue();
}

static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned Size = VT.getSizeInBits();

  // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
  // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
  // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
  // depending on sign of (SarConst - [56,48,32,24,16])

  // sexts in X86 are MOVs. The MOVs have the same code size as the SHIFTs
  // above (only a shift by 1 has smaller code size).
  // However, the MOVs have two advantages over a SHIFT:
  // 1. A MOV can write to a destination register that differs from the source.
  // 2. A MOV accepts memory operands.
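  // E.g. for i32, (sra (shl X, 24), 25) becomes (sra (sext_inreg X, i8), 1)
  // and (sra (shl X, 24), 22) becomes (shl (sext_inreg X, i8), 2).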

  if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
      N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
      N0.getOperand(1).getOpcode() != ISD::Constant)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
  APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
  EVT CVT = N1.getValueType();

  if (SarConst.isNegative())
    return SDValue();

  for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
    unsigned ShiftSize = SVT.getSizeInBits();
    // Skip types without a corresponding sext/zext and ShlConst values that
    // are not one of [56,48,32,24,16].
    if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
      continue;
    SDLoc DL(N);
    SDValue NN =
        DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
    SarConst = SarConst - (Size - ShiftSize);
    if (SarConst == 0)
      return NN;
    else if (SarConst.isNegative())
      return DAG.getNode(ISD::SHL, DL, VT, NN,
                         DAG.getConstant(-SarConst, DL, CVT));
    else
      return DAG.getNode(ISD::SRA, DL, VT, NN,
                         DAG.getConstant(SarConst, DL, CVT));
  }
  return SDValue();
}

static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();

  // Only do this on the last DAG combine as it can interfere with other
  // combines.
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();

  // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
  // TODO: This is a generic DAG combine that became an x86-only combine to
  // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
  // and-not ('andn').
  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
    return SDValue();

  auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
  auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!ShiftC || !AndC)
    return SDValue();

  // If we can shrink the constant mask below 8-bits or 32-bits, then this
  // transform should reduce code size. It may also enable secondary transforms
  // from improved known-bits analysis or instruction selection.
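  // E.g. (srl (and X, 0x7F00), 8) becomes (and (srl X, 8), 0x7F); the new mask
  // fits in a sign-extended 8-bit immediate while the old one does not.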
  APInt MaskVal = AndC->getAPIntValue();

  // If this can be matched by a zero extend, don't optimize.
  if (MaskVal.isMask()) {
    unsigned TO = MaskVal.countTrailingOnes();
    if (TO >= 8 && isPowerOf2_32(TO))
      return SDValue();
  }

  APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
  unsigned OldMaskSize = MaskVal.getMinSignedBits();
  unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
  if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
      (OldMaskSize > 32 && NewMaskSize <= 32)) {
    // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
    SDLoc DL(N);
    SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
    SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
    return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
  }
  return SDValue();
}

static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
         "Unexpected pack opcode");

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned DstBitsPerElt = VT.getScalarSizeInBits();
  unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
  assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
         N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
         "Unexpected PACKSS/PACKUS input type");

  bool IsSigned = (X86ISD::PACKSS == Opcode);

  // Constant Folding.
  APInt UndefElts0, UndefElts1;
  SmallVector<APInt, 32> EltBits0, EltBits1;
  if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
      (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
      getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
      getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
    unsigned NumLanes = VT.getSizeInBits() / 128;
    unsigned NumDstElts = VT.getVectorNumElements();
    unsigned NumSrcElts = NumDstElts / 2;
    unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
    unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;

    APInt Undefs(NumDstElts, 0);
    SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
    for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
      for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
        unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
        auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
        auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);

        if (UndefElts[SrcIdx]) {
          Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
          continue;
        }

        APInt &Val = EltBits[SrcIdx];
        if (IsSigned) {
          // PACKSS: Truncate signed value with signed saturation.
          // Source values less than dst minint are saturated to minint.
          // Source values greater than dst maxint are saturated to maxint.
          if (Val.isSignedIntN(DstBitsPerElt))
            Val = Val.trunc(DstBitsPerElt);
          else if (Val.isNegative())
            Val = APInt::getSignedMinValue(DstBitsPerElt);
          else
            Val = APInt::getSignedMaxValue(DstBitsPerElt);
        } else {
          // PACKUS: Truncate signed value with unsigned saturation.
          // Source values less than zero are saturated to zero.
          // Source values greater than dst maxuint are saturated to maxuint.
          if (Val.isIntN(DstBitsPerElt))
            Val = Val.trunc(DstBitsPerElt);
          else if (Val.isNegative())
            Val = APInt::getNullValue(DstBitsPerElt);
          else
            Val = APInt::getAllOnesValue(DstBitsPerElt);
        }
        Bits[Lane * NumDstEltsPerLane + Elt] = Val;
      }
    }

    return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
  }

  // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
  // truncate to create a larger truncate.
  if (Subtarget.hasAVX512() &&
      N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
      N0.getOperand(0).getValueType() == MVT::v8i32) {
    if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
        (!IsSigned &&
         DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
      if (Subtarget.hasVLX())
        return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));

      // Widen input to v16i32 so we can truncate that.
      SDLoc dl(N);
      SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
                                   N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
    }
  }

  // Attempt to combine as shuffle.
  SDValue Op(N, 0);
  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
    return Res;

  return SDValue();
}

static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
          X86ISD::VSRL == N->getOpcode()) &&
         "Unexpected shift opcode");
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Shift zero -> zero.
  if (ISD::isBuildVectorAllZeros(N0.getNode()))
    return DAG.getConstant(0, SDLoc(N), VT);

  // Detect constant shift amounts.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
  if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
    unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
    return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
                                      EltBits[0].getZExtValue(), DAG);
  }

  APInt KnownUndef, KnownZero;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);

  return SDValue();
}

static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
          X86ISD::VSRLI == Opcode) &&
         "Unexpected shift opcode");
  bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
  assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
         "Unexpected value type");
  assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");

  // Out of range logical bit shifts are guaranteed to be zero.
  // Out of range arithmetic bit shifts splat the sign bit.
  unsigned ShiftVal = cast<ConstantSDNode>(N1)->getZExtValue();
  if (ShiftVal >= NumBitsPerElt) {
    if (LogicalShift)
      return DAG.getConstant(0, SDLoc(N), VT);
    else
      ShiftVal = NumBitsPerElt - 1;
  }

  // Shift N0 by zero -> N0.
  if (!ShiftVal)
    return N0;

  // Shift zero -> zero.
  if (ISD::isBuildVectorAllZeros(N0.getNode()))
    return DAG.getConstant(0, SDLoc(N), VT);

  // Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2)
  // clamped to (NumBitsPerElt - 1).
  if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) {
    unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
    unsigned NewShiftVal = ShiftVal + ShiftVal2;
    if (NewShiftVal >= NumBitsPerElt)
      NewShiftVal = NumBitsPerElt - 1;
    return DAG.getNode(X86ISD::VSRAI, SDLoc(N), VT, N0.getOperand(0),
                       DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
  }

  // We can decode 'whole byte' logical bit shifts as shuffles.
  if (LogicalShift && (ShiftVal % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  // Constant Folding.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
  if (N->isOnlyUserOf(N0.getNode()) &&
      getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
    assert(EltBits.size() == VT.getVectorNumElements() &&
           "Unexpected shift value type");
    for (APInt &Elt : EltBits) {
      if (X86ISD::VSHLI == Opcode)
        Elt <<= ShiftVal;
      else if (X86ISD::VSRAI == Opcode)
        Elt.ashrInPlace(ShiftVal);
      else
        Elt.lshrInPlace(ShiftVal);
    }
    return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(SDValue(N, 0),
                               APInt::getAllOnesValue(NumBitsPerElt), DCI))
    return SDValue(N, 0);

  return SDValue();
}

static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
          (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16)) &&
         "Unexpected vector insertion");

  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(SDValue(N, 0),
                               APInt::getAllOnesValue(NumBitsPerElt), DCI))
    return SDValue(N, 0);

  // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
  SDValue Op(N, 0);
  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
    return Res;

  return SDValue();
}

/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
/// OR -> CMPNEQSS.
static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget &Subtarget) {
  unsigned opcode;

  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
  // we're requiring SSE2 for both.
  if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue CMP0 = N0.getOperand(1);
    SDValue CMP1 = N1.getOperand(1);
    SDLoc DL(N);

    // The SETCCs should both refer to the same CMP.
    if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
      return SDValue();

    SDValue CMP00 = CMP0->getOperand(0);
    SDValue CMP01 = CMP0->getOperand(1);
    EVT     VT    = CMP00.getValueType();

    if (VT == MVT::f32 || VT == MVT::f64) {
      bool ExpectingFlags = false;
      // Check for any users that want flags:
      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
           !ExpectingFlags && UI != UE; ++UI)
        switch (UI->getOpcode()) {
        default:
        case ISD::BR_CC:
        case ISD::BRCOND:
        case ISD::SELECT:
          ExpectingFlags = true;
          break;
        case ISD::CopyToReg:
        case ISD::SIGN_EXTEND:
        case ISD::ZERO_EXTEND:
        case ISD::ANY_EXTEND:
          break;
        }

      if (!ExpectingFlags) {
        enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
        enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);

        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
          X86::CondCode tmp = cc0;
          cc0 = cc1;
          cc1 = tmp;
        }

        if ((cc0 == X86::COND_E  && cc1 == X86::COND_NP) ||
            (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
          // FIXME: need symbolic constants for these magic numbers.
          // See X86ATTInstPrinter.cpp:printSSECC().
          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
          if (Subtarget.hasAVX512()) {
            SDValue FSetCC =
                DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
                            DAG.getTargetConstant(x86cc, DL, MVT::i8));
            // Need to fill with zeros to ensure the bitcast will produce zeroes
            // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
            SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
                                      DAG.getConstant(0, DL, MVT::v16i1),
                                      FSetCC, DAG.getIntPtrConstant(0, DL));
            return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
                                      N->getSimpleValueType(0));
          }
          SDValue OnesOrZeroesF =
              DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
                          CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));

          bool is64BitFP = (CMP00.getValueType() == MVT::f64);
          MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;

          if (is64BitFP && !Subtarget.is64Bit()) {
            // On a 32-bit target, we cannot bitcast the 64-bit float to a
            // 64-bit integer, since that's not a legal type. Since
            // OnesOrZeroesF is all ones or all zeroes, so we don't need all the
            // bits, but can do this little dance to extract the lowest 32 bits
            // and work with those going forward.
            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                                           OnesOrZeroesF);
            SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
                                        Vector32, DAG.getIntPtrConstant(0, DL));
            IntVT = MVT::i32;
          }

          SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
          SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
                                      DAG.getConstant(1, DL, IntVT));
          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                                              ANDed);
          return OneBitOfTruth;
        }
      }
    }
  }
  return SDValue();
}

/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::AND);

  MVT VT = N->getSimpleValueType(0);
  if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
    return SDValue();

  SDValue X, Y;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (SDValue Not = IsNOT(N0, DAG)) {
    X = Not;
    Y = N1;
  } else if (SDValue Not = IsNOT(N1, DAG)) {
    X = Not;
    Y = N0;
  } else
    return SDValue();

  X = DAG.getBitcast(VT, X);
  Y = DAG.getBitcast(VT, Y);
  return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
}

// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
// register. In most cases we actually compare or select YMM-sized registers
// and mixing the two types creates horrible code. This method optimizes
// some of the transition sequences.
// Even with AVX-512 this is still useful for removing casts around logical
// operations on vXi1 mask types.
static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  assert(VT.isVector() && "Expected vector type");

  assert((N->getOpcode() == ISD::ANY_EXTEND ||
          N->getOpcode() == ISD::ZERO_EXTEND ||
          N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");

  SDValue Narrow = N->getOperand(0);
  EVT NarrowVT = Narrow.getValueType();

  if (Narrow->getOpcode() != ISD::XOR &&
      Narrow->getOpcode() != ISD::AND &&
      Narrow->getOpcode() != ISD::OR)
    return SDValue();

  SDValue N0  = Narrow->getOperand(0);
  SDValue N1  = Narrow->getOperand(1);
  SDLoc DL(Narrow);

  // The Left side has to be a trunc.
  if (N0.getOpcode() != ISD::TRUNCATE)
    return SDValue();

  // The type of the truncated inputs.
  if (N0.getOperand(0).getValueType() != VT)
    return SDValue();

  // The right side has to be a 'trunc' or a constant vector.
  bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
                  N1.getOperand(0).getValueType() == VT;
  if (!RHSTrunc &&
      !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT))
    return SDValue();

  // Set N0 and N1 to hold the inputs to the new wide operation.
  N0 = N0.getOperand(0);
  if (RHSTrunc)
    N1 = N1.getOperand(0);
  else
    N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);

  // Generate the wide operation.
  SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
  unsigned Opcode = N->getOpcode();
  switch (Opcode) {
  default: llvm_unreachable("Unexpected opcode");
  case ISD::ANY_EXTEND:
    return Op;
  case ISD::ZERO_EXTEND:
    return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType());
  case ISD::SIGN_EXTEND:
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
                       Op, DAG.getValueType(NarrowVT));
  }
}

/// If both input operands of a logic op are being cast from floating point
/// types, try to convert this into a floating point logic node to avoid
/// unnecessary moves from SSE to integer registers.
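/// For example, (and (bitcast f32:A to i32), (bitcast f32:B to i32)) becomes
/// (bitcast (FAND A, B) to i32).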
static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  if (N0.getOpcode() != ISD::BITCAST || N1.getOpcode() != ISD::BITCAST)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N10 = N1.getOperand(0);
  EVT N00Type = N00.getValueType();
  EVT N10Type = N10.getValueType();

  // Ensure that both types are the same and are legal scalar fp types.
  if (N00Type != N10Type ||
      !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
        (Subtarget.hasSSE2() && N00Type == MVT::f64)))
    return SDValue();

  unsigned FPOpcode;
  switch (N->getOpcode()) {
  default: llvm_unreachable("Unexpected input node for FP logic conversion");
  case ISD::AND: FPOpcode = X86ISD::FAND; break;
  case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
  case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
  }

  SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
  return DAG.getBitcast(VT, FPLogic);
}

/// If this is a zero/all-bits result that is bitwise-anded with a low-bits
/// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
/// with a shift-right to eliminate loading the vector constant mask value.
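/// For example, if each v4i32 element of X is known to be all-ones or
/// all-zeros, (and X, splat(1)) can be rewritten as (srl X, 31).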
static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
  SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
  EVT VT0 = Op0.getValueType();
  EVT VT1 = Op1.getValueType();

  if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
    return SDValue();

  APInt SplatVal;
  if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
      !SplatVal.isMask())
    return SDValue();

  // Don't prevent creation of ANDN.
  if (isBitwiseNot(Op0))
    return SDValue();

  if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
    return SDValue();

  unsigned EltBitWidth = VT0.getScalarSizeInBits();
  if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
    return SDValue();

  SDLoc DL(N);
  unsigned ShiftVal = SplatVal.countTrailingOnes();
  SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
  SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
  return DAG.getBitcast(N->getValueType(0), Shift);
}

// Get the index node from the lowered DAG of a GEP IR instruction with one
// indexing dimension.
static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
  if (Ld->isIndexed())
    return SDValue();

  SDValue Base = Ld->getBasePtr();

  if (Base.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue ShiftedIndex = Base.getOperand(0);

  if (ShiftedIndex.getOpcode() != ISD::SHL)
    return SDValue();

  return ShiftedIndex.getOperand(0);

}

static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
  if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
    switch (VT.getSizeInBits()) {
    default: return false;
    case 64: return Subtarget.is64Bit() ? true : false;
    case 32: return true;
    }
  }
  return false;
}

// This function recognizes cases where the X86 bzhi instruction can replace an
// 'and-load' sequence.
// When an integer value is loaded from an array of constants defined as
// follows:
//
//   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
//
// and a bitwise and is then applied to the result with another input, this is
// equivalent to performing bzhi (zero high bits) on the input, using the same
// index as the load.
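// The element at index 'idx' is (1 << idx) - 1, i.e. the low 'idx' bits set,
// which matches what bzhi produces for the same index.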
static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT VT = Node->getSimpleValueType(0);
  SDLoc dl(Node);

  // Check if subtarget has BZHI instruction for the node's type
  if (!hasBZHI(Subtarget, VT))
    return SDValue();

  // Try matching the pattern for both operands.
  for (unsigned i = 0; i < 2; i++) {
    SDValue N = Node->getOperand(i);
    LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());

    // Bail out if the operand is not a load instruction.
    if (!Ld)
      return SDValue();

    const Value *MemOp = Ld->getMemOperand()->getValue();

    if (!MemOp)
      return SDValue();

    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
        if (GV->isConstant() && GV->hasDefinitiveInitializer()) {

          Constant *Init = GV->getInitializer();
          Type *Ty = Init->getType();
          if (!isa<ConstantDataArray>(Init) ||
              !Ty->getArrayElementType()->isIntegerTy() ||
              Ty->getArrayElementType()->getScalarSizeInBits() !=
                  VT.getSizeInBits() ||
              Ty->getArrayNumElements() >
                  Ty->getArrayElementType()->getScalarSizeInBits())
            continue;

          // Check if the array's constant elements are suitable to our case.
          uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
          bool ConstantsMatch = true;
          for (uint64_t j = 0; j < ArrayElementCount; j++) {
            ConstantInt *Elem =
                dyn_cast<ConstantInt>(Init->getAggregateElement(j));
            if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
              ConstantsMatch = false;
              break;
            }
          }
          if (!ConstantsMatch)
            continue;

          // Do the transformation (For 32-bit type):
          // -> (and (load arr[idx]), inp)
          // <- (and (srl 0xFFFFFFFF, (sub 32, idx)))
          //    that will be replaced with one bzhi instruction.
          SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
          SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);

          // Get the Node which indexes into the array.
          SDValue Index = getIndexFromUnindexedLoad(Ld);
          if (!Index)
            return SDValue();
          Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);

          SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
          Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);

          SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
          SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);

          return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
        }
      }
    }
  }
  return SDValue();
}

// Look for (and (ctpop X), 1) which is the IR form of __builtin_parity.
// Turn it into a series of XORs and a setnp.
static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
                             const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);

  // We only support 64-bit and 32-bit. 64-bit requires special handling
  // unless the 64-bit popcnt instruction is legal.
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // LHS needs to be a single use CTPOP.
  if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
    return SDValue();

  // RHS needs to be 1.
  if (!isOneConstant(N1))
    return SDValue();

  SDLoc DL(N);
  SDValue X = N0.getOperand(0);

  // If this is 64-bit, it's always best to xor the two 32-bit pieces together
  // even if we have popcnt.
  if (VT == MVT::i64) {
    SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
                             DAG.getNode(ISD::SRL, DL, VT, X,
                                         DAG.getConstant(32, DL, MVT::i8)));
    SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
    X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
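    // XOR of the halves preserves parity: popcount(Lo ^ Hi) has the same
    // parity as popcount(Lo) + popcount(Hi) == popcount(X).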
    // Generate a 32-bit parity idiom. This will bring us back here if we need
    // to expand it too.
    SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
                                 DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
                                 DAG.getConstant(1, DL, MVT::i32));
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
  }
  assert(VT == MVT::i32 && "Unexpected VT!");

  // Xor the high and low 16-bits together using a 32-bit operation.
  SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
                             DAG.getConstant(16, DL, MVT::i8));
  X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);

  // Finally xor the low 2 bytes together and use an 8-bit flag-setting xor.
  // This should allow an h-reg to be used to save a shift.
  // FIXME: We only get an h-reg in 32-bit mode.
  SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                           DAG.getNode(ISD::SRL, DL, VT, X,
                                       DAG.getConstant(8, DL, MVT::i8)));
  SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
  SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
  SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);

  // Copy the inverse of the parity flag into a register with setcc.
  SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
  // Zero extend to original type.
  return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
}


// Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef, ...))), C)
// where C is a mask containing the same number of bits as the setcc and
// where the setcc will freely zero the upper bits of the k-register. We can
// replace the undef in the concat with 0s and remove the AND. This mainly
// helps with v2i1/v4i1 setcc being cast to a scalar.
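// E.g. (and (i8 (bitcast (v8i1 (concat (v2i1 setcc), undef, undef, undef)))), 3):
// replacing the undef subvectors with zeros makes the upper 6 bits of the
// bitcast zero, so the whole AND can be replaced by the new bitcast.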
static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
                                             const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");

  EVT VT = N->getValueType(0);

  // Make sure this is an AND with constant. We will check the value of the
  // constant later.
  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return SDValue();

  // This is implied by the ConstantSDNode.
  assert(!VT.isVector() && "Expected scalar VT!");

  if (N->getOperand(0).getOpcode() != ISD::BITCAST ||
      !N->getOperand(0).hasOneUse() ||
      !N->getOperand(0).getOperand(0).hasOneUse())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Src = N->getOperand(0).getOperand(0);
  EVT SrcVT = Src.getValueType();
  if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
      !TLI.isTypeLegal(SrcVT))
    return SDValue();

  if (Src.getOpcode() != ISD::CONCAT_VECTORS)
    return SDValue();

  // We only care about the first subvector of the concat, we expect the
  // other subvectors to be ignored due to the AND if we make the change.
  SDValue SubVec = Src.getOperand(0);
  EVT SubVecVT = SubVec.getValueType();

  // First subvector should be a setcc with a legal result type. The RHS of the
  // AND should be a mask with this many bits.
  if (SubVec.getOpcode() != ISD::SETCC || !TLI.isTypeLegal(SubVecVT) ||
      !N->getConstantOperandAPInt(1).isMask(SubVecVT.getVectorNumElements()))
    return SDValue();

  EVT SetccVT = SubVec.getOperand(0).getValueType();
  if (!TLI.isTypeLegal(SetccVT) ||
      !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
    return SDValue();

  if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
    return SDValue();

  // We passed all the checks. Rebuild the concat_vectors with zeroes
  // and cast it back to VT.
  SDLoc dl(N);
  SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
                              DAG.getConstant(0, dl, SubVecVT));
  Ops[0] = SubVec;
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT,
                               Ops);
  return DAG.getBitcast(VT, Concat);
}

static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);

  // If this is SSE1-only, convert to FAND to avoid scalarization.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
    return DAG.getBitcast(
        MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
                                DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
                                DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
  }

  // Use a 32-bit and+zext if upper bits known zero.
  if (VT == MVT::i64 && Subtarget.is64Bit() &&
      !isa<ConstantSDNode>(N->getOperand(1))) {
    APInt HiMask = APInt::getHighBitsSet(64, 32);
    if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
        DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
      SDLoc dl(N);
      SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
      SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
                         DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
    }
  }

  // This must be done before legalization has expanded the ctpop.
  if (SDValue V = combineParity(N, DAG, Subtarget))
    return V;

  // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
  // TODO: Support multiple SrcOps.
  if (VT == MVT::i1) {
    SmallVector<SDValue, 2> SrcOps;
    if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps) &&
        SrcOps.size() == 1) {
      SDLoc dl(N);
      unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
      EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
      if (Mask) {
        APInt AllBits = APInt::getAllOnesValue(NumElts);
        return DAG.getSetCC(dl, MVT::i1, Mask,
                            DAG.getConstant(AllBits, dl, MaskVT), ISD::SETEQ);
      }
    }
  }

  if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
    return V;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
    return FPLogic;

  if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
    return R;

  if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
    return ShiftRight;

  if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
    return R;

  // Attempt to recursively combine a bitmask AND with shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  // Attempt to combine a scalar bitmask AND with an extracted shuffle.
  if ((VT.getScalarSizeInBits() % 8) == 0 &&
      N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
    SDValue BitMask = N->getOperand(1);
    SDValue SrcVec = N->getOperand(0).getOperand(0);
    EVT SrcVecVT = SrcVec.getValueType();

    // Check that the constant bitmask masks whole bytes.
    APInt UndefElts;
    SmallVector<APInt, 64> EltBits;
    if (VT == SrcVecVT.getScalarType() &&
        N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
        getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
        llvm::all_of(EltBits, [](APInt M) {
          return M.isNullValue() || M.isAllOnesValue();
        })) {
      unsigned NumElts = SrcVecVT.getVectorNumElements();
      unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
      unsigned Idx = N->getOperand(0).getConstantOperandVal(1);

      // Create a root shuffle mask from the byte mask and the extracted index.
      SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
      for (unsigned i = 0; i != Scale; ++i) {
        if (UndefElts[i])
          continue;
        int VecIdx = Scale * Idx + i;
        ShuffleMask[VecIdx] =
            EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
      }

      if (SDValue Shuffle = combineX86ShufflesRecursively(
              {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
              /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
                           N->getOperand(0).getOperand(1));
    }
  }

  return SDValue();
}

// Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
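// The ANDNP computes (~C) & Y, so the OR still takes bits of X where C is set
// and bits of Y where C is clear, but the inverted mask no longer needs its
// own constant.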
static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");

  MVT VT = N->getSimpleValueType(0);
  if (!VT.isVector() || (VT.getScalarSizeInBits() % 8) != 0)
    return SDValue();

  SDValue N0 = peekThroughBitcasts(N->getOperand(0));
  SDValue N1 = peekThroughBitcasts(N->getOperand(1));
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
    return SDValue();

  // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
  // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
  bool UseVPTERNLOG = (Subtarget.hasAVX512() && VT.is512BitVector()) ||
                      Subtarget.hasVLX();
  if (!(Subtarget.hasXOP() || UseVPTERNLOG ||
        !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
    return SDValue();

  // Attempt to extract constant byte masks.
  APInt UndefElts0, UndefElts1;
  SmallVector<APInt, 32> EltBits0, EltBits1;
  if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
                                     false, false))
    return SDValue();
  if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
                                     false, false))
    return SDValue();

  for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
    // TODO - add UNDEF elts support.
    if (UndefElts0[i] || UndefElts1[i])
      return SDValue();
    if (EltBits0[i] != ~EltBits1[i])
      return SDValue();
  }

  SDLoc DL(N);
  SDValue X = N->getOperand(0);
  SDValue Y =
      DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
                  DAG.getBitcast(VT, N1.getOperand(0)));
  return DAG.getNode(ISD::OR, DL, VT, X, Y);
}

// Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
  if (N->getOpcode() != ISD::OR)
    return false;

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Canonicalize AND to LHS.
  if (N1.getOpcode() == ISD::AND)
    std::swap(N0, N1);

  // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
    return false;

  Mask = N1.getOperand(0);
  X = N1.getOperand(1);

  // Check to see if the mask appeared in both the AND and ANDNP.
  if (N0.getOperand(0) == Mask)
    Y = N0.getOperand(1);
  else if (N0.getOperand(1) == Mask)
    Y = N0.getOperand(0);
  else
    return false;

  // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
  // ANDNP combine allows other combines to happen that prevent matching.
  return true;
}

// Try to match:
//   (or (and (M, (sub 0, X)), (pandn M, X)))
// which is a special case of vselect:
//   (vselect M, (sub 0, X), X)
// Per:
// http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
// We know that, if fNegate is 0 or 1:
//   (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
//
// Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
//   ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
//   ( M      ? -X : X) == ((X ^   M     ) + (M & 1))
// This lets us transform our vselect to:
//   (add (xor X, M), (and M, 1))
// And further to:
//   (sub (xor X, M), M)
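//
// For example, with an all-ones mask lane M = -1 and X = 5:
//   (xor 5, -1) = -6 and (sub -6, -1) = -5, so the lane is negated.
// With a zero mask lane M = 0:
//   (xor 5, 0) = 5 and (sub 5, 0) = 5, so the lane is unchanged.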
static SDValue combineLogicBlendIntoConditionalNegate(
    EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  EVT MaskVT = Mask.getValueType();
  assert(MaskVT.isInteger() &&
         DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
         "Mask must be zero/all-bits");

  if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
    return SDValue();
  if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
    return SDValue();

  auto IsNegV = [](SDNode *N, SDValue V) {
    return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
           ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
  };

  SDValue V;
  if (IsNegV(Y.getNode(), X))
    V = X;
  else if (IsNegV(X.getNode(), Y))
    V = Y;
  else
    return SDValue();

  SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
  SDValue SubOp2 = Mask;

  // If the negate was on the false side of the select, then
  // the operands of the SUB need to be swapped. PR 27251.
  // This is because the pattern being matched above is
  // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
  // but if the pattern matched was
  // (vselect M, X, (sub (0, X))), that is really negation of the pattern
  // above, -(vselect M, (sub 0, X), X), and therefore the replacement
  // pattern also needs to be a negation of the replacement pattern above.
  // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
  // sub accomplishes the negation of the replacement pattern.
  if (V == Y)
    std::swap(SubOp1, SubOp2);

  SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
  return DAG.getBitcast(VT, Res);
}

// Try to fold:
//   (or (and (m, y), (pandn m, x)))
// into:
//   (vselect m, y, x)
// As a special case, try to fold:
//   (or (and (m, (sub 0, x)), (pandn m, x)))
// into:
//   (sub (xor X, M), M)
static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
                                            const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");

  EVT VT = N->getValueType(0);
  if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
        (VT.is256BitVector() && Subtarget.hasInt256())))
    return SDValue();

  SDValue X, Y, Mask;
  if (!matchLogicBlend(N, X, Y, Mask))
    return SDValue();

  // Validate that X, Y, and Mask are bitcasts, and see through them.
  Mask = peekThroughBitcasts(Mask);
  X = peekThroughBitcasts(X);
  Y = peekThroughBitcasts(Y);

  EVT MaskVT = Mask.getValueType();
  unsigned EltBits = MaskVT.getScalarSizeInBits();

  // TODO: Attempt to handle floating point cases as well?
  if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
    return SDValue();

  SDLoc DL(N);

  // Attempt to combine to conditional negate: (sub (xor X, M), M)
  if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
                                                           DAG, Subtarget))
    return Res;

  // PBLENDVB is only available on SSE 4.1.
  if (!Subtarget.hasSSE41())
    return SDValue();

  MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;

  X = DAG.getBitcast(BlendVT, X);
  Y = DAG.getBitcast(BlendVT, Y);
  Mask = DAG.getBitcast(BlendVT, Mask);
  Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
  return DAG.getBitcast(VT, Mask);
}

// Helper function for combineOrCmpEqZeroToCtlzSrl
// Transforms:
//   seteq(cmp x, 0)
//   into:
//   srl(ctlz x), log2(bitsize(x))
// Input pattern is checked by caller.
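// For a 32-bit x, ctlz(x) is 32 only when x == 0, so srl(ctlz(x), 5) yields
// 1 for x == 0 and 0 otherwise, matching seteq(cmp x, 0).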
static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
                                          SelectionDAG &DAG) {
  SDValue Cmp = Op.getOperand(1);
  EVT VT = Cmp.getOperand(0).getValueType();
  unsigned Log2b = Log2_32(VT.getSizeInBits());
  SDLoc dl(Op);
  SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
  // The result of the shift is true or false, and on X86, the 32-bit
  // encoding of shr and lzcnt is more desirable.
  SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
  SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
                            DAG.getConstant(Log2b, dl, MVT::i8));
  return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
}

// Try to transform:
//   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
//   into:
//   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
// Will also attempt to match more generic cases, eg:
//   zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
// Only applies if the target supports the FastLZCNT feature.
static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
    return SDValue();

  auto isORCandidate = [](SDValue N) {
    return (N->getOpcode() == ISD::OR && N->hasOneUse());
  };

  // Check that the zero extend is extending to 32 bits or more. The code
  // generated by srl(ctlz) for 16-bit or smaller variants of the pattern would
  // require extra instructions to clear the upper bits.
  if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
      !isORCandidate(N->getOperand(0)))
    return SDValue();

  // Check the node matches: setcc(eq, cmp 0)
  auto isSetCCCandidate = [](SDValue N) {
    return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
           X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
           N->getOperand(1).getOpcode() == X86ISD::CMP &&
           isNullConstant(N->getOperand(1).getOperand(1)) &&
           N->getOperand(1).getValueType().bitsGE(MVT::i32);
  };

  SDNode *OR = N->getOperand(0).getNode();
  SDValue LHS = OR->getOperand(0);
  SDValue RHS = OR->getOperand(1);

  // Save nodes matching or(or, setcc(eq, cmp 0)).
  SmallVector<SDNode *, 2> ORNodes;
  while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
          (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
    ORNodes.push_back(OR);
    OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
    LHS = OR->getOperand(0);
    RHS = OR->getOperand(1);
  }

  // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
  if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
      !isORCandidate(SDValue(OR, 0)))
    return SDValue();

  // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
  // to or(srl(ctlz), srl(ctlz)).
  // The DAG combiner can then fold it into:
  // srl(or(ctlz, ctlz)).
  EVT VT = OR->getValueType(0);
  SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
  SDValue Ret, NewRHS;
  if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
    Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);

  if (!Ret)
    return SDValue();

  // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
  while (ORNodes.size() > 0) {
    OR = ORNodes.pop_back_val();
    LHS = OR->getOperand(0);
    RHS = OR->getOperand(1);
    // Swap rhs with lhs to match or(setcc(eq, cmp 0), or).
    if (RHS->getOpcode() == ISD::OR)
      std::swap(LHS, RHS);
    NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
    if (!NewRHS)
      return SDValue();
    Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
  }

  if (Ret)
    Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);

  return Ret;
}

static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
                         TargetLowering::DAGCombinerInfo &DCI,
                         const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // If this is SSE1 only convert to FOR to avoid scalarization.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
    return DAG.getBitcast(MVT::v4i32,
                          DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
                                      DAG.getBitcast(MVT::v4f32, N0),
                                      DAG.getBitcast(MVT::v4f32, N1)));
  }

  // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
  // TODO: Support multiple SrcOps.
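  // For example, with a 4-element source the any-of reduction becomes a 4-bit
  // mask (via movmsk) compared for inequality against 0.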
  if (VT == MVT::i1) {
    SmallVector<SDValue, 2> SrcOps;
    if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps) &&
        SrcOps.size() == 1) {
      SDLoc dl(N);
      unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
      EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
      if (Mask) {
        APInt AllBits = APInt::getNullValue(NumElts);
        return DAG.getSetCC(dl, MVT::i1, Mask,
                            DAG.getConstant(AllBits, dl, MaskVT), ISD::SETNE);
      }
    }
  }

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
    return FPLogic;

  if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
    return R;

  if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
    return R;

  // Attempt to recursively combine an OR of shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
  unsigned Bits = VT.getScalarSizeInBits();

  // SHLD/SHRD instructions have lower register pressure, but on some
  // platforms they have higher latency than the equivalent
  // series of shifts/or that would otherwise be generated.
  // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
  // have higher latencies and we are not optimizing for size.
  if (!OptForSize && Subtarget.isSHLDSlow())
    return SDValue();

  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  SDValue ShAmt0 = N0.getOperand(1);
  if (ShAmt0.getValueType() != MVT::i8)
    return SDValue();
  SDValue ShAmt1 = N1.getOperand(1);
  if (ShAmt1.getValueType() != MVT::i8)
    return SDValue();

  // Peek through any modulo shift masks.
  SDValue ShMsk0;
  if (ShAmt0.getOpcode() == ISD::AND &&
      isa<ConstantSDNode>(ShAmt0.getOperand(1)) &&
      ShAmt0.getConstantOperandAPInt(1) == (Bits - 1)) {
    ShMsk0 = ShAmt0;
    ShAmt0 = ShAmt0.getOperand(0);
  }
  SDValue ShMsk1;
  if (ShAmt1.getOpcode() == ISD::AND &&
      isa<ConstantSDNode>(ShAmt1.getOperand(1)) &&
      ShAmt1.getConstantOperandAPInt(1) == (Bits - 1)) {
    ShMsk1 = ShAmt1;
    ShAmt1 = ShAmt1.getOperand(0);
  }

  if (ShAmt0.getOpcode() == ISD::TRUNCATE)
    ShAmt0 = ShAmt0.getOperand(0);
  if (ShAmt1.getOpcode() == ISD::TRUNCATE)
    ShAmt1 = ShAmt1.getOperand(0);

  SDLoc DL(N);
  unsigned Opc = ISD::FSHL;
  SDValue Op0 = N0.getOperand(0);
  SDValue Op1 = N1.getOperand(0);
  if (ShAmt0.getOpcode() == ISD::SUB || ShAmt0.getOpcode() == ISD::XOR) {
    Opc = ISD::FSHR;
    std::swap(Op0, Op1);
    std::swap(ShAmt0, ShAmt1);
    std::swap(ShMsk0, ShMsk1);
  }

  auto GetFunnelShift = [&DAG, &DL, VT, Opc](SDValue Op0, SDValue Op1,
                                             SDValue Amt) {
    if (Opc == ISD::FSHR)
      std::swap(Op0, Op1);
    return DAG.getNode(Opc, DL, VT, Op0, Op1,
                       DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Amt));
  };

  // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> FSHL( X, Y, C )
  // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> FSHR( Y, X, C )
  // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHL( X, Y, C )
  // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHR( Y, X, C )
  // OR( SHL( X, AND( C, 31 ) ), SRL( Y, AND( 0 - C, 31 ) ) ) -> FSHL( X, Y, C )
  // OR( SRL( X, AND( C, 31 ) ), SHL( Y, AND( 0 - C, 31 ) ) ) -> FSHR( Y, X, C )
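  // For example, with 32-bit operands and C = 8:
  //   OR( SHL( X, 8 ), SRL( Y, 24 ) ) -> FSHL( X, Y, 8 )
  // since 24 == 32 - 8 and the SRL supplies the low 8 bits of the result.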
  if (ShAmt1.getOpcode() == ISD::SUB) {
    SDValue Sum = ShAmt1.getOperand(0);
    if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) {
      SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
      if (ShAmt1Op1.getOpcode() == ISD::AND &&
          isa<ConstantSDNode>(ShAmt1Op1.getOperand(1)) &&
          ShAmt1Op1.getConstantOperandAPInt(1) == (Bits - 1)) {
        ShMsk1 = ShAmt1Op1;
        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
      }
      if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
      if ((SumC->getAPIntValue() == Bits ||
           (SumC->getAPIntValue() == 0 && ShMsk1)) &&
          ShAmt1Op1 == ShAmt0)
        return GetFunnelShift(Op0, Op1, ShAmt0);
    }
  } else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
    auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
    if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
      return GetFunnelShift(Op0, Op1, ShAmt0);
  } else if (ShAmt1.getOpcode() == ISD::XOR) {
    SDValue Mask = ShAmt1.getOperand(1);
    if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
      unsigned InnerShift = (ISD::FSHL == Opc ? ISD::SRL : ISD::SHL);
      SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
      if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
        ShAmt1Op0 = ShAmt1Op0.getOperand(0);
      if (MaskC->getSExtValue() == (Bits - 1) &&
          (ShAmt1Op0 == ShAmt0 || ShAmt1Op0 == ShMsk0)) {
        if (Op1.getOpcode() == InnerShift &&
            isa<ConstantSDNode>(Op1.getOperand(1)) &&
            Op1.getConstantOperandAPInt(1) == 1) {
          return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
        }
        // Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
        if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
            Op1.getOperand(0) == Op1.getOperand(1)) {
          return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
        }
      }
    }
  }

  return SDValue();
}

/// Try to turn tests against the signbit in the form of:
///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
/// into:
///   SETGT(X, -1)
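///
/// The truncated SRL produces the sign bit of X (0 or 1); xor'ing with 1
/// inverts it, so the result is 1 exactly when X is non-negative, which is
/// what SETGT(X, -1) computes.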
static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
  // This is only worth doing if the output type is i8 or i1.
  EVT ResultType = N->getValueType(0);
  if (ResultType != MVT::i8 && ResultType != MVT::i1)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // We should be performing an xor against a truncated shift.
  if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
    return SDValue();

  // Make sure we are performing an xor against one.
  if (!isOneConstant(N1))
    return SDValue();

  // SetCC on x86 zero extends so only act on this if it's a logical shift.
  SDValue Shift = N0.getOperand(0);
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
    return SDValue();

  // Make sure we are truncating from one of i16, i32 or i64.
  EVT ShiftTy = Shift.getValueType();
  if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
    return SDValue();

  // Make sure the shift amount extracts the sign bit.
  if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
      Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
    return SDValue();

  // Create a greater-than comparison against -1.
  // N.B. Using SETGE against 0 works but we want a canonical-looking
  // comparison; using SETGT matches up with what TranslateX86CC expects.
  SDLoc DL(N);
  SDValue ShiftOp = Shift.getOperand(0);
  EVT ShiftOpTy = ShiftOp.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
                                               *DAG.getContext(), ResultType);
  SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
                              DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
  if (SetCCResultType != ResultType)
    Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
  return Cond;
}

/// Turn vector tests of the signbit in the form of:
///   xor (sra X, elt_size(X)-1), -1
/// into:
///   pcmpgt X, -1
///
/// This should be called before type legalization because the pattern may not
/// persist after that.
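///
/// The SRA smears the sign bit, giving -1 for negative elements and 0
/// otherwise; xor'ing with -1 then gives 0 for negative elements and -1 for
/// non-negative ones, which is exactly what (pcmpgt X, -1) produces.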
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.isSimple())
    return SDValue();

  switch (VT.getSimpleVT().SimpleTy) {
  default: return SDValue();
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
  case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
  case MVT::v32i8:
  case MVT::v16i16:
  case MVT::v8i32:
  case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
  }

  // There must be a shift right algebraic before the xor, and the xor must be a
  // 'not' operation.
  SDValue Shift = N->getOperand(0);
  SDValue Ones = N->getOperand(1);
  if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
      !ISD::isBuildVectorAllOnes(Ones.getNode()))
    return SDValue();

  // The shift should be smearing the sign bit across each vector element.
  auto *ShiftAmt =
      isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
  if (!ShiftAmt ||
      ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
    return SDValue();

  // Create a greater-than comparison against -1. We don't use the more obvious
  // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
  return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
}

/// Detect patterns of truncation with unsigned saturation:
///
/// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
///   Return the source value x to be truncated or SDValue() if the pattern was
///   not matched.
///
/// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
///   where C1 >= 0 and C2 is unsigned max of destination type.
///
///    (truncate (smax (smin (x, C2), C1)) to dest_type)
///   where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
///
///   These two patterns are equivalent to:
///   (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
///   So return the smax(x, C1) value to be truncated or SDValue() if the
///   pattern was not matched.
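///
/// For example, when truncating i32 to i16:
///   (truncate (umin x, 65535)) matches pattern 1, and
///   (truncate (smin (smax x, 0), 65535)) matches pattern 2.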
static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
                                 const SDLoc &DL) {
  EVT InVT = In.getValueType();

  // Saturation with truncation. We truncate from InVT to VT.
  assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
         "Unexpected types for truncate operation");

  // Match min/max and return limit value as a parameter.
  auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
    if (V.getOpcode() == Opcode &&
        ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
      return V.getOperand(0);
    return SDValue();
  };

  APInt C1, C2;
  if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
    // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
    // the element size of the destination type.
    if (C2.isMask(VT.getScalarSizeInBits()))
      return UMin;

  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
    if (MatchMinMax(SMin, ISD::SMAX, C1))
      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
        return SMin;

  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
          C2.uge(C1)) {
        return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
      }

  return SDValue();
}

/// Detect patterns of truncation with signed saturation:
/// (truncate (smin ((smax (x, signed_min_of_dest_type)),
///                  signed_max_of_dest_type)) to dest_type)
/// or:
/// (truncate (smax ((smin (x, signed_max_of_dest_type)),
///                  signed_min_of_dest_type)) to dest_type).
/// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
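///
/// For example, truncating i32 to i8 matches
///   (truncate (smin (smax x, -128), 127))
/// and, with MatchPackUS,
///   (truncate (smin (smax x, 0), 255)).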
static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
  unsigned NumDstBits = VT.getScalarSizeInBits();
  unsigned NumSrcBits = In.getScalarValueSizeInBits();
  assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");

  auto MatchMinMax = [](SDValue V, unsigned Opcode,
                        const APInt &Limit) -> SDValue {
    APInt C;
    if (V.getOpcode() == Opcode &&
        ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
      return V.getOperand(0);
    return SDValue();
  };

  APInt SignedMax, SignedMin;
  if (MatchPackUS) {
    SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
    SignedMin = APInt(NumSrcBits, 0);
  } else {
    SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
    SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
  }

  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
    if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
      return SMax;

  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
      return SMin;

  return SDValue();
}

static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
                                      SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2() || !VT.isVector())
    return SDValue();

  EVT SVT = VT.getVectorElementType();
  EVT InVT = In.getValueType();
  EVT InSVT = InVT.getVectorElementType();

  // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
  // split across two registers, we can use a packusdw+perm to clamp to 0-65535
  // and concatenate at the same time. Then we can use a final vpmovuswb to
  // clip to 0-255.
  if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
      InVT == MVT::v16i32 && VT == MVT::v16i8) {
    if (auto USatVal = detectSSatPattern(In, VT, true)) {
      // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
      SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
                                           DL, DAG, Subtarget);
      assert(Mid && "Failed to pack!");
      return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
    }
  }

  // vXi32 truncate instructions are available with AVX512F.
  // vXi16 truncate instructions are only available with AVX512BW.
  // For 256-bit or smaller vectors, we require VLX.
  // FIXME: We could widen truncates to 512 to remove the VLX restriction.
  // If the result type is 256 bits or larger and we have disabled 512-bit
  // registers, we should go ahead and use the pack instructions if possible.
  bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
                       (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
                      (InVT.getSizeInBits() > 128) &&
                      (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
                      !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);

  if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
      VT.getSizeInBits() >= 64 &&
      (SVT == MVT::i8 || SVT == MVT::i16) &&
      (InSVT == MVT::i16 || InSVT == MVT::i32)) {
    if (auto USatVal = detectSSatPattern(In, VT, true)) {
      // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
      // Only do this when the result is at least 64 bits or we'll be leaving
      // dangling PACKSSDW nodes.
      if (SVT == MVT::i8 && InSVT == MVT::i32) {
        EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                     VT.getVectorNumElements());
        SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
                                             DAG, Subtarget);
        assert(Mid && "Failed to pack!");
        SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
                                           Subtarget);
        assert(V && "Failed to pack!");
        return V;
      } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
        return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
                                      Subtarget);
    }
    if (auto SSatVal = detectSSatPattern(In, VT))
      return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
                                    Subtarget);
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
      Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI())) {
    unsigned TruncOpc = 0;
    SDValue SatVal;
    if (auto SSatVal = detectSSatPattern(In, VT)) {
      SatVal = SSatVal;
      TruncOpc = X86ISD::VTRUNCS;
    } else if (auto USatVal = detectUSatPattern(In, VT, DAG, DL)) {
      SatVal = USatVal;
      TruncOpc = X86ISD::VTRUNCUS;
    }
    if (SatVal) {
      unsigned ResElts = VT.getVectorNumElements();
      // If the input type is less than 512 bits and we don't have VLX, we need
      // to widen to 512 bits.
      if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
        unsigned NumConcats = 512 / InVT.getSizeInBits();
        ResElts *= NumConcats;
        SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
        ConcatOps[0] = SatVal;
        InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
                                NumConcats * InVT.getVectorNumElements());
        SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
      }
      // Widen the result if it's narrower than 128 bits.
      if (ResElts * SVT.getSizeInBits() < 128)
        ResElts = 128 / SVT.getSizeInBits();
      EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
      SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                         DAG.getIntPtrConstant(0, DL));
    }
  }

  return SDValue();
}

/// This function detects the AVG pattern between vectors of unsigned i8/i16,
/// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
/// X86ISD::AVG instruction.
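///
/// For example, for unsigned i8 inputs a = 200 and b = 101:
///   c = (200 + 101 + 1) / 2 = 151, the average rounded up, which matches the
///   semantics of PAVGB/PAVGW.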
static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget,
                                const SDLoc &DL) {
  if (!VT.isVector())
    return SDValue();
  EVT InVT = In.getValueType();
  unsigned NumElems = VT.getVectorNumElements();

  EVT ScalarVT = VT.getVectorElementType();
  if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
        NumElems >= 2 && isPowerOf2_32(NumElems)))
    return SDValue();

  // InScalarVT is the intermediate type in AVG pattern and it should be greater
  // than the original input type (i8/i16).
  EVT InScalarVT = InVT.getVectorElementType();
  if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
    return SDValue();

  if (!Subtarget.hasSSE2())
    return SDValue();

  // Detect the following pattern:
  //
  //   %1 = zext <N x i8> %a to <N x i32>
  //   %2 = zext <N x i8> %b to <N x i32>
  //   %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
  //   %4 = add nuw nsw <N x i32> %3, %2
  //   %5 = lshr <N x i32> %4, <i32 1 x N>
  //   %6 = trunc <N x i32> %5 to <N x i8>
  //
  // In AVX512, the last instruction can also be a trunc store.
  if (In.getOpcode() != ISD::SRL)
    return SDValue();

  // A lambda checking the given SDValue is a constant vector and each element
  // is in the range [Min, Max].
  auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
    BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
    if (!BV || !BV->isConstant())
      return false;
    for (SDValue Op : V->ops()) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
      if (!C)
        return false;
      const APInt &Val = C->getAPIntValue();
      if (Val.ult(Min) || Val.ugt(Max))
        return false;
    }
    return true;
  };

  // Check if each element of the vector is right-shifted by one.
  auto LHS = In.getOperand(0);
  auto RHS = In.getOperand(1);
  if (!IsConstVectorInRange(RHS, 1, 1))
    return SDValue();
  if (LHS.getOpcode() != ISD::ADD)
    return SDValue();

  // Detect a pattern of a + b + 1 where the order doesn't matter.
  SDValue Operands[3];
  Operands[0] = LHS.getOperand(0);
  Operands[1] = LHS.getOperand(1);

  auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                       ArrayRef<SDValue> Ops) {
    return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
  };

  // Take care of the case when one of the operands is a constant vector whose
  // element is in the range [1, 256] (or [1, 65536] for i16).
  if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
      Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
      Operands[0].getOperand(0).getValueType() == VT) {
    // The pattern is detected. Subtract one from the constant vector, then
    // demote it and emit X86ISD::AVG instruction.
    SDValue VecOnes = DAG.getConstant(1, DL, InVT);
    Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
    Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
    return SplitOpsAndApply(DAG, Subtarget, DL, VT,
                            { Operands[0].getOperand(0), Operands[1] },
                            AVGBuilder);
  }

  // Matches 'add like' patterns: add(Op0,Op1) or zext(or(Op0,Op1)).
  // Match the or case only if it's 'add-like' - i.e. it can be replaced by an
  // add.
  auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
    if (ISD::ADD == V.getOpcode()) {
      Op0 = V.getOperand(0);
      Op1 = V.getOperand(1);
      return true;
    }
    if (ISD::ZERO_EXTEND != V.getOpcode())
      return false;
    V = V.getOperand(0);
    if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
        !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
      return false;
    Op0 = V.getOperand(0);
    Op1 = V.getOperand(1);
    return true;
  };

  SDValue Op0, Op1;
  if (FindAddLike(Operands[0], Op0, Op1))
    std::swap(Operands[0], Operands[1]);
  else if (!FindAddLike(Operands[1], Op0, Op1))
    return SDValue();
  Operands[2] = Op0;
  Operands[1] = Op1;

  // Now we have three operands of two additions. Check that one of them is a
  // constant vector with ones, and the other two can be promoted from i8/i16.
  for (int i = 0; i < 3; ++i) {
    if (!IsConstVectorInRange(Operands[i], 1, 1))
      continue;
    std::swap(Operands[i], Operands[2]);

    // Check if Operands[0] and Operands[1] are results of type promotion.
    for (int j = 0; j < 2; ++j)
      if (Operands[j].getValueType() != VT) {
        if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
            Operands[j].getOperand(0).getValueType() != VT)
          return SDValue();
        Operands[j] = Operands[j].getOperand(0);
      }

    // The pattern is detected, emit X86ISD::AVG instruction(s).
    return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Operands[0], Operands[1]},
                            AVGBuilder);
  }

  return SDValue();
}

static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  EVT RegVT = Ld->getValueType(0);
  EVT MemVT = Ld->getMemoryVT();
  SDLoc dl(Ld);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
  // into two 16-byte operations. Also split non-temporal aligned loads on
  // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
  ISD::LoadExtType Ext = Ld->getExtensionType();
  bool Fast;
  unsigned Alignment = Ld->getAlignment();
  if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
      Ext == ISD::NON_EXTLOAD &&
      ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
       (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
                               *Ld->getMemOperand(), &Fast) &&
        !Fast))) {
    unsigned NumElems = RegVT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    unsigned HalfAlign = 16;
    SDValue Ptr1 = Ld->getBasePtr();
    SDValue Ptr2 = DAG.getMemBasePlusOffset(Ptr1, HalfAlign, dl);
    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                                  NumElems / 2);
    SDValue Load1 =
        DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
                    Alignment, Ld->getMemOperand()->getFlags());
    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
                                Ld->getPointerInfo().getWithOffset(HalfAlign),
                                MinAlign(Alignment, HalfAlign),
                                Ld->getMemOperand()->getFlags());
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             Load1.getValue(1), Load2.getValue(1));

    SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
    return DCI.CombineTo(N, NewVec, TF, true);
  }

  // Bool vector load - attempt to cast to an integer, as we have good
  // (vXiY *ext(vXi1 bitcast(iX))) handling.
  if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
      RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
    unsigned NumElts = RegVT.getVectorNumElements();
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
    if (TLI.isTypeLegal(IntVT)) {
      SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
                                    Ld->getPointerInfo(), Alignment,
                                    Ld->getMemOperand()->getFlags());
      SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
      return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
    }
  }

  return SDValue();
}

/// If V is a build vector of boolean constants and exactly one of those
/// constants is true, return the operand index of that true element.
/// Otherwise, return -1.
static int getOneTrueElt(SDValue V) {
  // This needs to be a build vector of booleans.
  // TODO: Checking for the i1 type matches the IR definition for the mask,
  // but the mask check could be loosened to i8 or other types. That might
  // also require checking more than 'allOnesValue'; eg, the x86 HW
  // instructions only require that the MSB is set for each mask element.
  // The ISD::MSTORE comments/definition do not specify how the mask operand
  // is formatted.
  auto *BV = dyn_cast<BuildVectorSDNode>(V);
  if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
    return -1;

  int TrueIndex = -1;
  unsigned NumElts = BV->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    const SDValue &Op = BV->getOperand(i);
    if (Op.isUndef())
      continue;
    auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
    if (!ConstNode)
      return -1;
    if (ConstNode->getAPIntValue().isAllOnesValue()) {
      // If we already found a one, this is too many.
      if (TrueIndex >= 0)
        return -1;
      TrueIndex = i;
    }
  }
  return TrueIndex;
}

/// Given a masked memory load/store operation, return true if it has one mask
/// bit set. If it has one mask bit set, then also return the memory address of
/// the scalar element to load/store, the vector index to insert/extract that
/// scalar element, and the alignment for the scalar memory access.
static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
                                         SelectionDAG &DAG, SDValue &Addr,
                                         SDValue &Index, unsigned &Alignment) {
  int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
  if (TrueMaskElt < 0)
    return false;

  // Get the address of the one scalar element that is specified by the mask
  // using the appropriate offset from the base pointer.
  EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
  Addr = MaskedOp->getBasePtr();
  if (TrueMaskElt != 0) {
    unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
    Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
  }

  Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
  Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
  return true;
}

/// If exactly one element of the mask is set for a non-extending masked load,
/// it is a scalar load and vector insert.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
static SDValue
reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
  // However, some target hooks may need to be added to know when the transform
  // is profitable. Endianness would also have to be considered.

  SDValue Addr, VecIndex;
  unsigned Alignment;
  if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
    return SDValue();

  // Load the one scalar element that is specified by the mask using the
  // appropriate offset from the base pointer.
  SDLoc DL(ML);
  EVT VT = ML->getValueType(0);
  EVT EltVT = VT.getVectorElementType();
  SDValue Load =
      DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
                  Alignment, ML->getMemOperand()->getFlags());

  // Insert the loaded element into the appropriate place in the vector.
  SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
                               ML->getPassThru(), Load, VecIndex);
  return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
}

static SDValue
combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI) {
  if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
    return SDValue();

  SDLoc DL(ML);
  EVT VT = ML->getValueType(0);

  // If we are loading the first and last elements of a vector, it is safe and
  // always faster to load the whole vector. Replace the masked load with a
  // vector load and select.
  unsigned NumElts = VT.getVectorNumElements();
  BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
  bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
  bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
  if (LoadFirstElt && LoadLastElt) {
    SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
                                ML->getMemOperand());
    SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
                                  ML->getPassThru());
    return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
  }

  // Convert a masked load with a constant mask into a masked load and a select.
  // This allows the select operation to use a faster kind of select instruction
  // (for example, vblendvps -> vblendps).

  // Don't try this if the pass-through operand is already undefined. That would
  // cause an infinite loop because that's what we're about to create.
  if (ML->getPassThru().isUndef())
    return SDValue();

  if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
    return SDValue();

  // The new masked load has an undef pass-through operand. The select uses the
  // original pass-through operand.
  SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
                                    ML->getMask(), DAG.getUNDEF(VT),
                                    ML->getMemoryVT(), ML->getMemOperand(),
                                    ML->getExtensionType());
  SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
                                ML->getPassThru());

  return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
}

static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);

  // TODO: Expanding load with constant mask may be optimized as well.
  if (Mld->isExpandingLoad())
    return SDValue();

  if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
    if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
      return ScalarLoad;
    // TODO: Do some AVX512 subsets benefit from this transform?
    if (!Subtarget.hasAVX512())
      if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
        return Blend;
  }

  return SDValue();
}

/// If exactly one element of the mask is set for a non-truncating masked store,
/// it is a vector extract and scalar store.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
                                              SelectionDAG &DAG) {
  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
  // However, some target hooks may need to be added to know when the transform
  // is profitable. Endianness would also have to be considered.

  SDValue Addr, VecIndex;
  unsigned Alignment;
  if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
    return SDValue();

  // Extract the one scalar element that is actually being stored.
  SDLoc DL(MS);
  EVT VT = MS->getValue().getValueType();
  EVT EltVT = VT.getVectorElementType();
  SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
                                MS->getValue(), VecIndex);

  // Store that element at the appropriate offset from the base pointer.
  return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
                      Alignment, MS->getMemOperand()->getFlags());
}

static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget &Subtarget) {
  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
  if (Mst->isCompressingStore())
    return SDValue();

  EVT VT = Mst->getValue().getValueType();
  SDLoc dl(Mst);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (Mst->isTruncatingStore())
    return SDValue();

  if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
    return ScalarStore;

  // If the mask value has been legalized to a non-boolean vector, try to
  // simplify ops leading up to it. We only demand the MSB of each lane.
  SDValue Mask = Mst->getMask();
  if (Mask.getScalarValueSizeInBits() != 1) {
    APInt DemandedMask(APInt::getSignMask(VT.getScalarSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
      return SDValue(N, 0);
  }

  SDValue Value = Mst->getValue();
  if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                            Mst->getMemoryVT())) {
    return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
                              Mst->getBasePtr(), Mask,
                              Mst->getMemoryVT(), Mst->getMemOperand(), true);
  }

  return SDValue();
}

static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  EVT StVT = St->getMemoryVT();
  SDLoc dl(St);
  unsigned Alignment = St->getAlignment();
  SDValue StoredVal = St->getValue();
  EVT VT = StoredVal.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Convert a store of vXi1 into a store of iX and a bitcast.
  if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
      VT.getVectorElementType() == MVT::i1) {

    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
    StoredVal = DAG.getBitcast(NewVT, StoredVal);

    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
  // This will avoid a copy to k-register.
  if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
      StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
      StoredVal.getOperand(0).getValueType() == MVT::i8) {
    return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
                        St->getBasePtr(), St->getPointerInfo(),
                        St->getAlignment(), St->getMemOperand()->getFlags());
  }

  // Widen v2i1/v4i1 stores to v8i1.
  if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
      Subtarget.hasAVX512()) {
    unsigned NumConcats = 8 / VT.getVectorNumElements();
    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
    Ops[0] = StoredVal;
    StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  // Turn vXi1 stores of constants into a scalar store.
  if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
       VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
      ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
    // If it's a v64i1 store without 64-bit support, we need two stores.
    if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
      SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
                                      StoredVal->ops().slice(0, 32));
      Lo = combinevXi1ConstantToInteger(Lo, DAG);
      SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
                                      StoredVal->ops().slice(32, 32));
      Hi = combinevXi1ConstantToInteger(Hi, DAG);

      SDValue Ptr0 = St->getBasePtr();
      SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);

      SDValue Ch0 =
          DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
                       Alignment, St->getMemOperand()->getFlags());
      SDValue Ch1 =
          DAG.getStore(St->getChain(), dl, Hi, Ptr1,
                       St->getPointerInfo().getWithOffset(4),
                       MinAlign(Alignment, 4U),
                       St->getMemOperand()->getFlags());
      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
    }

    StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
  // Sandy Bridge, perform two 16-byte stores.
  bool Fast;
  if (VT.is256BitVector() && StVT == VT &&
      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                             *St->getMemOperand(), &Fast) &&
      !Fast) {
    unsigned NumElems = VT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    return splitVectorStore(St, DAG);
  }

  // Split under-aligned vector non-temporal stores.
  if (St->isNonTemporal() && StVT == VT && Alignment < VT.getStoreSize()) {
    // ZMM/YMM nt-stores - either it can be stored as a series of shorter
    // vectors or the legalizer can scalarize it to use MOVNTI.
    if (VT.is256BitVector() || VT.is512BitVector()) {
      unsigned NumElems = VT.getVectorNumElements();
      if (NumElems < 2)
        return SDValue();
      return splitVectorStore(St, DAG);
    }

    // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
    // to use MOVNTI.
    if (VT.is128BitVector() && Subtarget.hasSSE2()) {
      MVT NTVT = Subtarget.hasSSE4A()
                     ? MVT::v2f64
                     : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
      return scalarizeVectorStore(St, NTVT, DAG);
    }
  }

  // Try to optimize v16i16->v16i8 truncating stores when BWI is not supported,
  // but AVX512F is, by extending to v16i32 and truncating.
  if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
      St->getValue().getOpcode() == ISD::TRUNCATE &&
      St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
      TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
      St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
    SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
    return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
                             MVT::v16i8, St->getMemOperand());
  }

  // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
  if (!St->isTruncatingStore() && StoredVal.hasOneUse() &&
      (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
       StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
      TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
    bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
    return EmitTruncSStore(IsSigned, St->getChain(),
                           dl, StoredVal.getOperand(0), St->getBasePtr(),
                           VT, St->getMemOperand(), DAG);
  }

  // Optimize trunc store (of multiple scalars) to shuffle and store.
  // First, pack all of the elements in one place. Next, store to memory
  // in fewer chunks.
  if (St->isTruncatingStore() && VT.isVector()) {
    // Check if we can detect an AVG pattern from the truncation. If yes,
    // replace the trunc store by a normal store with the result of X86ISD::AVG
    // instruction.
    if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
      if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
                                         Subtarget, dl))
        return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
                            St->getPointerInfo(), St->getAlignment(),
                            St->getMemOperand()->getFlags());

    if (TLI.isTruncStoreLegal(VT, StVT)) {
      if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
        return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
                               dl, Val, St->getBasePtr(),
                               St->getMemoryVT(), St->getMemOperand(), DAG);
      if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
                                          DAG, dl))
        return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
                               dl, Val, St->getBasePtr(),
                               St->getMemoryVT(), St->getMemOperand(), DAG);
    }

    return SDValue();
  }

  // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS.  This qualifies as a quick hack.

  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function &F = DAG.getMachineFunction().getFunction();
  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
  bool F64IsLegal =
      !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
  if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
      isa<LoadSDNode>(St->getValue()) &&
      cast<LoadSDNode>(St->getValue())->isSimple() &&
      St->getChain().hasOneUse() && St->isSimple()) {
    LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
    SmallVector<SDValue, 8> Ops;

    if (!ISD::isNormalLoad(Ld))
      return SDValue();

    // If this is not the MMX case, i.e. we are just turning i64 load/store
    // into f64 load/store, avoid the transformation if there are multiple
    // uses of the loaded value.
    if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    SDLoc LdDL(Ld);
    SDLoc StDL(N);
    // If we are a 64-bit capable x86, lower to a single movq load/store pair.
    // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
    // pair instead.
    if (Subtarget.is64Bit() || F64IsLegal) {
      MVT LdVT = Subtarget.is64Bit() ? MVT::i64 : MVT::f64;
      SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
                                  Ld->getMemOperand());

      // Make sure new load is placed in same chain order.
      DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
      return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
                          St->getMemOperand());
    }

    // Otherwise, lower to two pairs of 32-bit loads / stores.
    SDValue LoAddr = Ld->getBasePtr();
    SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);

    SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
                               Ld->getPointerInfo(), Ld->getAlignment(),
                               Ld->getMemOperand()->getFlags());
    SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
                               Ld->getPointerInfo().getWithOffset(4),
                               MinAlign(Ld->getAlignment(), 4),
                               Ld->getMemOperand()->getFlags());
    // Make sure new loads are placed in same chain order.
    DAG.makeEquivalentMemoryOrdering(Ld, LoLd);
    DAG.makeEquivalentMemoryOrdering(Ld, HiLd);

    LoAddr = St->getBasePtr();
    HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);

    SDValue LoSt =
        DAG.getStore(St->getChain(), StDL, LoLd, LoAddr, St->getPointerInfo(),
                     St->getAlignment(), St->getMemOperand()->getFlags());
    SDValue HiSt = DAG.getStore(St->getChain(), StDL, HiLd, HiAddr,
                                St->getPointerInfo().getWithOffset(4),
                                MinAlign(St->getAlignment(), 4),
                                St->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
  }

  // This is similar to the above case, but here we handle a scalar 64-bit
  // integer store that is extracted from a vector on a 32-bit target.
  // If we have SSE2, then we can treat it like a floating-point double
  // to get past legalization. The execution dependencies fixup pass will
  // choose the optimal machine instruction for the store if this really is
  // an integer or v2f32 rather than an f64.
  if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
      St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue OldExtract = St->getOperand(1);
    SDValue ExtOp0 = OldExtract.getOperand(0);
    unsigned VecSize = ExtOp0.getValueSizeInBits();
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
    SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
    SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                     BitCast, OldExtract.getOperand(1));
    return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  return SDValue();
}

/// Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS.  A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
/// returning the resulting values in a vector.  For example, if
///   A = < float a0, float a1, float a2, float a3 >
/// and
///   B = < float b0, float b1, float b2, float b3 >
/// then the result of doing a horizontal operation on A and B is
///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
/// A horizontal-op B, for some already available A and B, and if so then LHS is
/// set to A, RHS to B, and the routine returns 'true'.
static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget,
                              bool IsCommutative) {
  // If either operand is undef, bail out. The binop should be simplified.
  if (LHS.isUndef() || RHS.isUndef())
    return false;

  // Look for the following pattern:
  //   A = < float a0, float a1, float a2, float a3 >
  //   B = < float b0, float b1, float b2, float b3 >
  // and
  //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
  //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
  // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
  // which is A horizontal-op B.

  MVT VT = LHS.getSimpleValueType();
  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for horizontal add/sub");
  unsigned NumElts = VT.getVectorNumElements();

  // TODO - can we make a general helper method that does all of this for us?
  auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
                        SmallVectorImpl<int> &ShuffleMask) {
    if (Op.getOpcode() == ISD::VECTOR_SHUFFLE) {
      if (!Op.getOperand(0).isUndef())
        N0 = Op.getOperand(0);
      if (!Op.getOperand(1).isUndef())
        N1 = Op.getOperand(1);
      ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
      ShuffleMask.append(Mask.begin(), Mask.end());
      return;
    }
    bool UseSubVector = false;
    if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        Op.getOperand(0).getValueType().is256BitVector() &&
        llvm::isNullConstant(Op.getOperand(1))) {
      Op = Op.getOperand(0);
      UseSubVector = true;
    }
    bool IsUnary;
    SmallVector<SDValue, 2> SrcOps;
    SmallVector<int, 16> SrcShuffleMask;
    SDValue BC = peekThroughBitcasts(Op);
    if (isTargetShuffle(BC.getOpcode()) &&
        getTargetShuffleMask(BC.getNode(), BC.getSimpleValueType(), false,
                             SrcOps, SrcShuffleMask, IsUnary)) {
      if (!UseSubVector && SrcShuffleMask.size() == NumElts &&
          SrcOps.size() <= 2) {
        N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
        N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
        ShuffleMask.append(SrcShuffleMask.begin(), SrcShuffleMask.end());
      }
      if (UseSubVector && (SrcShuffleMask.size() == (NumElts * 2)) &&
          SrcOps.size() == 1) {
        N0 = extract128BitVector(SrcOps[0], 0, DAG, SDLoc(Op));
        N1 = extract128BitVector(SrcOps[0], NumElts, DAG, SDLoc(Op));
        ArrayRef<int> Mask = ArrayRef<int>(SrcShuffleMask).slice(0, NumElts);
        ShuffleMask.append(Mask.begin(), Mask.end());
      }
    }
  };

  // View LHS in the form
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  // If LHS is not a shuffle, then pretend it is the identity shuffle:
  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
  // NOTE: A default initialized SDValue represents an UNDEF of type VT.
  SDValue A, B;
  SmallVector<int, 16> LMask;
  GetShuffle(LHS, A, B, LMask);

  // Likewise, view RHS in the form
  //   RHS = VECTOR_SHUFFLE C, D, RMask
  SDValue C, D;
  SmallVector<int, 16> RMask;
  GetShuffle(RHS, C, D, RMask);

  // At least one of the operands should be a vector shuffle.
  unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
  if (NumShuffles == 0)
    return false;

  if (LMask.empty()) {
    A = LHS;
    for (unsigned i = 0; i != NumElts; ++i)
      LMask.push_back(i);
  }

  if (RMask.empty()) {
    C = RHS;
    for (unsigned i = 0; i != NumElts; ++i)
      RMask.push_back(i);
  }

  // If A and B occur in reverse order in RHS, then canonicalize by commuting
  // RHS operands and shuffle mask.
  if (A != C) {
    std::swap(C, D);
    ShuffleVectorSDNode::commuteMask(RMask);
  }
  // Check that the shuffles are both shuffling the same vectors.
  if (!(A == C && B == D))
    return false;

  // LHS and RHS are now:
  //   LHS = shuffle A, B, LMask
  //   RHS = shuffle A, B, RMask
  // Check that the masks correspond to performing a horizontal operation.
  // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
  // so we just repeat the inner loop if this is a 256-bit op.
  unsigned Num128BitChunks = VT.getSizeInBits() / 128;
  unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
  assert((NumEltsPer128BitChunk % 2 == 0) &&
         "Vector type should have an even number of elements in each lane");
  for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
    for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
      // Ignore undefined components.
      int LIdx = LMask[i + j], RIdx = RMask[i + j];
      if (LIdx < 0 || RIdx < 0 ||
          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
        continue;

      // The low half of the 128-bit result must choose from A.
      // The high half of the 128-bit result must choose from B,
      // unless B is undef. In that case, we are always choosing from A.
      unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
      unsigned Src = B.getNode() ? i >= NumEltsPer64BitChunk : 0;

      // Check that successive elements are being operated on. If not, this is
      // not a horizontal operation.
      int Index = 2 * (i % NumEltsPer64BitChunk) + NumElts * Src + j;
      if (!(LIdx == Index && RIdx == Index + 1) &&
          !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
        return false;
    }
  }

  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.

  if (!shouldUseHorizontalOp(LHS == RHS && NumShuffles < 2, DAG, Subtarget))
    return false;

  LHS = DAG.getBitcast(VT, LHS);
  RHS = DAG.getBitcast(VT, RHS);
  return true;
}

/// Do target-specific dag combines on floating-point adds/subs.
static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  bool IsFadd = N->getOpcode() == ISD::FADD;
  auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
  assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");

  // Try to synthesize horizontal add/sub from adds/subs of shuffles.
  if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, DAG, Subtarget, IsFadd))
    return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);

  return SDValue();
}

/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
/// the codegen.
/// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
/// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
///       anything that is guaranteed to be transformed by DAGCombiner.
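/// A hypothetical profitable case (the constant operand constant-folds, so only
/// one real truncation remains):
///   trunc (and v4i32 X, <255,255,255,255>) to v4i16
///     --> and (trunc v4i32 X to v4i16), v4i16 <255,255,255,255>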
static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget,
                                          const SDLoc &DL) {
  assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
  SDValue Src = N->getOperand(0);
  unsigned SrcOpcode = Src.getOpcode();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();

  auto IsFreeTruncation = [VT](SDValue Op) {
    unsigned TruncSizeInBits = VT.getScalarSizeInBits();

    // See if this has been extended from a smaller/equal size to
    // the truncation size, allowing a truncation to combine with the extend.
    unsigned Opcode = Op.getOpcode();
    if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
         Opcode == ISD::ZERO_EXTEND) &&
        Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
      return true;

    // See if this is a single use constant which can be constant folded.
    // NOTE: We don't peek through bitcasts here because there is currently
    // no support for constant folding truncate+bitcast+vector_of_constants, so
    // we'd just end up with a truncate on both operands, which would get
    // turned back into (truncate (binop)) and cause an infinite loop.
    return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
  };

  auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
    SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
    SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
    return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
  };

  // Don't combine if the operation has other uses.
  if (!Src.hasOneUse())
    return SDValue();

  // Only support vector truncation for now.
  // TODO: i64 scalar math would benefit as well.
  if (!VT.isVector())
    return SDValue();

  // In most cases it's only worth pre-truncating if we're only facing the cost
  // of one truncation, i.e. if one of the inputs will constant fold or the
  // input is repeated.
  switch (SrcOpcode) {
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR: {
    SDValue Op0 = Src.getOperand(0);
    SDValue Op1 = Src.getOperand(1);
    if (TLI.isOperationLegalOrPromote(SrcOpcode, VT) &&
        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
      return TruncateArithmetic(Op0, Op1);
    break;
  }

  case ISD::MUL:
    // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) -
    // it's better to truncate if we have the chance.
    if (SrcVT.getScalarType() == MVT::i64 &&
        TLI.isOperationLegal(SrcOpcode, VT) &&
        !TLI.isOperationLegal(SrcOpcode, SrcVT))
      return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
    LLVM_FALLTHROUGH;
  case ISD::ADD: {
    SDValue Op0 = Src.getOperand(0);
    SDValue Op1 = Src.getOperand(1);
    if (TLI.isOperationLegal(SrcOpcode, VT) &&
        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
      return TruncateArithmetic(Op0, Op1);
    break;
  }
  case ISD::SUB: {
    // TODO: ISD::SUB We are conservative and require both sides to be freely
    // truncatable to avoid interfering with combineSubToSubus.
    SDValue Op0 = Src.getOperand(0);
    SDValue Op1 = Src.getOperand(1);
    if (TLI.isOperationLegal(SrcOpcode, VT) &&
        (Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
      return TruncateArithmetic(Op0, Op1);
    break;
  }
  }

  return SDValue();
}

/// Truncate using ISD::AND mask and X86ISD::PACKUS.
/// e.g. trunc <8 x i32> X to <8 x i16> -->
/// MaskX = X & 0xffff (clear high bits to prevent saturation)
/// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
                                                 const X86Subtarget &Subtarget,
                                                 SelectionDAG &DAG) {
  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  EVT OutVT = N->getValueType(0);

  APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
                                    OutVT.getScalarSizeInBits());
  In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
  return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
}

/// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
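/// A sketch of the expected result (mirroring the PACKUS helper above), e.g.
/// trunc <8 x i32> X to <8 x i16> -->
/// SExtX = sign_extend_inreg X, i16 (so the PACKSS saturation is a no-op)
/// packss (extract_subv SExtX, 0), (extract_subv SExtX, 1)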
static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
                                                 const X86Subtarget &Subtarget,
                                                 SelectionDAG &DAG) {
  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  EVT OutVT = N->getValueType(0);
  In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
                   DAG.getValueType(OutVT));
  return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
}

/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
/// legalization the truncation will be translated into a BUILD_VECTOR with each
/// element extracted from a vector and then truncated, and it is difficult to
/// perform this optimization on that form.
static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
  EVT OutVT = N->getValueType(0);
  if (!OutVT.isVector())
    return SDValue();

  SDValue In = N->getOperand(0);
  if (!In.getValueType().isSimple())
    return SDValue();

  EVT InVT = In.getValueType();
  unsigned NumElems = OutVT.getVectorNumElements();

  // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
  // SSE2, and we need to take care of it specially.
  // AVX512 provides vpmovdb.
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
    return SDValue();

  EVT OutSVT = OutVT.getVectorElementType();
  EVT InSVT = InVT.getVectorElementType();
  if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
        NumElems >= 8))
    return SDValue();

  // SSSE3's pshufb results in fewer instructions in the cases below.
  if (Subtarget.hasSSSE3() && NumElems == 8 &&
      ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
       (InSVT == MVT::i32 && OutSVT == MVT::i16)))
    return SDValue();

  SDLoc DL(N);
  // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
  // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
  // truncate 2 x v4i32 to v8i16.
  if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
    return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
  if (InSVT == MVT::i32)
    return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);

  return SDValue();
}

/// This function transforms vector truncation of 'extended sign-bits' or
/// 'extended zero-bits' values, i.e. vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32,
/// into X86ISD::PACKSS/PACKUS operations.
static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
                                               SelectionDAG &DAG,
                                               const X86Subtarget &Subtarget) {
  // Requires SSE2.
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
    return SDValue();

  SDValue In = N->getOperand(0);
  if (!In.getValueType().isSimple())
    return SDValue();

  MVT VT = N->getValueType(0).getSimpleVT();
  MVT SVT = VT.getScalarType();

  MVT InVT = In.getValueType().getSimpleVT();
  MVT InSVT = InVT.getScalarType();

  // Check we have a truncation suited for PACKSS/PACKUS.
  if (!VT.is128BitVector() && !VT.is256BitVector())
    return SDValue();
  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
    return SDValue();
  if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
    return SDValue();

  // AVX512 has fast truncate, but if the input is already going to be split,
  // there's no harm in trying pack.
  if (Subtarget.hasAVX512() &&
      !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
        InVT.is512BitVector()))
    return SDValue();

  unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;

  // Use PACKUS if the input has zero-bits that extend all the way to the
  // packed/truncated value. e.g. masks, zext_in_reg, etc.
  KnownBits Known = DAG.computeKnownBits(In);
  unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
  if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
    return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);

  // Use PACKSS if the input has sign-bits that extend all the way to the
  // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
  unsigned NumSignBits = DAG.ComputeNumSignBits(In);
  if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
    return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);

  return SDValue();
}

// Try to form a MULHU or MULHS node by looking for
// (trunc (srl (mul ext, ext), 16))
// TODO: This is X86 specific because we want to be able to handle wide types
// before type legalization. But we can only do it if the vector will be
// legalized via widening/splitting. Type legalization can't handle promotion
// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
// combiner.
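// For illustration (an assumed pair of instances of the pattern above), with
// vXi16 inputs A and B:
//   trunc (srl (mul (zext A), (zext B)), 16) --> mulhu A, B
//   trunc (srl (mul (sext A), (sext B)), 16) --> mulhs A, B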
static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
                            SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // First instruction should be a right shift of a multiply.
  if (Src.getOpcode() != ISD::SRL ||
      Src.getOperand(0).getOpcode() != ISD::MUL)
    return SDValue();

  if (!Subtarget.hasSSE2())
    return SDValue();

  // Only handle vXi16 types that are at least 128 bits wide unless they will
  // be widened.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
    return SDValue();

  // Input type should be vXi32.
  EVT InVT = Src.getValueType();
  if (InVT.getVectorElementType() != MVT::i32)
    return SDValue();

  // Need a shift by 16.
  APInt ShiftAmt;
  if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
      ShiftAmt != 16)
    return SDValue();

  SDValue LHS = Src.getOperand(0).getOperand(0);
  SDValue RHS = Src.getOperand(0).getOperand(1);

  unsigned ExtOpc = LHS.getOpcode();
  if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
      RHS.getOpcode() != ExtOpc)
    return SDValue();

  // Peek through the extends.
  LHS = LHS.getOperand(0);
  RHS = RHS.getOperand(0);

  // Ensure the input types match.
  if (LHS.getValueType() != VT || RHS.getValueType() != VT)
    return SDValue();

  unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
  return DAG.getNode(Opc, DL, VT, LHS, RHS);
}

// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
// from one vector with signed bytes from another vector, adds together
// adjacent pairs of 16-bit products, and saturates the result before
// truncating to 16-bits.
//
// Which looks something like this:
// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
//                 (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget,
                               const SDLoc &DL) {
  if (!VT.isVector() || !Subtarget.hasSSSE3())
    return SDValue();

  unsigned NumElems = VT.getVectorNumElements();
  EVT ScalarVT = VT.getVectorElementType();
  if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
    return SDValue();

  SDValue SSatVal = detectSSatPattern(In, VT);
  if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
    return SDValue();

  // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
  // of multiplies from even/odd elements.
  SDValue N0 = SSatVal.getOperand(0);
  SDValue N1 = SSatVal.getOperand(1);

  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  SDValue N10 = N1.getOperand(0);
  SDValue N11 = N1.getOperand(1);

  // TODO: Handle constant vectors and use knownbits/computenumsignbits?
  // Canonicalize zero_extend to LHS.
  if (N01.getOpcode() == ISD::ZERO_EXTEND)
    std::swap(N00, N01);
  if (N11.getOpcode() == ISD::ZERO_EXTEND)
    std::swap(N10, N11);

  // Ensure we have a zero_extend and a sign_extend.
  if (N00.getOpcode() != ISD::ZERO_EXTEND ||
      N01.getOpcode() != ISD::SIGN_EXTEND ||
      N10.getOpcode() != ISD::ZERO_EXTEND ||
      N11.getOpcode() != ISD::SIGN_EXTEND)
    return SDValue();

  // Peek through the extends.
  N00 = N00.getOperand(0);
  N01 = N01.getOperand(0);
  N10 = N10.getOperand(0);
  N11 = N11.getOperand(0);

  // Ensure the extend is from vXi8.
  if (N00.getValueType().getVectorElementType() != MVT::i8 ||
      N01.getValueType().getVectorElementType() != MVT::i8 ||
      N10.getValueType().getVectorElementType() != MVT::i8 ||
      N11.getValueType().getVectorElementType() != MVT::i8)
    return SDValue();

  // All inputs should be build_vectors.
  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
      N01.getOpcode() != ISD::BUILD_VECTOR ||
      N10.getOpcode() != ISD::BUILD_VECTOR ||
      N11.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // N00/N10 are zero extended. N01/N11 are sign extended.

  // For each result element, we need the even element of one vector multiplied
  // by the even element of the other vector, added to the product of the
  // corresponding odd elements. That is, for each element i we must be
  // computing:
  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
  SDValue ZExtIn, SExtIn;
  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue N00Elt = N00.getOperand(i);
    SDValue N01Elt = N01.getOperand(i);
    SDValue N10Elt = N10.getOperand(i);
    SDValue N11Elt = N11.getOperand(i);
    // TODO: Be more tolerant to undefs.
    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
      return SDValue();
    unsigned IdxN00 = ConstN00Elt->getZExtValue();
    unsigned IdxN01 = ConstN01Elt->getZExtValue();
    unsigned IdxN10 = ConstN10Elt->getZExtValue();
    unsigned IdxN11 = ConstN11Elt->getZExtValue();
    // Add is commutative so indices can be reordered.
    if (IdxN00 > IdxN10) {
      std::swap(IdxN00, IdxN10);
      std::swap(IdxN01, IdxN11);
    }
    // N0 indices must be the even elements. N1 indices must be the next odd
    // elements.
    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
      return SDValue();
    SDValue N00In = N00Elt.getOperand(0);
    SDValue N01In = N01Elt.getOperand(0);
    SDValue N10In = N10Elt.getOperand(0);
    SDValue N11In = N11Elt.getOperand(0);
    // First time we find an input capture it.
    if (!ZExtIn) {
      ZExtIn = N00In;
      SExtIn = N01In;
    }
    if (ZExtIn != N00In || SExtIn != N01In ||
        ZExtIn != N10In || SExtIn != N11In)
      return SDValue();
  }

  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                         ArrayRef<SDValue> Ops) {
    // Build the VPMADDUBSW node on the (possibly split) vXi8 inputs; the
    // result has half as many i16 elements.
    EVT InVT = Ops[0].getValueType();
    assert(InVT.getScalarType() == MVT::i8 &&
           "Unexpected scalar element type");
    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                 InVT.getVectorNumElements() / 2);
    return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
                          PMADDBuilder);
}

static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(0);
  SDLoc DL(N);

  // Attempt to pre-truncate inputs to arithmetic ops instead.
  if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
    return V;

  // Try to detect AVG pattern first.
  if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
    return Avg;

  // Try to detect PMADD
  if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
    return PMAdd;

  // Try to combine truncation with signed/unsigned saturation.
  if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
    return Val;

  // Try to combine PMULHUW/PMULHW for vXi16.
  if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
    return V;

  // The bitcast source is a direct mmx result.
  // Detect an i32 truncation of a bitcast from x86mmx and use MMX_MOVD2W.
  if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
    SDValue BCSrc = Src.getOperand(0);
    if (BCSrc.getValueType() == MVT::x86mmx)
      return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
  }

  // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
  if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
    return V;

  return combineVectorTruncation(N, DAG, Subtarget);
}

static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDValue In = N->getOperand(0);
  SDLoc DL(N);

  if (auto SSatVal = detectSSatPattern(In, VT))
    return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
  if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
    return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);

  return SDValue();
}

/// Returns the negated value if the node \p N flips sign of FP value.
///
/// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
/// or FSUB(0, x)
/// AVX512F does not have FXOR, so FNEG is lowered as
/// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
/// In this case we go through all bitcasts.
/// This also recognizes splat of a negated value and returns the splat of that
/// value.
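/// A sketch of the constant-mask case handled below:
///   (v4f32 (bitcast (xor (bitcast X), splat(0x80000000)))) returns X, because
/// every constant element is a sign-bit mask.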
static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
  if (N->getOpcode() == ISD::FNEG)
    return N->getOperand(0);

  // Don't recurse exponentially.
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return SDValue();

  unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();

  SDValue Op = peekThroughBitcasts(SDValue(N, 0));
  EVT VT = Op->getValueType(0);
  // Make sure the element size doesn't change.
  if (VT.getScalarSizeInBits() != ScalarSize)
    return SDValue();

  if (auto SVOp = dyn_cast<ShuffleVectorSDNode>(Op.getNode())) {
    // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
    // of this is VECTOR_SHUFFLE(-VEC1, UNDEF).  The mask can be anything here.
    if (!SVOp->getOperand(1).isUndef())
      return SDValue();
    if (SDValue NegOp0 = isFNEG(DAG, SVOp->getOperand(0).getNode(), Depth + 1))
      if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
        return DAG.getVectorShuffle(VT, SDLoc(SVOp), NegOp0, DAG.getUNDEF(VT),
                                    SVOp->getMask());
    return SDValue();
  }
  unsigned Opc = Op.getOpcode();
  if (Opc == ISD::INSERT_VECTOR_ELT) {
    // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
    // -V, INDEX).
    SDValue InsVector = Op.getOperand(0);
    SDValue InsVal = Op.getOperand(1);
    if (!InsVector.isUndef())
      return SDValue();
    if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
      if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
        return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
                           NegInsVal, Op.getOperand(2));
    return SDValue();
  }

  if (Opc != X86ISD::FXOR && Opc != ISD::XOR && Opc != ISD::FSUB)
    return SDValue();

  SDValue Op1 = Op.getOperand(1);
  SDValue Op0 = Op.getOperand(0);

  // For XOR and FXOR, we want to check if constant bits of Op1 are sign bit
  // masks. For FSUB, we have to check if constant bits of Op0 are sign bit
  // masks and hence we swap the operands.
  if (Opc == ISD::FSUB)
    std::swap(Op0, Op1);

  APInt UndefElts;
  SmallVector<APInt, 16> EltBits;
  // Extract constant bits and see if they are all sign bit masks. Ignore the
  // undef elements.
  if (getTargetConstantBitsFromNode(Op1, ScalarSize,
                                    UndefElts, EltBits,
                                    /* AllowWholeUndefs */ true,
                                    /* AllowPartialUndefs */ false)) {
    for (unsigned I = 0, E = EltBits.size(); I < E; I++)
      if (!UndefElts[I] && !EltBits[I].isSignMask())
        return SDValue();

    return peekThroughBitcasts(Op0);
  }

  return SDValue();
}

static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
                                bool NegRes) {
  if (NegMul) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode");
    case ISD::FMA:             Opcode = X86ISD::FNMADD;       break;
    case X86ISD::FMADD_RND:    Opcode = X86ISD::FNMADD_RND;   break;
    case X86ISD::FMSUB:        Opcode = X86ISD::FNMSUB;       break;
    case X86ISD::FMSUB_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
    case X86ISD::FNMADD:       Opcode = ISD::FMA;             break;
    case X86ISD::FNMADD_RND:   Opcode = X86ISD::FMADD_RND;    break;
    case X86ISD::FNMSUB:       Opcode = X86ISD::FMSUB;        break;
    case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FMSUB_RND;    break;
    }
  }

  if (NegAcc) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode");
    case ISD::FMA:             Opcode = X86ISD::FMSUB;        break;
    case X86ISD::FMADD_RND:    Opcode = X86ISD::FMSUB_RND;    break;
    case X86ISD::FMSUB:        Opcode = ISD::FMA;             break;
    case X86ISD::FMSUB_RND:    Opcode = X86ISD::FMADD_RND;    break;
    case X86ISD::FNMADD:       Opcode = X86ISD::FNMSUB;       break;
    case X86ISD::FNMADD_RND:   Opcode = X86ISD::FNMSUB_RND;   break;
    case X86ISD::FNMSUB:       Opcode = X86ISD::FNMADD;       break;
    case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FNMADD_RND;   break;
    case X86ISD::FMADDSUB:     Opcode = X86ISD::FMSUBADD;     break;
    case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break;
    case X86ISD::FMSUBADD:     Opcode = X86ISD::FMADDSUB;     break;
    case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break;
    }
  }

  if (NegRes) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode");
    case ISD::FMA:             Opcode = X86ISD::FNMSUB;       break;
    case X86ISD::FMADD_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
    case X86ISD::FMSUB:        Opcode = X86ISD::FNMADD;       break;
    case X86ISD::FMSUB_RND:    Opcode = X86ISD::FNMADD_RND;   break;
    case X86ISD::FNMADD:       Opcode = X86ISD::FMSUB;        break;
    case X86ISD::FNMADD_RND:   Opcode = X86ISD::FMSUB_RND;    break;
    case X86ISD::FNMSUB:       Opcode = ISD::FMA;             break;
    case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FMADD_RND;    break;
    }
  }

  return Opcode;
}
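
// For example (assumed illustrative calls to the helper above):
//   negateFMAOpcode(ISD::FMA, /*NegMul=*/true, /*NegAcc=*/false, /*NegRes=*/false)
//     yields X86ISD::FNMADD, i.e. -(a*b)+c.
//   negateFMAOpcode(ISD::FMA, /*NegMul=*/false, /*NegAcc=*/true, /*NegRes=*/false)
//     yields X86ISD::FMSUB, i.e. (a*b)-c.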

/// Do target-specific dag combines on floating point negations.
static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
  EVT OrigVT = N->getValueType(0);
  SDValue Arg = isFNEG(DAG, N);
  if (!Arg)
    return SDValue();

  EVT VT = Arg.getValueType();
  EVT SVT = VT.getScalarType();
  SDLoc DL(N);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  // If we're negating a FMUL node on a target with FMA, then we can avoid the
  // use of a constant by performing (-0 - A*B) instead.
  // FIXME: Check rounding control flags as well once it becomes available.
  if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
      Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
    SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
    SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
                                  Arg.getOperand(1), Zero);
    return DAG.getBitcast(OrigVT, NewNode);
  }

  // If we're negating an FMA node, then we can adjust the
  // instruction to include the extra negation.
  if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) {
    switch (Arg.getOpcode()) {
    case ISD::FMA:
    case X86ISD::FMSUB:
    case X86ISD::FNMADD:
    case X86ISD::FNMSUB:
    case X86ISD::FMADD_RND:
    case X86ISD::FMSUB_RND:
    case X86ISD::FNMADD_RND:
    case X86ISD::FNMSUB_RND: {
      // We can't handle scalar intrinsic nodes here because they would only
      // invert one element and not the whole vector. But we could try to handle
      // a negation of the lower element only.
      unsigned NewOpcode = negateFMAOpcode(Arg.getOpcode(), false, false, true);
      return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT, Arg->ops()));
    }
    }
  }

  return SDValue();
}

char X86TargetLowering::isNegatibleForFree(SDValue Op, SelectionDAG &DAG,
                                           bool LegalOperations,
                                           bool ForCodeSize,
                                           unsigned Depth) const {
  // fneg patterns are removable even if they have multiple uses.
  if (isFNEG(DAG, Op.getNode(), Depth))
    return 2;

  // Don't recurse exponentially.
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return 0;

  EVT VT = Op.getValueType();
  EVT SVT = VT.getScalarType();
  switch (Op.getOpcode()) {
  case ISD::FMA:
  case X86ISD::FMSUB:
  case X86ISD::FNMADD:
  case X86ISD::FNMSUB:
  case X86ISD::FMADD_RND:
  case X86ISD::FMSUB_RND:
  case X86ISD::FNMADD_RND:
  case X86ISD::FNMSUB_RND: {
    if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
        !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
      break;

    // This is always negatible for free but we might be able to remove some
    // extra operand negations as well.
    for (int i = 0; i != 3; ++i) {
      char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
                                  ForCodeSize, Depth + 1);
      if (V == 2)
        return V;
    }
    return 1;
  }
  }

  return TargetLowering::isNegatibleForFree(Op, DAG, LegalOperations,
                                            ForCodeSize, Depth);
}

SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                                bool LegalOperations,
                                                bool ForCodeSize,
                                                unsigned Depth) const {
  // fneg patterns are removable even if they have multiple uses.
  if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth))
    return DAG.getBitcast(Op.getValueType(), Arg);

  EVT VT = Op.getValueType();
  EVT SVT = VT.getScalarType();
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case ISD::FMA:
  case X86ISD::FMSUB:
  case X86ISD::FNMADD:
  case X86ISD::FNMSUB:
  case X86ISD::FMADD_RND:
  case X86ISD::FMSUB_RND:
  case X86ISD::FNMADD_RND:
  case X86ISD::FNMSUB_RND: {
    if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
        !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
      break;

    // This is always negatible for free but we might be able to remove some
    // extra operand negations as well.
    SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
    for (int i = 0; i != 3; ++i) {
      char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
                                  ForCodeSize, Depth + 1);
      if (V == 2)
        NewOps[i] = getNegatedExpression(Op.getOperand(i), DAG, LegalOperations,
                                         ForCodeSize, Depth + 1);
    }

    bool NegA = !!NewOps[0];
    bool NegB = !!NewOps[1];
    bool NegC = !!NewOps[2];
    unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);

    // Fill in the non-negated ops with the original values.
    for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
      if (!NewOps[i])
        NewOps[i] = Op.getOperand(i);
    return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
  }
  }

  return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
                                              ForCodeSize, Depth);
}

static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {
  MVT VT = N->getSimpleValueType(0);
  // If we have integer vector types available, use the integer opcodes.
  if (!VT.isVector() || !Subtarget.hasSSE2())
    return SDValue();

  SDLoc dl(N);

  unsigned IntBits = VT.getScalarSizeInBits();
  MVT IntSVT = MVT::getIntegerVT(IntBits);
  MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);

  SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
  SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
  unsigned IntOpcode;
  switch (N->getOpcode()) {
  default: llvm_unreachable("Unexpected FP logic op");
  case X86ISD::FOR:   IntOpcode = ISD::OR; break;
  case X86ISD::FXOR:  IntOpcode = ISD::XOR; break;
  case X86ISD::FAND:  IntOpcode = ISD::AND; break;
  case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
  }
  SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
  return DAG.getBitcast(VT, IntOp);
}


/// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
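/// e.g. (an assumed concrete instance):
///   xor (X86ISD::SETCC COND_E, EFLAGS), 1 --> X86ISD::SETCC COND_NE, EFLAGS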
static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() != ISD::XOR)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  auto *RHSC = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!RHSC || RHSC->getZExtValue() != 1 || LHS->getOpcode() != X86ISD::SETCC)
    return SDValue();

  X86::CondCode NewCC = X86::GetOppositeBranchCondition(
      X86::CondCode(LHS->getConstantOperandVal(0)));
  SDLoc DL(N);
  return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
}

static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  // If this is SSE1 only convert to FXOR to avoid scalarization.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
      N->getValueType(0) == MVT::v4i32) {
    return DAG.getBitcast(
        MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
                                DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
                                DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
  }

  if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
    return Cmp;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue SetCC = foldXor1SetCC(N, DAG))
    return SetCC;

  if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
    return RV;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
    return FPLogic;

  return combineFneg(N, DAG, Subtarget);
}

static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  unsigned NumBits = VT.getSizeInBits();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // TODO - Constant Folding.
  if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
    // Reduce Cst1 to the bottom 16-bits.
    // NOTE: SimplifyDemandedBits won't do this for constants.
    const APInt &Val1 = Cst1->getAPIntValue();
    APInt MaskedVal1 = Val1 & 0xFFFF;
    if (MaskedVal1 != Val1)
      return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0,
                         DAG.getConstant(MaskedVal1, SDLoc(N), VT));
  }

  // Only the bottom 16 bits of the control are required (BEXTR packs the start
  // index in bits [7:0] and the length in bits [15:8]).
  APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16));
  if (TLI.SimplifyDemandedBits(Op1, DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}

static bool isNullFPScalarOrVectorConst(SDValue V) {
  return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
}

/// If a value is a scalar FP zero or a vector FP zero (potentially including
/// undefined elements), return a zero constant that may be used to fold away
/// that value. In the case of a vector, the returned constant will not contain
/// undefined elements even if the input parameter does. This makes it suitable
/// to be used as a replacement operand with operations (e.g., bitwise-and) where
/// an undef should not propagate.
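/// For example (a sketch): <4 x float> <0.0, undef, 0.0, 0.0> is replaced by an
/// all-zeros vector so the undef lane cannot leak through a bitwise-and fold.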
static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  if (!isNullFPScalarOrVectorConst(V))
    return SDValue();

  if (V.getValueType().isVector())
    return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));

  return V;
}

static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
  if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
        (VT == MVT::f64 && Subtarget.hasSSE2()) ||
        (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
    return SDValue();

  auto isAllOnesConstantFP = [](SDValue V) {
    if (V.getSimpleValueType().isVector())
      return ISD::isBuildVectorAllOnes(V.getNode());
    auto *C = dyn_cast<ConstantFPSDNode>(V);
    return C && C->getConstantFPValue()->isAllOnesValue();
  };

  // fand (fxor X, -1), Y --> fandn X, Y
  if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
    return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);

  // fand X, (fxor Y, -1) --> fandn Y, X
  if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
    return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);

  return SDValue();
}

/// Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
  // FAND(0.0, x) -> 0.0
  if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
    return V;

  // FAND(x, 0.0) -> 0.0
  if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
    return V;

  if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
    return V;

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FANDN nodes.
static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
                            const X86Subtarget &Subtarget) {
  // FANDN(0.0, x) -> x
  if (isNullFPScalarOrVectorConst(N->getOperand(0)))
    return N->getOperand(1);

  // FANDN(x, 0.0) -> 0.0
  if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
    return V;

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
                          const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);

  // F[X]OR(0.0, x) -> x
  if (isNullFPScalarOrVectorConst(N->getOperand(0)))
    return N->getOperand(1);

  // F[X]OR(x, 0.0) -> x
  if (isNullFPScalarOrVectorConst(N->getOperand(1)))
    return N->getOperand(0);

  if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
    return NewVal;

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);

  // Only perform optimizations if UnsafeMath is used.
  if (!DAG.getTarget().Options.UnsafeFPMath)
    return SDValue();

  // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
  // into FMAXC and FMINC, which are commutative operations.
  unsigned NewOp = 0;
  switch (N->getOpcode()) {
    default: llvm_unreachable("unknown opcode");
    case X86ISD::FMIN:  NewOp = X86ISD::FMINC; break;
    case X86ISD::FMAX:  NewOp = X86ISD::FMAXC; break;
  }

  return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
                     N->getOperand(0), N->getOperand(1));
}

static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  if (Subtarget.useSoftFloat())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  EVT VT = N->getValueType(0);
  if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
        (Subtarget.hasSSE2() && VT == MVT::f64) ||
        (VT.isVector() && TLI.isTypeLegal(VT))))
    return SDValue();

  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDLoc DL(N);
  auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;

  // If we don't have to respect NaN inputs, this is a direct translation to x86
  // min/max instructions.
  if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
    return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());

  // If one of the operands is known non-NaN use the native min/max instructions
  // with the non-NaN input as second operand.
  if (DAG.isKnownNeverNaN(Op1))
    return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
  if (DAG.isKnownNeverNaN(Op0))
    return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());

  // If we have to respect NaN inputs, this takes at least 3 instructions.
  // Favor a library call when operating on a scalar and minimizing code size.
  if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
    return SDValue();

  EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                         VT);

  // There are 4 possibilities involving NaN inputs, and these are the required
  // outputs:
  //                   Op1
  //               Num     NaN
  //            ----------------
  //       Num  |  Max  |  Op0 |
  // Op0        ----------------
  //       NaN  |  Op1  |  NaN |
  //            ----------------
  //
  // The SSE FP max/min instructions were not designed for this case, but rather
  // to implement:
  //   Min = Op1 < Op0 ? Op1 : Op0
  //   Max = Op1 > Op0 ? Op1 : Op0
  //
  // So they always return Op0 if either input is a NaN. However, we can still
  // use those instructions for fmaxnum by selecting away a NaN input.

  // If either operand is NaN, the 2nd source operand (Op0) is passed through.
  SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
  SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);

  // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
  // are NaN, the NaN value of Op1 is the result.
  return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
}

static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  APInt KnownUndef, KnownZero;
  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);

  // Convert a full vector load into vzload when not all bits are needed.
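  // For example (an assumed instance): a conversion node that consumes v4i32
  // but only produces v2f64 needs just the low 64 bits, so a full 128-bit load
  // feeding it can be shrunk to a 64-bit VZEXT_LOAD.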
  SDValue In = N->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
      ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
    assert(InVT.is128BitVector() && "Expected 128-bit input vector");
    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
    // Unless the load is volatile or atomic.
    if (LN->isSimple()) {
      SDLoc dl(N);
      unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
      MVT MemVT = MVT::getIntegerVT(NumBits);
      MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
      SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue VZLoad =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
                                  LN->getPointerInfo(),
                                  LN->getAlignment(),
                                  LN->getMemOperand()->getFlags());
      SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
                                    DAG.getBitcast(InVT, VZLoad));
      DCI.CombineTo(N, Convert);
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);

  // Convert a full vector load into vzload when not all bits are needed.
  SDValue In = N->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
      ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
    assert(InVT.is128BitVector() && "Expected 128-bit input vector");
    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
    // Unless the load is volatile or atomic.
    if (LN->isSimple()) {
      SDLoc dl(N);
      unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
      MVT MemVT = MVT::getFloatingPointVT(NumBits);
      MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
      SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue VZLoad =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
                                  LN->getPointerInfo(),
                                  LN->getAlignment(),
                                  LN->getMemOperand()->getFlags());
      SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
                                    DAG.getBitcast(InVT, VZLoad));
      DCI.CombineTo(N, Convert);
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

/// Do target-specific dag combines on X86ISD::ANDNP nodes.
static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  MVT VT = N->getSimpleValueType(0);

  // ANDNP(0, x) -> x
  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
    return N->getOperand(1);

  // ANDNP(x, 0) -> 0
  if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
    return DAG.getConstant(0, SDLoc(N), VT);

  // Turn ANDNP back to AND if input is inverted.
  if (SDValue Not = IsNOT(N->getOperand(0), DAG))
    return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
                       N->getOperand(1));

  // Attempt to recursively combine a bitmask ANDNP with shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  return SDValue();
}

static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
                         TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // BT ignores high bits in the bit index operand.
  unsigned BitWidth = N1.getValueSizeInBits();
  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
  if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask))
    return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1);

  return SDValue();
}

// Try to combine sext_in_reg of a cmov of constants by extending the constants.
static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);

  EVT DstVT = N->getValueType(0);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();

  if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
    return SDValue();

  // Look through single use any_extends / truncs.
  SDValue IntermediateBitwidthOp;
  if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
      N0.hasOneUse()) {
    IntermediateBitwidthOp = N0;
    N0 = N0.getOperand(0);
  }

  // See if we have a single use cmov.
  if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
    return SDValue();

  SDValue CMovOp0 = N0.getOperand(0);
  SDValue CMovOp1 = N0.getOperand(1);

  // Make sure both operands are constants.
  if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
      !isa<ConstantSDNode>(CMovOp1.getNode()))
    return SDValue();

  SDLoc DL(N);

  // If we looked through an any_extend/trunc above, apply the same operation to
  // the constants.
  if (IntermediateBitwidthOp) {
    unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
    CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
    CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
  }

  CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
  CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);

  EVT CMovVT = DstVT;
  // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
  if (DstVT == MVT::i16) {
    CMovVT = MVT::i32;
    CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
    CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
  }

  SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
                             N0.getOperand(2), N0.getOperand(3));

  if (CMovVT != DstVT)
    CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);

  return CMov;
}

static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);

  if (SDValue V = combineSextInRegCmov(N, DAG))
    return V;

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
  SDLoc dl(N);

  // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and
  // AVX2 since there is no sign-extended shift-right operation on a vector
  // with 64-bit elements.
  // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
  //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
  if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
      N0.getOpcode() == ISD::SIGN_EXTEND)) {
    SDValue N00 = N0.getOperand(0);

    // EXTLOAD has a better solution on AVX2: it may be replaced with an
    // X86ISD::VSEXT node.
    if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
      if (!ISD::isNormalLoad(N00.getNode()))
        return SDValue();

    if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
      SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
                                N00, N1);
      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
    }
  }
  return SDValue();
}

/// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
/// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
/// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
/// opportunities to combine math ops, use an LEA, or use a complex addressing
/// mode. This can eliminate extend, add, and shift instructions.
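/// For example (a sketch, not a guaranteed final selection):
///   (shl (sext (add nsw x, 42)), 3) --> (shl (add (sext x), 42), 3)
/// which exposes the add and shl to LEA formation (index * scale + disp).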
static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
      Ext->getOpcode() != ISD::ZERO_EXTEND)
    return SDValue();

  // TODO: This should be valid for other integer types.
  EVT VT = Ext->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  SDValue Add = Ext->getOperand(0);
  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
  bool NSW = Add->getFlags().hasNoSignedWrap();
  bool NUW = Add->getFlags().hasNoUnsignedWrap();

  // We need an 'add nsw' feeding into the 'sext' or an 'add nuw' feeding
  // into the 'zext'.
  if ((Sext && !NSW) || (!Sext && !NUW))
    return SDValue();

  // Having a constant operand to the 'add' ensures that we are not increasing
  // the instruction count because the constant is extended for free below.
  // A constant operand can also become the displacement field of an LEA.
  auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
  if (!AddOp1)
    return SDValue();

  // Don't make the 'add' bigger if there's no hope of combining it with some
  // other 'add' or 'shl' instruction.
  // TODO: It may be profitable to generate simpler LEA instructions in place
  // of single 'add' instructions, but the cost model for selecting an LEA
  // currently has a high threshold.
  bool HasLEAPotential = false;
  for (auto *User : Ext->uses()) {
    if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
      HasLEAPotential = true;
      break;
    }
  }
  if (!HasLEAPotential)
    return SDValue();

  // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
  int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
  SDValue AddOp0 = Add.getOperand(0);
  SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
  SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);

  // The wider add is guaranteed not to wrap because both operands are
  // extended (sign-extended for the nsw case, zero-extended for nuw).
  SDNodeFlags Flags;
  Flags.setNoSignedWrap(NSW);
  Flags.setNoUnsignedWrap(NUW);
  return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
}

// If we face an {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
// operands and the result of the CMOV is not used anywhere else, promote the
// CMOV itself instead of promoting its result. This could be beneficial,
// because:
//     1) X86TargetLowering::EmitLoweredSelect later can do merging of two
//        (or more) pseudo-CMOVs only when they go one-after-another and
//        getting rid of result extension code after CMOV will help that.
//     2) Promotion of constant CMOV arguments is free, hence the
//        {ANY,SIGN,ZERO}_EXTEND will just be deleted.
//     3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3 bytes, so this
//        promotion is also good in terms of code-size.
//        (64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit
//         promotion).
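// A rough example of case 2:
//   (i32 zext (i16 cmov C1, C2, cc, flags))
//     --> (i32 cmov (i32 zext C1), (i32 zext C2), cc, flags)
// where the zexts of the constant operands fold away for free.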
static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
  SDValue CMovN = Extend->getOperand(0);
  if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
    return SDValue();

  EVT TargetVT = Extend->getValueType(0);
  unsigned ExtendOpcode = Extend->getOpcode();
  SDLoc DL(Extend);

  EVT VT = CMovN.getValueType();
  SDValue CMovOp0 = CMovN.getOperand(0);
  SDValue CMovOp1 = CMovN.getOperand(1);

  if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
      !isa<ConstantSDNode>(CMovOp1.getNode()))
    return SDValue();

  // Only extend to i32 or i64.
  if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
    return SDValue();

  // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from
  // i32 are free.
  if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
    return SDValue();

  // If this is a zero extend to i64, we should only extend to i32 and use a
  // free zero extend to finish.
  EVT ExtendVT = TargetVT;
  if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
    ExtendVT = MVT::i32;

  CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
  CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);

  SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
                            CMovN.getOperand(2), CMovN.getOperand(3));

  // Finish extending if needed.
  if (ExtendVT != TargetVT)
    Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);

  return Res;
}

// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
// This is more or less the reverse of combineBitcastvxi1.
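// A rough sketch for (v8i16 sext (v8i1 bitcast (i8 x))): broadcast x to all
// eight lanes, AND each lane with its bit (1 << lane), compare the result for
// equality against the same bit mask to get all-ones/all-zero lanes, and
// (for zext/aext) shift the result down to 0/1 per lane.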
static SDValue
combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
      Opcode != ISD::ANY_EXTEND)
    return SDValue();
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SVT = VT.getScalarType();
  EVT InSVT = N0.getValueType().getScalarType();
  unsigned EltSizeInBits = SVT.getSizeInBits();

  // We must be extending a bool vector (bitcast from a scalar integer) to a
  // vector with legal integer element types.
  if (!VT.isVector())
    return SDValue();
  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
    return SDValue();
  if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  EVT SclVT = N0.getOperand(0).getValueType();
  if (!SclVT.isScalarInteger())
    return SDValue();

  SDLoc DL(N);
  SDValue Vec;
  SmallVector<int, 32> ShuffleMask;
  unsigned NumElts = VT.getVectorNumElements();
  assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");

  // Broadcast the scalar integer to the vector elements.
  if (NumElts > EltSizeInBits) {
    // If the scalar integer is greater than the vector element size, then we
    // must split it down into sub-sections for broadcasting. For example:
    //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
    //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
    assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
    unsigned Scale = NumElts / EltSizeInBits;
    EVT BroadcastVT =
        EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
    Vec = DAG.getBitcast(VT, Vec);

    for (unsigned i = 0; i != Scale; ++i)
      ShuffleMask.append(EltSizeInBits, i);
  } else {
    // For a smaller scalar integer, we can simply any-extend it to the vector
    // element size (we don't care about the upper bits) and broadcast it to
    // all elements.
    SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
    ShuffleMask.append(NumElts, 0);
  }
  Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);

  // Now, mask the relevant bit in each element.
  SmallVector<SDValue, 32> Bits;
  for (unsigned i = 0; i != NumElts; ++i) {
    int BitIdx = (i % EltSizeInBits);
    APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
    Bits.push_back(DAG.getConstant(Bit, DL, SVT));
  }
  SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
  Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);

  // Compare against the bitmask and extend the result.
  EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
  Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
  Vec = DAG.getSExtOrTrunc(Vec, DL, VT);

  // For SEXT, this is now done, otherwise shift the result down for
  // zero-extension.
  if (Opcode == ISD::SIGN_EXTEND)
    return Vec;
  return DAG.getNode(ISD::SRL, DL, VT, Vec,
                     DAG.getConstant(EltSizeInBits - 1, DL, VT));
}

// Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
// result type.
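// e.g. with AVX512 (a sketch):
//   (v8i32 sext (v8i1 setcc (v8i32 a, v8i32 b, SETGT)))
//     --> (v8i32 setcc (v8i32 a, v8i32 b, SETGT))
// i.e. the extension is folded into a compare that produces the full-width
// result directly.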
static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // Only do this combine with AVX512 for vector extends.
  if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
    return SDValue();

  // Only combine legal element types.
  EVT SVT = VT.getVectorElementType();
  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
      SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
    return SDValue();

  // We can only do this if the vector size is 256 bits or less.
  unsigned Size = VT.getSizeInBits();
  if (Size > 256)
    return SDValue();

  // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
  // those are the only integer compares we have.
  ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
  if (ISD::isUnsignedIntSetCC(CC))
    return SDValue();

  // Only do this combine if the extension will be fully consumed by the setcc.
  EVT N00VT = N0.getOperand(0).getValueType();
  EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
  if (Size != MatchingVecType.getSizeInBits())
    return SDValue();

  SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);

  if (N->getOpcode() == ISD::ZERO_EXTEND)
    Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType());

  return Res;
}

static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT InVT = N0.getValueType();
  SDLoc DL(N);

  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
    return NewCMov;

  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
    return V;

  if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
      isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
    // Inverting and sign-extending a boolean is the same as zero-extending
    // and subtracting 1, because 0 becomes -1 and 1 becomes 0. The subtract
    // is efficiently lowered with an LEA or a DEC. This is the same as:
    // select Bool, 0, -1.
    // sext (xor Bool, -1) --> sub (zext Bool), 1
    SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
    return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
  }

  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (VT.isVector())
    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
      return R;

  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
    return NewAdd;

  return SDValue();
}

static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  EVT ScalarVT = VT.getScalarType();
  if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
    return SDValue();

  SDValue A = N->getOperand(0);
  SDValue B = N->getOperand(1);
  SDValue C = N->getOperand(2);

  auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
    bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
    bool LegalOperations = !DCI.isBeforeLegalizeOps();
    if (TLI.isNegatibleForFree(V, DAG, LegalOperations, CodeSize) == 2) {
      V = TLI.getNegatedExpression(V, DAG, LegalOperations, CodeSize);
      return true;
    }
    // Look through extract_vector_elts. If it comes from an FNEG, create a
    // new extract from the FNEG input.
    if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        isNullConstant(V.getOperand(1))) {
      SDValue Vec = V.getOperand(0);
      if (TLI.isNegatibleForFree(Vec, DAG, LegalOperations, CodeSize) == 2) {
        SDValue NegVal =
            TLI.getNegatedExpression(Vec, DAG, LegalOperations, CodeSize);
        V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
                        NegVal, V.getOperand(1));
        return true;
      }
    }

    return false;
  };

  // Do not convert the passthru input of scalar intrinsics.
  // FIXME: We could allow negations of the lower element only.
  bool NegA = invertIfNegative(A);
  bool NegB = invertIfNegative(B);
  bool NegC = invertIfNegative(C);

  if (!NegA && !NegB && !NegC)
    return SDValue();

  unsigned NewOpcode =
      negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);

  if (N->getNumOperands() == 4)
    return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
  return DAG.getNode(NewOpcode, dl, VT, A, B, C);
}

// Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
// Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOperations = !DCI.isBeforeLegalizeOps();

  SDValue N2 = N->getOperand(2);
  if (TLI.isNegatibleForFree(N2, DAG, LegalOperations, CodeSize) != 2)
    return SDValue();

  SDValue NegN2 = TLI.getNegatedExpression(N2, DAG, LegalOperations, CodeSize);
  unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);

  if (N->getNumOperands() == 4)
    return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
                       NegN2, N->getOperand(3));
  return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
                     NegN2);
}

static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  // (i32 zext (and (i8  x86isd::setcc_carry), 1)) ->
  //           (and (i32 x86isd::setcc_carry), 1)
  // This eliminates the zext. This transformation is necessary because
  // ISD::SETCC is always legalized to i8.
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (N0.getOpcode() == ISD::AND &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      if (!isOneConstant(N0.getOperand(1)))
        return SDValue();
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, dl, VT));
    }
  }

  if (N0.getOpcode() == ISD::TRUNCATE &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, dl, VT));
    }
  }

  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
    return NewCMov;

  if (DCI.isBeforeLegalizeOps())
    if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
      return V;

  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (VT.isVector())
    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
      return R;

  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
    return NewAdd;

  if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
    return R;

  // TODO: Combine with any target/faux shuffle.
  if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
      VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
    SDValue N00 = N0.getOperand(0);
    SDValue N01 = N0.getOperand(1);
    unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
    APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
    if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
        (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
      return concatSubVectors(N00, N01, DAG, dl);
    }
  }

  return SDValue();
}

/// Try to map a 128-bit or larger integer comparison to vector instructions
/// before type legalization splits it up into chunks.
static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
                                               const X86Subtarget &Subtarget) {
  ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
  assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");

  // We're looking for an oversized integer equality comparison.
  SDValue X = SetCC->getOperand(0);
  SDValue Y = SetCC->getOperand(1);
  EVT OpVT = X.getValueType();
  unsigned OpSize = OpVT.getSizeInBits();
  if (!OpVT.isScalarInteger() || OpSize < 128)
    return SDValue();

  // Ignore a comparison with zero because that gets special treatment in
  // EmitTest(). But make an exception for the special case of a pair of
  // logically-combined vector-sized operands compared to zero. This pattern may
  // be generated by the memcmp expansion pass with oversized integer compares
  // (see PR33325).
  bool IsOrXorXorCCZero = isNullConstant(Y) && X.getOpcode() == ISD::OR &&
                          X.getOperand(0).getOpcode() == ISD::XOR &&
                          X.getOperand(1).getOpcode() == ISD::XOR;
  if (isNullConstant(Y) && !IsOrXorXorCCZero)
    return SDValue();

  // Don't perform this combine if constructing the vector will be expensive.
  auto IsVectorBitCastCheap = [](SDValue X) {
    X = peekThroughBitcasts(X);
    return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
           X.getOpcode() == ISD::LOAD;
  };
  if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
      !IsOrXorXorCCZero)
    return SDValue();

  EVT VT = SetCC->getValueType(0);
  SDLoc DL(SetCC);
  bool HasAVX = Subtarget.hasAVX();

  // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
  // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
  // Otherwise use PCMPEQ (plus AND) and mask testing.
  if ((OpSize == 128 && Subtarget.hasSSE2()) ||
      (OpSize == 256 && HasAVX) ||
      (OpSize == 512 && Subtarget.useAVX512Regs())) {
    bool HasPT = Subtarget.hasSSE41();

    // PTEST and MOVMSK are slow on Knights Landing and Knights Mill, and
    // widened vector registers are essentially free. (Technically, widening
    // registers prevents load folding, but the tradeoff is worth it.)
    bool PreferKOT = Subtarget.preferMaskRegisters();
    bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;

    EVT VecVT = MVT::v16i8;
    EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
    if (OpSize == 256) {
      VecVT = MVT::v32i8;
      CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
    }
    EVT CastVT = VecVT;
    bool NeedsAVX512FCast = false;
    if (OpSize == 512 || NeedZExt) {
      if (Subtarget.hasBWI()) {
        VecVT = MVT::v64i8;
        CmpVT = MVT::v64i1;
        if (OpSize == 512)
          CastVT = VecVT;
      } else {
        VecVT = MVT::v16i32;
        CmpVT = MVT::v16i1;
        CastVT = OpSize == 512 ? VecVT :
                 OpSize == 256 ? MVT::v8i32 : MVT::v4i32;
        NeedsAVX512FCast = true;
      }
    }

    auto ScalarToVector = [&](SDValue X) -> SDValue {
      bool TmpZext = false;
      EVT TmpCastVT = CastVT;
      if (X.getOpcode() == ISD::ZERO_EXTEND) {
        SDValue OrigX = X.getOperand(0);
        unsigned OrigSize = OrigX.getScalarValueSizeInBits();
        if (OrigSize < OpSize) {
          if (OrigSize == 128) {
            TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
            X = OrigX;
            TmpZext = true;
          } else if (OrigSize == 256) {
            TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
            X = OrigX;
            TmpZext = true;
          }
        }
      }
      X = DAG.getBitcast(TmpCastVT, X);
      if (!NeedZExt && !TmpZext)
        return X;
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      MVT VecIdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
      return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
                         DAG.getConstant(0, DL, VecVT), X,
                         DAG.getConstant(0, DL, VecIdxVT));
    };

    SDValue Cmp;
    if (IsOrXorXorCCZero) {
      // This is a bitwise-combined equality comparison of 2 pairs of vectors:
      // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
      // Use 2 vector equality compares and 'and' the results before doing a
      // MOVMSK.
      SDValue A = ScalarToVector(X.getOperand(0).getOperand(0));
      SDValue B = ScalarToVector(X.getOperand(0).getOperand(1));
      SDValue C = ScalarToVector(X.getOperand(1).getOperand(0));
      SDValue D = ScalarToVector(X.getOperand(1).getOperand(1));
      if (VecVT != CmpVT) {
        SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
        SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETNE);
        Cmp = DAG.getNode(ISD::OR, DL, CmpVT, Cmp1, Cmp2);
      } else if (HasPT) {
        SDValue Cmp1 = DAG.getNode(ISD::XOR, DL, VecVT, A, B);
        SDValue Cmp2 = DAG.getNode(ISD::XOR, DL, VecVT, C, D);
        Cmp = DAG.getNode(ISD::OR, DL, VecVT, Cmp1, Cmp2);
      } else {
        SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
        SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETEQ);
        Cmp = DAG.getNode(ISD::AND, DL, CmpVT, Cmp1, Cmp2);
      }
    } else {
      SDValue VecX = ScalarToVector(X);
      SDValue VecY = ScalarToVector(Y);
      if (VecVT != CmpVT) {
        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
      } else if (HasPT) {
        Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
      } else {
        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
      }
    }
    // AVX512 should emit a setcc that will lower to kortest.
    if (VecVT != CmpVT) {
      EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 :
                   CmpVT == MVT::v32i1 ? MVT::i32 : MVT::i16;
      return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
                          DAG.getConstant(0, DL, KRegVT), CC);
    }
    if (HasPT) {
      SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
                                     Cmp);
      SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
      X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
      SDValue SetCC = getSETCC(X86CC, PT, DL, DAG);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, SetCC.getValue(0));
    }
    // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
    // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
    // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
    // setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
    // setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
    SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
    SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
                                    MVT::i32);
    return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
  }

  return SDValue();
}

static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
                            const X86Subtarget &Subtarget) {
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = N->getValueType(0);
  EVT OpVT = LHS.getValueType();
  SDLoc DL(N);

  if (CC == ISD::SETNE || CC == ISD::SETEQ) {
    // 0-x == y --> x+y == 0
    // 0-x != y --> x+y != 0
    if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
        LHS.hasOneUse()) {
      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
    }
    // x == 0-y --> x+y == 0
    // x != 0-y --> x+y != 0
    if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
        RHS.hasOneUse()) {
      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
    }

    if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
      return V;
  }

  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
      (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
    // Put build_vectors on the right.
    if (LHS.getOpcode() == ISD::BUILD_VECTOR) {
      std::swap(LHS, RHS);
      CC = ISD::getSetCCSwappedOperands(CC);
    }

    bool IsSEXT0 =
        (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
        (LHS.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
    bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());

    if (IsSEXT0 && IsVZero1) {
      assert(VT == LHS.getOperand(0).getValueType() &&
             "Unexpected operand type");
      if (CC == ISD::SETGT)
        return DAG.getConstant(0, DL, VT);
      if (CC == ISD::SETLE)
        return DAG.getConstant(1, DL, VT);
      if (CC == ISD::SETEQ || CC == ISD::SETGE)
        return DAG.getNOT(DL, LHS.getOperand(0), VT);

      assert((CC == ISD::SETNE || CC == ISD::SETLT) &&
             "Unexpected condition code!");
      return LHS.getOperand(0);
    }
  }

  // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
  // pre-promote its result type since vXi1 vectors don't get promoted
  // during type legalization.
  // NOTE: The element count check is to ignore operand types that need to
  // go through type promotion to a 128-bit vector.
  if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
      VT.getVectorElementType() == MVT::i1 &&
      (OpVT.getVectorElementType() == MVT::i8 ||
       OpVT.getVectorElementType() == MVT::i16)) {
    SDValue Setcc = DAG.getNode(ISD::SETCC, DL, OpVT, LHS, RHS,
                                N->getOperand(2));
    return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
  }

  // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
  // to avoid scalarization via legalization because v4i32 is not a legal type.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
      LHS.getValueType() == MVT::v4f32)
    return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);

  return SDValue();
}

static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {
  SDValue Src = N->getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = N->getSimpleValueType(0);
  unsigned NumBits = VT.getScalarSizeInBits();
  unsigned NumElts = SrcVT.getVectorNumElements();

  // Perform constant folding.
  if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
    assert(VT == MVT::i32 && "Unexpected result type");
    APInt Imm(32, 0);
    for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
      if (!Src.getOperand(Idx).isUndef() &&
          Src.getConstantOperandAPInt(Idx).isNegative())
        Imm.setBit(Idx);
    }
    return DAG.getConstant(Imm, SDLoc(N), VT);
  }

  // Look through int->fp bitcasts that don't change the element width.
  unsigned EltWidth = SrcVT.getScalarSizeInBits();
  if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
      Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
    return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));

  // Fold movmsk(not(x)) -> not(movmsk) to improve folding of movmsk results
  // with scalar comparisons.
  if (SDValue NotSrc = IsNOT(Src, DAG)) {
    SDLoc DL(N);
    APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
    NotSrc = DAG.getBitcast(SrcVT, NotSrc);
    return DAG.getNode(ISD::XOR, DL, VT,
                       DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
                       DAG.getConstant(NotMask, DL, VT));
  }

  // Simplify the inputs.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedMask(APInt::getAllOnesValue(NumBits));
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}

static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  // With vector masks we only demand the upper bit of the mask.
  SDValue Mask = cast<X86MaskedGatherScatterSDNode>(N)->getMask();
  if (Mask.getScalarValueSizeInBits() != 1) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
      return SDValue(N, 0);
  }

  return SDValue();
}

static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  SDLoc DL(N);
  auto *GorS = cast<MaskedGatherScatterSDNode>(N);
  SDValue Chain = GorS->getChain();
  SDValue Index = GorS->getIndex();
  SDValue Mask = GorS->getMask();
  SDValue Base = GorS->getBasePtr();
  SDValue Scale = GorS->getScale();

  if (DCI.isBeforeLegalize()) {
    unsigned IndexWidth = Index.getScalarValueSizeInBits();

    // Shrink constant indices if they are larger than 32-bits.
    // Only do this before legalize types since v2i64 could become v2i32.
    // FIXME: We could check that the type is legal if we're after legalize
    // types, but then we would need to construct test cases where that happens.
    // FIXME: We could support more than just constant vectors, but we need to
    // be careful with costing. A truncate that can be optimized out would be
    // fine.
    // Otherwise we might only want to create a truncate if it avoids a split.
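    // e.g. (a sketch): a v2i64 constant index <16, 48> has well over 32 sign
    // bits per element, so it can be shrunk to a v2i32 index.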
    if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
      if (BV->isConstant() && IndexWidth > 32 &&
          DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
        unsigned NumElts = Index.getValueType().getVectorNumElements();
        EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
        Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
        if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
          SDValue Ops[] = { Chain, Gather->getPassThru(),
                            Mask, Base, Index, Scale };
          return DAG.getMaskedGather(Gather->getVTList(),
                                     Gather->getMemoryVT(), DL, Ops,
                                     Gather->getMemOperand(),
                                     Gather->getIndexType());
        }
        auto *Scatter = cast<MaskedScatterSDNode>(GorS);
        SDValue Ops[] = { Chain, Scatter->getValue(),
                          Mask, Base, Index, Scale };
        return DAG.getMaskedScatter(Scatter->getVTList(),
                                    Scatter->getMemoryVT(), DL,
                                    Ops, Scatter->getMemOperand(),
                                    Scatter->getIndexType());
      }
    }

    // Shrink any sign/zero extends whose source is 32 bits or smaller but
    // whose result is wider than 32 bits, if there are sufficient sign bits.
    // Only do this before legalize types to avoid creating illegal types in
    // truncate.
    if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
         Index.getOpcode() == ISD::ZERO_EXTEND) &&
        IndexWidth > 32 &&
        Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
        DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
      unsigned NumElts = Index.getValueType().getVectorNumElements();
      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
      Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
      if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
        SDValue Ops[] = { Chain, Gather->getPassThru(),
                          Mask, Base, Index, Scale };
        return DAG.getMaskedGather(Gather->getVTList(),
                                   Gather->getMemoryVT(), DL, Ops,
                                   Gather->getMemOperand(),
                                   Gather->getIndexType());
      }
      auto *Scatter = cast<MaskedScatterSDNode>(GorS);
      SDValue Ops[] = { Chain, Scatter->getValue(),
                        Mask, Base, Index, Scale };
      return DAG.getMaskedScatter(Scatter->getVTList(),
                                  Scatter->getMemoryVT(), DL,
                                  Ops, Scatter->getMemOperand(),
                                  Scatter->getIndexType());
    }
  }

  if (DCI.isBeforeLegalizeOps()) {
    unsigned IndexWidth = Index.getScalarValueSizeInBits();

    // Make sure the index element type is either i32 or i64.
    if (IndexWidth != 32 && IndexWidth != 64) {
      MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
      EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
                                   Index.getValueType().getVectorNumElements());
      Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
      if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
        SDValue Ops[] = { Chain, Gather->getPassThru(),
                          Mask, Base, Index, Scale };
        return DAG.getMaskedGather(Gather->getVTList(),
                                   Gather->getMemoryVT(), DL, Ops,
                                   Gather->getMemOperand(),
                                   Gather->getIndexType());
      }
      auto *Scatter = cast<MaskedScatterSDNode>(GorS);
      SDValue Ops[] = { Chain, Scatter->getValue(),
                        Mask, Base, Index, Scale };
      return DAG.getMaskedScatter(Scatter->getVTList(),
                                  Scatter->getMemoryVT(), DL,
                                  Ops, Scatter->getMemOperand(),
                                  Scatter->getIndexType());
    }
  }

  // With vector masks we only demand the upper bit of the mask.
  if (Mask.getScalarValueSizeInBits() != 1) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
      return SDValue(N, 0);
  }

  return SDValue();
}

// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
  SDValue EFLAGS = N->getOperand(1);

  // Try to simplify the EFLAGS and condition code operands.
  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
    return getSETCC(CC, Flags, DL, DAG);

  return SDValue();
}

/// Optimize branch condition evaluation.
static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
                             const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  SDValue EFLAGS = N->getOperand(3);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));

  // Try to simplify the EFLAGS and condition code operands.
  // Make sure to not keep references to operands, as combineSetCCEFLAGS can
  // RAUW them under us.
  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
    SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
                       N->getOperand(1), Cond, Flags);
  }

  return SDValue();
}

static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
                                                  SelectionDAG &DAG) {
  // Take advantage of vector comparisons producing 0 or -1 in each lane to
  // optimize away operation when it's from a constant.
  //
  // The general transformation is:
  //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
  //       AND(VECTOR_CMP(x,y), constant2)
  //    constant2 = UNARYOP(constant)
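  //
  // e.g. (a sketch for the sint_to_fp caller):
  //    (v4f32 sint_to_fp (and (vector_cmp x, y), <1, 1, 1, 1>))
  //      --> (v4f32 bitcast (and (vector_cmp x, y), (bitcast <1.0 x 4>)))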

  // Early exit if this isn't a vector operation, the operand of the
  // unary operation isn't a bitwise AND, or if the sizes of the operations
  // aren't the same.
  EVT VT = N->getValueType(0);
  if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
      N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
      VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
    return SDValue();

  // Now check that the other operand of the AND is a constant. We could
  // make the transformation for non-constant splats as well, but it's unclear
  // that would be a benefit as it would not eliminate any operations, just
  // perform one more step in scalar code before moving to the vector unit.
  if (auto *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(0).getOperand(1))) {
    // Bail out if the vector isn't a constant.
    if (!BV->isConstant())
      return SDValue();

    // Everything checks out. Build up the new and improved node.
    SDLoc DL(N);
    EVT IntVT = BV->getValueType(0);
    // Create a new constant of the appropriate type for the transformed
    // DAG.
    SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
    // The AND node needs bitcasts to/from an integer vector type around it.
    SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
    SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
                                 N->getOperand(0)->getOperand(0), MaskConst);
    SDValue Res = DAG.getBitcast(VT, NewAnd);
    return Res;
  }

  return SDValue();
}

/// If we are converting a value to floating-point, try to replace scalar
/// truncate of an extracted vector element with a bitcast. This tries to keep
/// the sequence on XMM registers rather than moving between vector and GPRs.
static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
  // TODO: This is currently only used by combineSIntToFP, but it is generalized
  //       to allow being called by any similar cast opcode.
  // TODO: Consider merging this into lowering: vectorizeExtractedCast().
  SDValue Trunc = N->getOperand(0);
  if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
    return SDValue();

  SDValue ExtElt = Trunc.getOperand(0);
  if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isNullConstant(ExtElt.getOperand(1)))
    return SDValue();

  EVT TruncVT = Trunc.getValueType();
  EVT SrcVT = ExtElt.getValueType();
  unsigned DestWidth = TruncVT.getSizeInBits();
  unsigned SrcWidth = SrcVT.getSizeInBits();
  if (SrcWidth % DestWidth != 0)
    return SDValue();

  // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
  EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
  unsigned VecWidth = SrcVecVT.getSizeInBits();
  unsigned NumElts = VecWidth / DestWidth;
  EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
  SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
  SDLoc DL(N);
  SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
                                  BitcastVec, ExtElt.getOperand(1));
  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
}

static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  SDValue Op0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT InVT = Op0.getValueType();

  // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
  // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
  // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
  if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
    SDLoc dl(N);
    EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 InVT.getVectorNumElements());
    SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);

    // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
  }

  // Since UINT_TO_FP is legal (it's marked custom), the DAG combiner won't
  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
  // the optimization here.
  if (DAG.SignBitIsZero(Op0))
    return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);

  return SDValue();
}

static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const X86Subtarget &Subtarget) {
  // First try to optimize away the conversion entirely when it's
  // conditionally from a constant. Vectors only.
  if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
    return Res;

  // Now move on to more general possibilities.
  SDValue Op0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT InVT = Op0.getValueType();

  // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
  // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
  // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
  if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
    SDLoc dl(N);
    EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 InVT.getVectorNumElements());
    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
  }

  // Without AVX512DQ we only support i64 to float scalar conversion. For both
  // vectors and scalars, see if we know that the upper bits are all the sign
  // bit, in which case we can truncate the input to i32 and convert from that.
  if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
    unsigned BitWidth = InVT.getScalarSizeInBits();
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
    if (NumSignBits >= (BitWidth - 31)) {
      EVT TruncVT = MVT::i32;
      if (InVT.isVector())
        TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
                                   InVT.getVectorNumElements());
      SDLoc dl(N);
      if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
        return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
      }
      // If we're after legalize and the type is v2i32 we need to shuffle and
      // use CVTSI2P.
      assert(InVT == MVT::v2i64 && "Unexpected VT!");
      SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
                                          { 0, 2, -1, -1 });
      return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
    }
  }

  // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
  // a 32-bit target where SSE doesn't support i64->FP operations.
  if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
      Op0.getOpcode() == ISD::LOAD) {
    LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
    EVT LdVT = Ld->getValueType(0);

    // This transformation is not supported if the result type is f16 or f128.
    if (VT == MVT::f16 || VT == MVT::f128)
      return SDValue();

    // If we have AVX512DQ we can use packed conversion instructions unless
    // the VT is f80.
    if (Subtarget.hasDQI() && VT != MVT::f80)
      return SDValue();

    if (Ld->isSimple() && !VT.isVector() &&
        ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
        !Subtarget.is64Bit() && LdVT == MVT::i64) {
      SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
          SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
      return FILDChain;
    }
  }

  if (SDValue V = combineToFPTruncExtElt(N, DAG))
    return V;

  return SDValue();
}

static bool needCarryOrOverflowFlag(SDValue Flags) {
  assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");

  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
         UI != UE; ++UI) {
    SDNode *User = *UI;

    X86::CondCode CC;
    switch (User->getOpcode()) {
    default:
      // Be conservative.
      return true;
    case X86ISD::SETCC:
    case X86ISD::SETCC_CARRY:
      CC = (X86::CondCode)User->getConstantOperandVal(0);
      break;
    case X86ISD::BRCOND:
      CC = (X86::CondCode)User->getConstantOperandVal(2);
      break;
    case X86ISD::CMOV:
      CC = (X86::CondCode)User->getConstantOperandVal(2);
      break;
    }

    switch (CC) {
    default: break;
    case X86::COND_A: case X86::COND_AE:
    case X86::COND_B: case X86::COND_BE:
    case X86::COND_O: case X86::COND_NO:
    case X86::COND_G: case X86::COND_GE:
    case X86::COND_L: case X86::COND_LE:
      return true;
    }
  }

  return false;
}

static bool onlyZeroFlagUsed(SDValue Flags) {
  assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");

  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
         UI != UE; ++UI) {
    SDNode *User = *UI;

    unsigned CCOpNo;
    switch (User->getOpcode()) {
    default:
      // Be conservative.
      return false;
    case X86ISD::SETCC:       CCOpNo = 0; break;
    case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
    case X86ISD::BRCOND:      CCOpNo = 2; break;
    case X86ISD::CMOV:        CCOpNo = 2; break;
    }

    X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
    if (CC != X86::COND_E && CC != X86::COND_NE)
      return false;
  }

  return true;
}

static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
  // Only handle test patterns.
  if (!isNullConstant(N->getOperand(1)))
    return SDValue();

  // If we have a CMP of a truncated binop, see if we can make a smaller binop
  // and use its flags directly.
  // TODO: Maybe we should try promoting compares that only use the zero flag
  // first if we can prove the upper bits with computeKnownBits?
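  // e.g. (a sketch): (X86ISD::CMP (i32 trunc (i64 add x, y)), 0) can become
  // the flag result of (X86ISD::ADD (i32 trunc x), (i32 trunc y)), provided
  // no user of the compare needs the carry/overflow-style condition flags.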
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);
  EVT VT = Op.getValueType();

  // If we have a constant logical shift that's only used in a comparison
  // against zero turn it into an equivalent AND. This allows turning it into
  // a TEST instruction later.
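  // e.g. (a sketch, when only ZF is used): (cmp (srl x, 3), 0) becomes
  // (cmp (and x, 0xFFFFFFF8), 0) for an i32 x, which can later be selected
  // as a TEST instruction.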
  if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
      Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
      onlyZeroFlagUsed(SDValue(N, 0))) {
    unsigned BitWidth = VT.getSizeInBits();
    const APInt &ShAmt = Op.getConstantOperandAPInt(1);
    if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
      unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
      APInt Mask = Op.getOpcode() == ISD::SRL
                       ? APInt::getHighBitsSet(BitWidth, MaskBits)
                       : APInt::getLowBitsSet(BitWidth, MaskBits);
      if (Mask.isSignedIntN(32)) {
        Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
                         DAG.getConstant(Mask, dl, VT));
        return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                           DAG.getConstant(0, dl, VT));
      }
    }
  }

  // Look for a truncate with a single use.
  if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
    return SDValue();

  Op = Op.getOperand(0);

  // Arithmetic op can only have one use.
  if (!Op.hasOneUse())
    return SDValue();

  unsigned NewOpc;
  switch (Op.getOpcode()) {
  default: return SDValue();
  case ISD::AND:
    // Skip AND with a constant. We have special handling for AND with an
    // immediate during isel to generate TEST instructions.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return SDValue();
    NewOpc = X86ISD::AND;
    break;
  case ISD::OR:  NewOpc = X86ISD::OR;  break;
  case ISD::XOR: NewOpc = X86ISD::XOR; break;
  case ISD::ADD:
    // If the carry or overflow flag is used, we can't truncate.
    if (needCarryOrOverflowFlag(SDValue(N, 0)))
      return SDValue();
    NewOpc = X86ISD::ADD;
    break;
  case ISD::SUB:
    // If the carry or overflow flag is used, we can't truncate.
    if (needCarryOrOverflowFlag(SDValue(N, 0)))
      return SDValue();
    NewOpc = X86ISD::SUB;
    break;
  }

  // We found an op we can narrow. Truncate its inputs.
  SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
  SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));

  // Use an X86-specific opcode to avoid DAG combine messing with it.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);

  // For AND, keep a CMP so that we can match the test pattern.
  if (NewOpc == X86ISD::AND)
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, VT));

  // Return the flags.
  return Op.getValue(1);
}

static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI) {
  assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
         "Expected X86ISD::ADD or X86ISD::SUB");

  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  MVT VT = LHS.getSimpleValueType();
  unsigned GenericOpc = X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB;

  // If we don't use the flag result, simplify back to a generic ADD/SUB.
  if (!N->hasAnyUseOfValue(1)) {
    SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
    return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
  }

  // Fold any similar generic ADD/SUB opcodes to reuse this node.
  auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
    SDValue Ops[] = {N0, N1};
    SDVTList VTs = DAG.getVTList(N->getValueType(0));
    if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
      SDValue Op(N, 0);
      if (Negate)
        Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
      DCI.CombineTo(GenericAddSub, Op);
    }
  };
  MatchGeneric(LHS, RHS, false);
  MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());

  return SDValue();
}

static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
                       N->getOperand(0), N->getOperand(1),
                       Flags);
  }

  // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
  // iff the flag result is dead.
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
      !N->hasAnyUseOfValue(1))
    return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), Op0.getOperand(0),
                       Op0.getOperand(1), N->getOperand(2));

  return SDValue();
}

// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI) {
  // If the LHS and RHS of the ADC node are zero, then it can't overflow and
  // the result is either zero or one (depending on the input carry bit).
  // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
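  // e.g. (adc 0, 0, EFLAGS) --> (and (setcc_carry COND_B, EFLAGS), 1), i.e.
  // 1 if the carry was set and 0 otherwise.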
  if (X86::isZeroNode(N->getOperand(0)) &&
      X86::isZeroNode(N->getOperand(1)) &&
      // We don't have a good way to replace an EFLAGS use, so only do this when
      // dead right now.
      SDValue(N, 1).use_empty()) {
    SDLoc DL(N);
    EVT VT = N->getValueType(0);
    SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
    SDValue Res1 =
        DAG.getNode(ISD::AND, DL, VT,
                    DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                                N->getOperand(2)),
                    DAG.getConstant(1, DL, VT));
    return DCI.CombineTo(N, Res1, CarryOut);
  }

  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
                       N->getOperand(0), N->getOperand(1),
                       Flags);
  }

  return SDValue();
}

/// If this is an add or subtract where one operand is produced by a cmp+setcc,
/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
/// with CMP+{ADC, SBB}.
static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
  bool IsSub = N->getOpcode() == ISD::SUB;
  SDValue X = N->getOperand(0);
  SDValue Y = N->getOperand(1);

  // If this is an add, canonicalize a zext operand to the RHS.
  // TODO: Incomplete? What if both sides are zexts?
  if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
      Y.getOpcode() != ISD::ZERO_EXTEND)
    std::swap(X, Y);

  // Look through a one-use zext.
  bool PeekedThroughZext = false;
  if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
    Y = Y.getOperand(0);
    PeekedThroughZext = true;
  }

  // If this is an add, canonicalize a setcc operand to the RHS.
  // TODO: Incomplete? What if both sides are setcc?
  // TODO: Should we allow peeking through a zext of the other operand?
  if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
      Y.getOpcode() != X86ISD::SETCC)
    std::swap(X, Y);

  if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
    return SDValue();

  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);

  // If X is -1 or 0, then we have an opportunity to avoid constants required in
  // the general case below.
  auto *ConstantX = dyn_cast<ConstantSDNode>(X);
  if (ConstantX) {
    if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
        (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
      // This is a complicated way to get -1 or 0 from the carry flag:
      // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
      //  0 - SETB  -->  0 -  (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         Y.getOperand(1));
    }

    if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
        (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
      SDValue EFLAGS = Y->getOperand(1);
      if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
          EFLAGS.getValueType().isInteger() &&
          !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
        // Swap the operands of a SUB, and we have the same pattern as above.
        // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
        //  0 - SETA  (SUB A, B) -->  0 - SETB  (SUB B, A) --> SUB + SBB
        SDValue NewSub = DAG.getNode(
            X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
            EFLAGS.getOperand(1), EFLAGS.getOperand(0));
        SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
        return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                           DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                           NewEFLAGS);
      }
    }
  }

  if (CC == X86::COND_B) {
    // X + SETB Z --> adc X, 0
    // X - SETB Z --> sbb X, 0
    return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
                       DAG.getVTList(VT, MVT::i32), X,
                       DAG.getConstant(0, DL, VT), Y.getOperand(1));
  }

  if (CC == X86::COND_A) {
    SDValue EFLAGS = Y->getOperand(1);
    // Try to convert COND_A into COND_B in an attempt to facilitate
    // materializing "setb reg".
    //
    // Do not flip "e > c", where "c" is a constant, because the CMP
    // instruction cannot take an immediate as its first operand.
    //
    if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
        EFLAGS.getValueType().isInteger() &&
        !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
      SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
                                   EFLAGS.getNode()->getVTList(),
                                   EFLAGS.getOperand(1), EFLAGS.getOperand(0));
      SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
      return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
                         DAG.getVTList(VT, MVT::i32), X,
                         DAG.getConstant(0, DL, VT), NewEFLAGS);
    }
  }

  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  SDValue Cmp = Y.getOperand(1);
  if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
      !X86::isZeroNode(Cmp.getOperand(1)) ||
      !Cmp.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue Z = Cmp.getOperand(0);
  EVT ZVT = Z.getValueType();

  // If X is -1 or 0, then we have an opportunity to avoid constants required in
  // the general case below.
  if (ConstantX) {
    // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
    // fake operands:
    //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
    // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
    if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
        (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
      SDValue Zero = DAG.getConstant(0, DL, ZVT);
      SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
      SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         SDValue(Neg.getNode(), 1));
    }

    // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
    // with fake operands:
    //  0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
    // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
    if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
        (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
      SDValue One = DAG.getConstant(1, DL, ZVT);
      SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cmp1);
    }
  }

  // (cmp Z, 1) sets the carry flag if Z is 0.
  SDValue One = DAG.getConstant(1, DL, ZVT);
  SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);

  // Add the flags type for ADC/SBB nodes.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
  // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
  if (CC == X86::COND_NE)
    return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
                       DAG.getConstant(-1ULL, DL, VT), Cmp1);

  // X - (Z == 0) --> sub X, (zext(sete  Z, 0)) --> sbb X, 0, (cmp Z, 1)
  // X + (Z == 0) --> add X, (zext(sete  Z, 0)) --> adc X, 0, (cmp Z, 1)
  return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
                     DAG.getConstant(0, DL, VT), Cmp1);
}

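// Try to replace a vector-reduction add whose operand is a multiply that fits
// in 16 bits with PMADDWD. PMADDWD multiplies pairs of signed i16 elements and
// sums adjacent products into i32 lanes; because the surrounding add is a
// reduction, this horizontal pairing does not change the final sum.
// Illustrative shape (for a v8i32 reduction):
//   add (mul (sext v8i16 A), (sext v8i16 B)), Acc
//     --> add (concat (vpmaddwd A, B), zero-vector), Acc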
static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT VT = N->getValueType(0);

  // Do not use PMADD unless the result is a vector with at least 8 elements.
  if (!VT.isVector() || VT.getVectorNumElements() < 8)
    return SDValue();

  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  auto UsePMADDWD = [&](SDValue Op) {
    ShrinkMode Mode;
    return Op.getOpcode() == ISD::MUL &&
           canReduceVMulWidth(Op.getNode(), DAG, Mode) && Mode != MULU16 &&
           (!Subtarget.hasSSE41() ||
            (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
             Op->isOnlyUserOf(Op.getOperand(1).getNode())));
  };

  SDValue MulOp, OtherOp;
  if (UsePMADDWD(Op0)) {
    MulOp = Op0;
    OtherOp = Op1;
  } else if (UsePMADDWD(Op1)) {
    MulOp = Op1;
    OtherOp = Op0;
  } else
    return SDValue();

  SDLoc DL(N);
  EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                   VT.getVectorNumElements());
  EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                VT.getVectorNumElements() / 2);

  // Shrink the operands of mul.
  SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(0));
  SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(1));

  // Madd vector size is half of the original vector size
  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
    MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
    return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
  };
  SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 },
                                  PMADDWDBuilder);
  // Fill the rest of the output with 0
  SDValue Zero = DAG.getConstant(0, DL, Madd.getSimpleValueType());
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd, Zero);

  // Preserve the reduction flag on the ADD. We may need to revisit for the
  // other operand.
  SDNodeFlags Flags;
  Flags.setVectorReduction(true);
  return DAG.getNode(ISD::ADD, DL, VT, Concat, OtherOp, Flags);
}

static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // TODO: There's nothing special about i32, any integer type above i16 should
  // work just as well.
  if (!VT.isVector() || !VT.isSimple() ||
      !(VT.getVectorElementType() == MVT::i32))
    return SDValue();

  unsigned RegSize = 128;
  if (Subtarget.useBWIRegs())
    RegSize = 512;
  else if (Subtarget.hasAVX())
    RegSize = 256;

  // We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512.
  // TODO: We should be able to handle larger vectors by splitting them before
  // feeding them into several SADs, and then reducing over those.
  if (VT.getSizeInBits() / 4 > RegSize)
    return SDValue();

  // We know N is a reduction add. To match SAD, we need one of the operands to
  // be an ABS.
  SDValue AbsOp = N->getOperand(0);
  SDValue OtherOp = N->getOperand(1);
  if (AbsOp.getOpcode() != ISD::ABS)
    std::swap(AbsOp, OtherOp);
  if (AbsOp.getOpcode() != ISD::ABS)
    return SDValue();

  // Check whether we have an abs-diff pattern feeding into the add.
  SDValue SadOp0, SadOp1;
  if (!detectZextAbsDiff(AbsOp, SadOp0, SadOp1))
    return SDValue();

  // SAD pattern detected. Now build a SAD instruction and an addition for
  // reduction. Note that the number of elements of the result of SAD is less
  // than the number of elements of its input. Therefore, we can only update
  // part of the elements in the reduction vector.
  SDValue Sad = createPSADBW(DAG, SadOp0, SadOp1, DL, Subtarget);

  // The output of PSADBW is a vector of i64.
  // We need to turn the vector of i64 into a vector of i32.
  // If the reduction vector is at least as wide as the psadbw result, just
  // bitcast. If it's narrower (which can only occur for v2i32), bits 127:16 of
  // the PSADBW result will be zero. If narrow vectors are promoted, truncate
  // the v2i64 result to v2i32, which will be removed by type legalization. If
  // narrow vectors are widened, we bitcast to v4i32 and extract v2i32.
  MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
  Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);

  if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
    // Fill the upper elements with zero to match the add width.
    assert(VT.getSizeInBits() % ResVT.getSizeInBits() == 0 && "Unexpected VTs");
    unsigned NumConcats = VT.getSizeInBits() / ResVT.getSizeInBits();
    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, DL, ResVT));
    Ops[0] = Sad;
    Sad = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
  } else if (VT.getSizeInBits() < ResVT.getSizeInBits()) {
    Sad = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Sad,
                      DAG.getIntPtrConstant(0, DL));
  }

  // Preserve the reduction flag on the ADD. We may need to revisit for the
  // other operand.
  SDNodeFlags Flags;
  Flags.setVectorReduction(true);
  return DAG.getNode(ISD::ADD, DL, VT, Sad, OtherOp, Flags);
}

static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
                            const SDLoc &DL, EVT VT,
                            const X86Subtarget &Subtarget) {
  // Example of pattern we try to detect:
  // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
  //(add (build_vector (extract_elt t, 0),
  //                   (extract_elt t, 2),
  //                   (extract_elt t, 4),
  //                   (extract_elt t, 6)),
  //     (build_vector (extract_elt t, 1),
  //                   (extract_elt t, 3),
  //                   (extract_elt t, 5),
  //                   (extract_elt t, 7)))
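  // Each result lane is t[2i] + t[2i+1] = x0[2i]*x1[2i] + x0[2i+1]*x1[2i+1],
  // which is exactly what VPMADDWD computes for signed i16 inputs, so the
  // whole expression can become (vpmaddwd x0, x1).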

  if (!Subtarget.hasSSE2())
    return SDValue();

  if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
      Op1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
      VT.getVectorNumElements() < 4 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  // Check if one of Op0,Op1 is of the form:
  // (build_vector (extract_elt Mul, 0),
  //               (extract_elt Mul, 2),
  //               (extract_elt Mul, 4),
  //                   ...
  // the other is of the form:
  // (build_vector (extract_elt Mul, 1),
  //               (extract_elt Mul, 3),
  //               (extract_elt Mul, 5),
  //                   ...
  // and identify Mul.
  SDValue Mul;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
    SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
            Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
    // TODO: Be more tolerant to undefs.
    if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
    auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
    auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
    auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
    if (!Const0L || !Const1L || !Const0H || !Const1H)
      return SDValue();
    unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
             Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
    // Commutativity of mul allows factors of a product to reorder.
    if (Idx0L > Idx1L)
      std::swap(Idx0L, Idx1L);
    if (Idx0H > Idx1H)
      std::swap(Idx0H, Idx1H);
    // Commutativity of add allows pairs of factors to reorder.
    if (Idx0L > Idx0H) {
      std::swap(Idx0L, Idx0H);
      std::swap(Idx1L, Idx1H);
    }
    if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
        Idx1H != 2 * i + 3)
      return SDValue();
    if (!Mul) {
      // First time an extract_elt's source vector is visited. It must be a MUL
      // with twice as many vector elements as the BUILD_VECTOR.
      // Both extracts must be from the same MUL.
      Mul = Op0L->getOperand(0);
      if (Mul->getOpcode() != ISD::MUL ||
          Mul.getValueType().getVectorNumElements() != 2 * e)
        return SDValue();
    }
    // Check that the extract is from the same MUL previously seen.
    if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
        Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
      return SDValue();
  }

  // Check if the Mul source can be safely shrunk.
  ShrinkMode Mode;
  if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) || Mode == MULU16)
    return SDValue();

  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                         ArrayRef<SDValue> Ops) {
    // Shrink by adding truncate nodes and let DAGCombine fold with the
    // sources.
    EVT InVT = Ops[0].getValueType();
    assert(InVT.getScalarType() == MVT::i32 &&
           "Unexpected scalar element type");
    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 InVT.getVectorNumElements() / 2);
    EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                   InVT.getVectorNumElements());
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
                       DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]),
                       DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1]));
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT,
                          { Mul.getOperand(0), Mul.getOperand(1) },
                          PMADDBuilder);
}

// Attempt to turn this pattern into PMADDWD.
// (add (mul (sext (build_vector)), (sext (build_vector))),
//      (mul (sext (build_vector)), (sext (build_vector))))
static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
                              const SDLoc &DL, EVT VT,
                              const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
    return SDValue();

  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
      VT.getVectorNumElements() < 4 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  SDValue N10 = N1.getOperand(0);
  SDValue N11 = N1.getOperand(1);

  // All inputs need to be sign extends.
  // TODO: Support ZERO_EXTEND from known positive?
  if (N00.getOpcode() != ISD::SIGN_EXTEND ||
      N01.getOpcode() != ISD::SIGN_EXTEND ||
      N10.getOpcode() != ISD::SIGN_EXTEND ||
      N11.getOpcode() != ISD::SIGN_EXTEND)
    return SDValue();

  // Peek through the extends.
  N00 = N00.getOperand(0);
  N01 = N01.getOperand(0);
  N10 = N10.getOperand(0);
  N11 = N11.getOperand(0);

  // Must be extending from vXi16.
  EVT InVT = N00.getValueType();
  if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
      N10.getValueType() != InVT || N11.getValueType() != InVT)
    return SDValue();

  // All inputs should be build_vectors.
  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
      N01.getOpcode() != ISD::BUILD_VECTOR ||
      N10.getOpcode() != ISD::BUILD_VECTOR ||
      N11.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // For each element, we need an odd element from one vector multiplied by the
  // odd element of the other vector, and the even element from one vector
  // multiplied by the even element of the other vector. In other words, for
  // each element i the following operation must be performed:
  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
  SDValue In0, In1;
  for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
    SDValue N00Elt = N00.getOperand(i);
    SDValue N01Elt = N01.getOperand(i);
    SDValue N10Elt = N10.getOperand(i);
    SDValue N11Elt = N11.getOperand(i);
    // TODO: Be more tolerant to undefs.
    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
      return SDValue();
    unsigned IdxN00 = ConstN00Elt->getZExtValue();
    unsigned IdxN01 = ConstN01Elt->getZExtValue();
    unsigned IdxN10 = ConstN10Elt->getZExtValue();
    unsigned IdxN11 = ConstN11Elt->getZExtValue();
    // Add is commutative so indices can be reordered.
    if (IdxN00 > IdxN10) {
      std::swap(IdxN00, IdxN10);
      std::swap(IdxN01, IdxN11);
    }
    // N0 indices must be the even elements. N1 indices must be the next odd
    // elements.
    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
      return SDValue();
    SDValue N00In = N00Elt.getOperand(0);
    SDValue N01In = N01Elt.getOperand(0);
    SDValue N10In = N10Elt.getOperand(0);
    SDValue N11In = N11Elt.getOperand(0);
    // First time we find an input capture it.
    if (!In0) {
      In0 = N00In;
      In1 = N01In;
    }
    // Mul is commutative so the input vectors can be in any order.
    // Canonicalize to make the compares easier.
    if (In0 != N00In)
      std::swap(N00In, N01In);
    if (In0 != N10In)
      std::swap(N10In, N11In);
    if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
      return SDValue();
  }

  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                         ArrayRef<SDValue> Ops) {
    // The operands are already vXi16 here, so no truncation is needed before
    // building the PMADDWD node.
    EVT OpVT = Ops[0].getValueType();
    assert(OpVT.getScalarType() == MVT::i16 &&
           "Unexpected scalar element type");
    assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 OpVT.getVectorNumElements() / 2);
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
                          PMADDBuilder);
}

static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  const SDNodeFlags Flags = N->getFlags();
  if (Flags.hasVectorReduction()) {
    if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
      return Sad;
    if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
      return MAdd;
  }
  EVT VT = N->getValueType(0);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
    return MAdd;
  if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
    return MAdd;

  // Try to synthesize horizontal adds from adds of shuffles.
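  // e.g. (add (shuffle A, B, <0,2,...>), (shuffle A, B, <1,3,...>))
  //        --> (hadd A, B)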
  if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
       VT == MVT::v8i32) &&
      Subtarget.hasSSSE3() &&
      isHorizontalBinOp(Op0, Op1, DAG, Subtarget, true)) {
    auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                          ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
                            HADDBuilder);
  }

  // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
  // (sub Y, (sext (vXi1 X))).
  // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
  // generic DAG combine without a legal type check, but adding this there
  // caused regressions.
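  // This works because for an i1 value b, zext(b) == -sext(b) (0/1 vs. 0/-1),
  // so (add (zext b), Y) is the same as (sub Y, (sext b)).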
  if (VT.isVector()) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
        Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
        TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
      SDLoc DL(N);
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
      return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
    }

    if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
        Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
        TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
      SDLoc DL(N);
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
      return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
    }
  }

  return combineAddOrSubToADCOrSBB(N, DAG);
}

static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  if (!VT.isVector())
    return SDValue();

  // PSUBUS is supported, starting from SSE2, but truncation for v8i32
  // is only worth it with SSSE3 (PSHUFB).
  EVT EltVT = VT.getVectorElementType();
  if (!(Subtarget.hasSSE2() && (EltVT == MVT::i8 || EltVT == MVT::i16)) &&
      !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
      !(Subtarget.useBWIRegs() && (VT == MVT::v16i32)))
    return SDValue();

  SDValue SubusLHS, SubusRHS;
  // Try to find umax(a,b) - b or a - umin(a,b) patterns;
  // they may be converted to subus(a,b).
  // TODO: Need to add IR canonicalization for this code.
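  // e.g. (sub (umax a, b), b) --> (usubsat a, b)
  //      (sub a, (umin a, b)) --> (usubsat a, b)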
  if (Op0.getOpcode() == ISD::UMAX) {
    SubusRHS = Op1;
    SDValue MaxLHS = Op0.getOperand(0);
    SDValue MaxRHS = Op0.getOperand(1);
    if (MaxLHS == Op1)
      SubusLHS = MaxRHS;
    else if (MaxRHS == Op1)
      SubusLHS = MaxLHS;
    else
      return SDValue();
  } else if (Op1.getOpcode() == ISD::UMIN) {
    SubusLHS = Op0;
    SDValue MinLHS = Op1.getOperand(0);
    SDValue MinRHS = Op1.getOperand(1);
    if (MinLHS == Op0)
      SubusRHS = MinRHS;
    else if (MinRHS == Op0)
      SubusRHS = MinLHS;
    else
      return SDValue();
  } else
    return SDValue();

  // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
  // special preprocessing in some cases.
  if (EltVT == MVT::i8 || EltVT == MVT::i16)
    return DAG.getNode(ISD::USUBSAT, SDLoc(N), VT, SubusLHS, SubusRHS);

  assert((VT == MVT::v8i32 || VT == MVT::v16i32 || VT == MVT::v8i64) &&
         "Unexpected VT!");

  // The special preprocessing can only be applied if the value was zero
  // extended from 16 bits, so we require the leading 16 bits to be zero for
  // 32-bit values, or the leading 48 bits for 64-bit values.
  KnownBits Known = DAG.computeKnownBits(SubusLHS);
  unsigned NumZeros = Known.countMinLeadingZeros();
  if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
    return SDValue();

  EVT ExtType = SubusLHS.getValueType();
  EVT ShrinkedType;
  if (VT == MVT::v8i32 || VT == MVT::v8i64)
    ShrinkedType = MVT::v8i16;
  else
    ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;

  // If SubusLHS is zero-extended, truncate SubusRHS to its
  // size: SubusRHS = umin(0xFFF..., SubusRHS).
  SDValue SaturationConst =
      DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
                                           ShrinkedType.getScalarSizeInBits()),
                      SDLoc(SubusLHS), ExtType);
  SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
                             SaturationConst);
  SDValue NewSubusLHS =
      DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
  SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
  SDValue Psubus = DAG.getNode(ISD::USUBSAT, SDLoc(N), ShrinkedType,
                               NewSubusLHS, NewSubusRHS);

  // Zero-extend the result since it may be used somewhere as a 32-bit value;
  // if it is not, the zext and the following trunc will fold away.
  return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
}

static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // X86 can't encode an immediate LHS of a sub. See if we can push the
  // negation into a preceding instruction.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
    // If the RHS of the sub is an XOR with one use and a constant, invert the
    // immediate. Then add one to the LHS of the sub so we can turn
    // X-Y -> X+~Y+1, saving one register.
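    // e.g. (sub C, (xor X, K)) --> (add (xor X, ~K), C + 1)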
    if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
        isa<ConstantSDNode>(Op1.getOperand(1))) {
      const APInt &XorC = Op1.getConstantOperandAPInt(1);
      EVT VT = Op0.getValueType();
      SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
                                   Op1.getOperand(0),
                                   DAG.getConstant(~XorC, SDLoc(Op1), VT));
      return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
                         DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
    }
  }

  // Try to synthesize horizontal subs from subs of shuffles.
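  // e.g. (sub (shuffle A, B, <0,2,...>), (shuffle A, B, <1,3,...>))
  //        --> (hsub A, B)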
  EVT VT = N->getValueType(0);
  if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
       VT == MVT::v8i32) &&
      Subtarget.hasSSSE3() &&
      isHorizontalBinOp(Op0, Op1, DAG, Subtarget, false)) {
    auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                          ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
                            HSUBBuilder);
  }

  // Try to create PSUBUS if SUB's argument is max/min
  if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
    return V;

  return combineAddOrSubToADCOrSBB(N, DAG);
}

static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT VT = N->getSimpleValueType(0);
  SDLoc DL(N);

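  // A vector compared with itself is all-ones for PCMPEQ (x == x is always
  // true) and all-zeros for PCMPGT (x > x is never true).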
  if (N->getOperand(0) == N->getOperand(1)) {
    if (N->getOpcode() == X86ISD::PCMPEQ)
      return DAG.getConstant(-1, DL, VT);
    if (N->getOpcode() == X86ISD::PCMPGT)
      return DAG.getConstant(0, DL, VT);
  }

  return SDValue();
}

/// Helper that combines an array of subvector ops as if they were the operands
/// of an ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
/// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
                                      ArrayRef<SDValue> Ops, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget &Subtarget) {
  assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");

  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
    return DAG.getUNDEF(VT);

  if (llvm::all_of(Ops, [](SDValue Op) {
        return ISD::isBuildVectorAllZeros(Op.getNode());
      }))
    return getZeroVector(VT, Subtarget, DAG, DL);

  SDValue Op0 = Ops[0];

  // Fold subvector loads into one.
  // If needed, look through bitcasts to get to the load.
  if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
    bool Fast;
    const X86TargetLowering *TLI = Subtarget.getTargetLowering();
    if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                                *FirstLd->getMemOperand(), &Fast) &&
        Fast) {
      if (SDValue Ld =
              EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
        return Ld;
    }
  }

  // Repeated subvectors.
  if (llvm::all_of(Ops, [Op0](SDValue Op) { return Op == Op0; })) {
    // If this broadcast/subv_broadcast is inserted into both halves, use a
    // larger broadcast/subv_broadcast.
    if (Op0.getOpcode() == X86ISD::VBROADCAST ||
        Op0.getOpcode() == X86ISD::SUBV_BROADCAST)
      return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));

    // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
    if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
        (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
                                     Op0.getOperand(0),
                                     DAG.getIntPtrConstant(0, DL)));

    // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
    if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
        (Subtarget.hasAVX2() ||
         (VT.getScalarSizeInBits() >= 32 && MayFoldLoad(Op0.getOperand(0)))) &&
        Op0.getOperand(0).getValueType() == VT.getScalarType())
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
  }

  bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });

  // Repeated opcode.
  // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
  // but it currently struggles with different vector widths.
  if (llvm::all_of(Ops, [Op0](SDValue Op) {
        return Op.getOpcode() == Op0.getOpcode();
      })) {
    unsigned NumOps = Ops.size();
    switch (Op0.getOpcode()) {
    case X86ISD::PSHUFHW:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFD:
      if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
          Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
        SmallVector<SDValue, 2> Src;
        for (unsigned i = 0; i != NumOps; ++i)
          Src.push_back(Ops[i].getOperand(0));
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
                           Op0.getOperand(1));
      }
      LLVM_FALLTHROUGH;
    case X86ISD::VPERMILPI:
      // TODO - add support for vXf64/vXi64 shuffles.
      if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
          Subtarget.hasAVX() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
        SmallVector<SDValue, 2> Src;
        for (unsigned i = 0; i != NumOps; ++i)
          Src.push_back(DAG.getBitcast(MVT::v4f32, Ops[i].getOperand(0)));
        SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f32, Src);
        Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
                          Op0.getOperand(1));
        return DAG.getBitcast(VT, Res);
      }
      break;
    case X86ISD::PACKUS:
      if (NumOps == 2 && VT.is256BitVector() && Subtarget.hasInt256()) {
        SmallVector<SDValue, 2> LHS, RHS;
        for (unsigned i = 0; i != NumOps; ++i) {
          LHS.push_back(Ops[i].getOperand(0));
          RHS.push_back(Ops[i].getOperand(1));
        }
        MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
        SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
                                 NumOps * SrcVT.getVectorNumElements());
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, LHS),
                           DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
      }
      break;
    }
  }

  return SDValue();
}

static SDValue combineConcatVectors(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  EVT SrcVT = N->getOperand(0).getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Don't do anything for i1 vectors.
  if (VT.getVectorElementType() == MVT::i1)
    return SDValue();

  if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
    SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
    if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
                                           DCI, Subtarget))
      return R;
  }

  return SDValue();
}

static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  MVT OpVT = N->getSimpleValueType(0);

  bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;

  SDLoc dl(N);
  SDValue Vec = N->getOperand(0);
  SDValue SubVec = N->getOperand(1);

  uint64_t IdxVal = N->getConstantOperandVal(2);
  MVT SubVecVT = SubVec.getSimpleValueType();

  if (Vec.isUndef() && SubVec.isUndef())
    return DAG.getUNDEF(OpVT);

  // Inserting undefs/zeros into zeros/undefs is a zero vector.
  if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
      (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
    return getZeroVector(OpVT, Subtarget, DAG, dl);

  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
    // If we're inserting into a zero vector and then into a larger zero vector,
    // just insert into the larger zero vector directly.
    if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
        ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
      uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
                         getZeroVector(OpVT, Subtarget, DAG, dl),
                         SubVec.getOperand(1),
                         DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
    }

    // If we're inserting into a zero vector and our input was extracted from
    // an insert into a zero vector of the same type, and the extraction was at
    // least as large as the original insertion, just insert the original
    // subvector into a zero vector.
    if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
        isNullConstant(SubVec.getOperand(1)) &&
        SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
      SDValue Ins = SubVec.getOperand(0);
      if (isNullConstant(Ins.getOperand(2)) &&
          ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
          Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
        return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
                           getZeroVector(OpVT, Subtarget, DAG, dl),
                           Ins.getOperand(1), N->getOperand(2));
    }
  }

  // Stop here if this is an i1 vector.
  if (IsI1Vector)
    return SDValue();

  // If this is an insert of an extract, combine to a shuffle. Don't do this
  // if the insert or extract can be represented with a subregister operation.
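  // e.g. with v8i32 operands:
  //   insert_subvector V1, (extract_subvector V2, 4), 0
  //     --> shuffle V1, V2, <12,13,14,15, 4,5,6,7>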
  if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      SubVec.getOperand(0).getSimpleValueType() == OpVT &&
      (IdxVal != 0 || !Vec.isUndef())) {
    int ExtIdxVal = SubVec.getConstantOperandVal(1);
    if (ExtIdxVal != 0) {
      int VecNumElts = OpVT.getVectorNumElements();
      int SubVecNumElts = SubVecVT.getVectorNumElements();
      SmallVector<int, 64> Mask(VecNumElts);
      // First create an identity shuffle mask.
      for (int i = 0; i != VecNumElts; ++i)
        Mask[i] = i;
      // Now insert the extracted portion.
      for (int i = 0; i != SubVecNumElts; ++i)
        Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;

      return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
    }
  }

  // Match concat_vector style patterns.
  SmallVector<SDValue, 2> SubVectorOps;
  if (collectConcatOps(N, SubVectorOps)) {
    if (SDValue Fold =
            combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
      return Fold;

    // If we're inserting all zeros into the upper half, change this to
    // a concat with zero. We will match this to a move
    // with implicit upper bit zeroing during isel.
    // We do this here because we don't want combineConcatVectorOps to
    // create INSERT_SUBVECTOR from CONCAT_VECTORS.
    if (SubVectorOps.size() == 2 &&
        ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
                         getZeroVector(OpVT, Subtarget, DAG, dl),
                         SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
  }

  // If this is a broadcast insert into an upper undef, use a larger broadcast.
  if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
    return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));

  // If this is a broadcast load inserted into an upper undef, use a larger
  // broadcast load.
  if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
      SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
    auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
    SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
    SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
    SDValue BcastLd =
        DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
                                MemIntr->getMemoryVT(),
                                MemIntr->getMemOperand());
    DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
    return BcastLd;
  }

  return SDValue();
}

/// If we are extracting a subvector of a vector select and the select condition
/// is composed of concatenated vectors, try to narrow the select width. This
/// is a common pattern for AVX1 integer code because 256-bit selects may be
/// legal, but there is almost no integer math/logic available for 256-bit.
/// This function should only be called with legal types (otherwise, the calls
/// to get simple value types will assert).
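/// Roughly, for a 128-bit extract at index 0 this turns
///   extract_subvector (vselect (concat C0, C1), T, F), 0
/// into
///   vselect C0, (extract_subvector T, 0), (extract_subvector F, 0)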
static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
  SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
  SmallVector<SDValue, 4> CatOps;
  if (Sel.getOpcode() != ISD::VSELECT ||
      !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
    return SDValue();

  // Note: We assume simple value types because this should only be called with
  //       legal operations/types.
  // TODO: This can be extended to handle extraction to 256-bits.
  MVT VT = Ext->getSimpleValueType(0);
  if (!VT.is128BitVector())
    return SDValue();

  MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
  if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
    return SDValue();

  MVT WideVT = Ext->getOperand(0).getSimpleValueType();
  MVT SelVT = Sel.getSimpleValueType();
  assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
         "Unexpected vector type with legal operations");

  unsigned SelElts = SelVT.getVectorNumElements();
  unsigned CastedElts = WideVT.getVectorNumElements();
  unsigned ExtIdx = cast<ConstantSDNode>(Ext->getOperand(1))->getZExtValue();
  if (SelElts % CastedElts == 0) {
    // The select has the same or more (narrower) elements than the extract
    // operand. The extraction index gets scaled by that factor.
    ExtIdx *= (SelElts / CastedElts);
  } else if (CastedElts % SelElts == 0) {
    // The select has fewer (wider) elements than the extract operand. Make sure
    // that the extraction index can be divided evenly.
    unsigned IndexDivisor = CastedElts / SelElts;
    if (ExtIdx % IndexDivisor != 0)
      return SDValue();
    ExtIdx /= IndexDivisor;
  } else {
    llvm_unreachable("Element count of simple vector types are not divisible?");
  }

  unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
  unsigned NarrowElts = SelElts / NarrowingFactor;
  MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
  SDLoc DL(Ext);
  SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
  SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
  SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
  SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
  return DAG.getBitcast(VT, NarrowSel);
}

static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const X86Subtarget &Subtarget) {
  // For AVX1 only, if we are extracting from a 256-bit and+not (which will
  // eventually get combined/lowered into ANDNP) with a concatenated operand,
  // split the 'and' into 128-bit ops to avoid the concatenate and extract.
  // We let generic combining take over from there to simplify the
  // insert/extract and 'not'.
  // This pattern emerges during AVX1 legalization. We handle it before lowering
  // to avoid complications like splitting constant vector loads.

  // Capture the original wide type in the likely case that we need to bitcast
  // back to this type.
  if (!N->getValueType(0).isSimple())
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  SDValue InVec = N->getOperand(0);
  SDValue InVecBC = peekThroughBitcasts(InVec);
  EVT InVecVT = InVec.getValueType();
  EVT InVecBCVT = InVecBC.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
      TLI.isTypeLegal(InVecVT) &&
      InVecVT.getSizeInBits() == 256 && InVecBC.getOpcode() == ISD::AND) {
    auto isConcatenatedNot = [] (SDValue V) {
      V = peekThroughBitcasts(V);
      if (!isBitwiseNot(V))
        return false;
      SDValue NotOp = V->getOperand(0);
      return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
    };
    if (isConcatenatedNot(InVecBC.getOperand(0)) ||
        isConcatenatedNot(InVecBC.getOperand(1))) {
      // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
      SDValue Concat = split256IntArith(InVecBC, DAG);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
                         DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
    }
  }

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue V = narrowExtractedVectorSelect(N, DAG))
    return V;

  unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();

  if (ISD::isBuildVectorAllZeros(InVec.getNode()))
    return getZeroVector(VT, Subtarget, DAG, SDLoc(N));

  if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
    if (VT.getScalarType() == MVT::i1)
      return DAG.getConstant(1, SDLoc(N), VT);
    return getOnesVector(VT, DAG, SDLoc(N));
  }

  if (InVec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getBuildVector(
        VT, SDLoc(N),
        InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));

  // Try to move vector bitcast after extract_subv by scaling extraction index:
  // extract_subv (bitcast X), Index --> bitcast (extract_subv X, Index')
  // TODO: Move this to DAGCombiner::visitEXTRACT_SUBVECTOR
  if (InVec != InVecBC && InVecBCVT.isVector()) {
    unsigned SrcNumElts = InVecBCVT.getVectorNumElements();
    unsigned DestNumElts = InVecVT.getVectorNumElements();
    if ((DestNumElts % SrcNumElts) == 0) {
      unsigned DestSrcRatio = DestNumElts / SrcNumElts;
      if ((VT.getVectorNumElements() % DestSrcRatio) == 0) {
        unsigned NewExtNumElts = VT.getVectorNumElements() / DestSrcRatio;
        EVT NewExtVT = EVT::getVectorVT(*DAG.getContext(),
                                        InVecBCVT.getScalarType(), NewExtNumElts);
        if ((N->getConstantOperandVal(1) % DestSrcRatio) == 0 &&
            TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NewExtVT)) {
          unsigned IndexValScaled = N->getConstantOperandVal(1) / DestSrcRatio;
          SDLoc DL(N);
          SDValue NewIndex = DAG.getIntPtrConstant(IndexValScaled, DL);
          SDValue NewExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewExtVT,
                                           InVecBC, NewIndex);
          return DAG.getBitcast(VT, NewExtract);
        }
      }
    }
  }

  // If we are extracting from an insert into a zero vector, replace with a
  // smaller insert into zero, provided the extraction covers at least the
  // original inserted subvector. Don't do this for i1 vectors.
  if (VT.getVectorElementType() != MVT::i1 &&
      InVec.getOpcode() == ISD::INSERT_SUBVECTOR && IdxVal == 0 &&
      InVec.hasOneUse() && isNullConstant(InVec.getOperand(2)) &&
      ISD::isBuildVectorAllZeros(InVec.getOperand(0).getNode()) &&
      InVec.getOperand(1).getValueSizeInBits() <= VT.getSizeInBits()) {
    SDLoc DL(N);
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       getZeroVector(VT, Subtarget, DAG, DL),
                       InVec.getOperand(1), InVec.getOperand(2));
  }

  // If we're extracting from a broadcast then we're better off just
  // broadcasting to the smaller type directly, assuming this is the only use.
  // As it's a broadcast we don't care about the extraction index.
  if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
      InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
    return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));

  if (InVec.getOpcode() == X86ISD::VBROADCAST_LOAD && InVec.hasOneUse()) {
    auto *MemIntr = cast<MemIntrinsicSDNode>(InVec);
    if (MemIntr->getMemoryVT().getSizeInBits() <= VT.getSizeInBits()) {
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
      SDValue BcastLd =
          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
                                  MemIntr->getMemoryVT(),
                                  MemIntr->getMemOperand());
      DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
      return BcastLd;
    }
  }

  // If we're extracting the lowest subvector and we're the only user,
  // we may be able to perform this with a smaller vector width.
  if (IdxVal == 0 && InVec.hasOneUse()) {
    unsigned InOpcode = InVec.getOpcode();
    if (VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
      // v2f64 CVTDQ2PD(v4i32).
      if (InOpcode == ISD::SINT_TO_FP &&
          InVec.getOperand(0).getValueType() == MVT::v4i32) {
        return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
      }
      // v2f64 CVTUDQ2PD(v4i32).
      if (InOpcode == ISD::UINT_TO_FP &&
          InVec.getOperand(0).getValueType() == MVT::v4i32) {
        return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
      }
      // v2f64 CVTPS2PD(v4f32).
      if (InOpcode == ISD::FP_EXTEND &&
          InVec.getOperand(0).getValueType() == MVT::v4f32) {
        return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
      }
    }
    if ((InOpcode == ISD::ANY_EXTEND ||
         InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
         InOpcode == ISD::ZERO_EXTEND ||
         InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
         InOpcode == ISD::SIGN_EXTEND ||
         InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
        VT.is128BitVector() &&
        InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
      unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
      return DAG.getNode(ExtOp, SDLoc(N), VT, InVec.getOperand(0));
    }
    if (InOpcode == ISD::VSELECT &&
        InVec.getOperand(0).getValueType().is256BitVector() &&
        InVec.getOperand(1).getValueType().is256BitVector() &&
        InVec.getOperand(2).getValueType().is256BitVector()) {
      SDLoc DL(N);
      SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
      SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
      SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
      return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
    }
  }

  return SDValue();
}

static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(0);
  SDLoc DL(N);

  // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
  // This occurs frequently in our masked scalar intrinsic code and our
  // floating point select lowering with AVX512.
  // TODO: SimplifyDemandedBits instead?
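  // e.g. (v1i1 scalar_to_vector (and X, 1)) --> (v1i1 scalar_to_vector X),
  // since only bit 0 of the scalar is used by the v1i1 element.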
  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
      if (C->getAPIntValue().isOneValue())
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
                           Src.getOperand(0));

  // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
      Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
      if (C->isNullValue())
        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
                           Src.getOperand(1));

  // Reduce v2i64 to v4i32 if we don't need the upper bits.
  // TODO: Move to DAGCombine?
  if (VT == MVT::v2i64 && Src.getOpcode() == ISD::ANY_EXTEND &&
      Src.getValueType() == MVT::i64 && Src.hasOneUse() &&
      Src.getOperand(0).getScalarValueSizeInBits() <= 32)
    return DAG.getBitcast(
        VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
                        DAG.getAnyExtOrTrunc(Src.getOperand(0), DL, MVT::i32)));

  return SDValue();
}

// Simplify PMULDQ and PMULUDQ operations.
static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Canonicalize constant to RHS.
  if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
      !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
    return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);

  // Multiply by zero.
  // Don't return RHS as it may contain UNDEFs.
  if (ISD::isBuildVectorAllZeros(RHS.getNode()))
    return DAG.getConstant(0, SDLoc(N), N->getValueType(0));

  // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
    return SDValue(N, 0);

  // If the input is an extend_invec and the SimplifyDemandedBits call didn't
  // convert it to any_extend_invec, due to the LegalOperations check, do the
  // conversion directly to a vector shuffle manually. This exposes combine
  // opportunities missed by combineExtInVec not calling
  // combineX86ShufflesRecursively on SSE4.1 targets.
  // FIXME: This is basically a hack around several other issues related to
  // ANY_EXTEND_VECTOR_INREG.
  if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
      (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
       LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
      LHS.getOperand(0).getValueType() == MVT::v4i32) {
    SDLoc dl(N);
    LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
                               LHS.getOperand(0), { 0, -1, 1, -1 });
    LHS = DAG.getBitcast(MVT::v2i64, LHS);
    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
  }
  if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
      (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
       RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
      RHS.getOperand(0).getValueType() == MVT::v4i32) {
    SDLoc dl(N);
    RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
                               RHS.getOperand(0), { 0, -1, 1, -1 });
    RHS = DAG.getBitcast(MVT::v2i64, RHS);
    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
  }

  return SDValue();
}

static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue In = N->getOperand(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Try to merge vector loads and extend_inreg to an extload.
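  // e.g. (v4i32 zero_extend_vector_inreg (v8i16 load p))
  //        --> (v4i32 zextload p from v4i16), when that extending load is legal.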
  if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
      In.hasOneUse()) {
    auto *Ld = cast<LoadSDNode>(In);
    if (Ld->isSimple()) {
      MVT SVT = In.getSimpleValueType().getVectorElementType();
      ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG
                                 ? ISD::SEXTLOAD
                                 : ISD::ZEXTLOAD;
      EVT MemVT = EVT::getVectorVT(*DAG.getContext(), SVT,
                                   VT.getVectorNumElements());
      if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
        SDValue Load =
            DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
                           Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
                           Ld->getMemOperand()->getFlags());
        DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
        return Load;
      }
    }
  }

  // Attempt to combine as a shuffle.
  // TODO: SSE41 support
  if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
    SDValue Op(N, 0);
    if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
      if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
        return Res;
  }

  return SDValue();
}

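// Try to simplify a KSHIFT (mask-register shift) node via
// SimplifyDemandedVectorElts with all result elements demanded.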
static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);

  APInt KnownUndef, KnownZero;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);

  return SDValue();
}

SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::SCALAR_TO_VECTOR:
    return combineScalarToVector(N, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case X86ISD::PEXTRW:
  case X86ISD::PEXTRB:
    return combineExtractVectorElt(N, DAG, DCI, Subtarget);
  case ISD::CONCAT_VECTORS:
    return combineConcatVectors(N, DAG, DCI, Subtarget);
  case ISD::INSERT_SUBVECTOR:
    return combineInsertSubvector(N, DAG, DCI, Subtarget);
  case ISD::EXTRACT_SUBVECTOR:
    return combineExtractSubvector(N, DAG, DCI, Subtarget);
  case ISD::VSELECT:
  case ISD::SELECT:
  case X86ISD::BLENDV:      return combineSelect(N, DAG, DCI, Subtarget);
  case ISD::BITCAST:        return combineBitcast(N, DAG, DCI, Subtarget);
  case X86ISD::CMOV:        return combineCMov(N, DAG, DCI, Subtarget);
  case X86ISD::CMP:         return combineCMP(N, DAG);
  case ISD::ADD:            return combineAdd(N, DAG, DCI, Subtarget);
  case ISD::SUB:            return combineSub(N, DAG, DCI, Subtarget);
  case X86ISD::ADD:
  case X86ISD::SUB:         return combineX86AddSub(N, DAG, DCI);
  case X86ISD::SBB:         return combineSBB(N, DAG);
  case X86ISD::ADC:         return combineADC(N, DAG, DCI);
  case ISD::MUL:            return combineMul(N, DAG, DCI, Subtarget);
  case ISD::SHL:            return combineShiftLeft(N, DAG);
  case ISD::SRA:            return combineShiftRightArithmetic(N, DAG);
  case ISD::SRL:            return combineShiftRightLogical(N, DAG, DCI);
  case ISD::AND:            return combineAnd(N, DAG, DCI, Subtarget);
  case ISD::OR:             return combineOr(N, DAG, DCI, Subtarget);
  case ISD::XOR:            return combineXor(N, DAG, DCI, Subtarget);
  case X86ISD::BEXTR:       return combineBEXTR(N, DAG, DCI, Subtarget);
  case ISD::LOAD:           return combineLoad(N, DAG, DCI, Subtarget);
  case ISD::MLOAD:          return combineMaskedLoad(N, DAG, DCI, Subtarget);
  case ISD::STORE:          return combineStore(N, DAG, DCI, Subtarget);
  case ISD::MSTORE:         return combineMaskedStore(N, DAG, DCI, Subtarget);
  case ISD::SINT_TO_FP:     return combineSIntToFP(N, DAG, DCI, Subtarget);
  case ISD::UINT_TO_FP:     return combineUIntToFP(N, DAG, Subtarget);
  case ISD::FADD:
  case ISD::FSUB:           return combineFaddFsub(N, DAG, Subtarget);
  case ISD::FNEG:           return combineFneg(N, DAG, Subtarget);
  case ISD::TRUNCATE:       return combineTruncate(N, DAG, Subtarget);
  case X86ISD::VTRUNC:      return combineVTRUNC(N, DAG);
  case X86ISD::ANDNP:       return combineAndnp(N, DAG, DCI, Subtarget);
  case X86ISD::FAND:        return combineFAnd(N, DAG, Subtarget);
  case X86ISD::FANDN:       return combineFAndn(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return combineFOr(N, DAG, Subtarget);
  case X86ISD::FMIN:
  case X86ISD::FMAX:        return combineFMinFMax(N, DAG);
  case ISD::FMINNUM:
  case ISD::FMAXNUM:        return combineFMinNumFMaxNum(N, DAG, Subtarget);
  case X86ISD::CVTSI2P:
  case X86ISD::CVTUI2P:     return combineX86INT_TO_FP(N, DAG, DCI);
  case X86ISD::CVTP2SI:
  case X86ISD::CVTP2UI:
  case X86ISD::CVTTP2SI:
  case X86ISD::CVTTP2UI:    return combineCVTP2I_CVTTP2I(N, DAG, DCI);
  case X86ISD::BT:          return combineBT(N, DAG, DCI);
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:    return combineZext(N, DAG, DCI, Subtarget);
  case ISD::SIGN_EXTEND:    return combineSext(N, DAG, DCI, Subtarget);
  case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG, DCI,
                                                             Subtarget);
  case ISD::SETCC:          return combineSetCC(N, DAG, Subtarget);
  case X86ISD::SETCC:       return combineX86SetCC(N, DAG, Subtarget);
  case X86ISD::BRCOND:      return combineBrCond(N, DAG, Subtarget);
  case X86ISD::PACKSS:
  case X86ISD::PACKUS:      return combineVectorPack(N, DAG, DCI, Subtarget);
  case X86ISD::VSHL:
  case X86ISD::VSRA:
  case X86ISD::VSRL:
    return combineVectorShiftVar(N, DAG, DCI, Subtarget);
  case X86ISD::VSHLI:
  case X86ISD::VSRAI:
  case X86ISD::VSRLI:
    return combineVectorShiftImm(N, DAG, DCI, Subtarget);
  case X86ISD::PINSRB:
  case X86ISD::PINSRW:      return combineVectorInsert(N, DAG, DCI, Subtarget);
  case X86ISD::SHUFP:       // Handle all target specific shuffles
  case X86ISD::INSERTPS:
  case X86ISD::EXTRQI:
  case X86ISD::INSERTQI:
  case X86ISD::PALIGNR:
  case X86ISD::VSHLDQ:
  case X86ISD::VSRLDQ:
  case X86ISD::BLENDI:
  case X86ISD::UNPCKH:
  case X86ISD::UNPCKL:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLHPS:
  case X86ISD::PSHUFB:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::MOVDDUP:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::VBROADCAST:
  case X86ISD::VPPERM:
  case X86ISD::VPERMI:
  case X86ISD::VPERMV:
  case X86ISD::VPERMV3:
  case X86ISD::VPERMIL2:
  case X86ISD::VPERMILPI:
  case X86ISD::VPERMILPV:
  case X86ISD::VPERM2X128:
  case X86ISD::SHUF128:
  case X86ISD::VZEXT_MOVL:
  case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
  case X86ISD::FMADD_RND:
  case X86ISD::FMSUB:
  case X86ISD::FMSUB_RND:
  case X86ISD::FNMADD:
  case X86ISD::FNMADD_RND:
  case X86ISD::FNMSUB:
  case X86ISD::FNMSUB_RND:
  case ISD::FMA: return combineFMA(N, DAG, DCI, Subtarget);
  case X86ISD::FMADDSUB_RND:
  case X86ISD::FMSUBADD_RND:
  case X86ISD::FMADDSUB:
  case X86ISD::FMSUBADD:    return combineFMADDSUB(N, DAG, DCI);
  case X86ISD::MOVMSK:      return combineMOVMSK(N, DAG, DCI, Subtarget);
  case X86ISD::MGATHER:
  case X86ISD::MSCATTER:    return combineX86GatherScatter(N, DAG, DCI);
  case ISD::MGATHER:
  case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI);
  case X86ISD::PCMPEQ:
  case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
  case X86ISD::KSHIFTL:
  case X86ISD::KSHIFTR:     return combineKSHIFT(N, DAG, DCI);
  }

  return SDValue();
}

bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;

  // There are no vXi8 shifts.
  if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
    return false;

  // TODO: Almost no 8-bit ops are desirable because they have no actual
  //       size/speed advantages vs. 32-bit ops, but they do have a major
  //       potential disadvantage by causing partial register stalls.
  //
  // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
  // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
  // Also see the comment in "IsDesirableToPromoteOp", where we additionally
  // check for a constant operand to the multiply.
  if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
    return false;

  // i16 instruction encodings are longer and some i16 instructions are slow,
  // so those are not desirable.
  if (VT == MVT::i16) {
    switch (Opc) {
    default:
      break;
    case ISD::LOAD:
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::ANY_EXTEND:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::SUB:
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
      return false;
    }
  }

  // Any legal type not explicitly accounted for above here is desirable.
  return true;
}

SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
                                                  SDValue Value, SDValue Addr,
                                                  SelectionDAG &DAG) const {
  const Module *M = DAG.getMachineFunction().getMMI().getModule();
  Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
  if (IsCFProtectionSupported) {
    // When control-flow branch protection is enabled, we need to add the
    // notrack prefix to the indirect branch. To do that we create an NT_BRIND
    // SDNode; during instruction selection its pattern converts it to a jmp
    // with the NoTrack prefix.
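    // For example, an indirect jump-table branch that would otherwise be
    // emitted as "jmpq *%rax" is emitted as "notrack jmpq *%rax", so the
    // branch target is exempt from endbranch (IBT) checking.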
    return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
  }

  return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
}

bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
                             isa<ConstantSDNode>(Op.getOperand(1));

  // i16 is legal, but undesirable since i16 instruction encodings are longer
  // and some i16 instructions are slow.
  // 8-bit multiply-by-constant can usually be expanded to something cheaper
  // using LEA and/or other ALU ops.
  if (VT != MVT::i16 && !Is8BitMulByConstant)
    return false;

  auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
    if (!Op.hasOneUse())
      return false;
    SDNode *User = *Op->use_begin();
    if (!ISD::isNormalStore(User))
      return false;
    auto *Ld = cast<LoadSDNode>(Load);
    auto *St = cast<StoreSDNode>(User);
    return Ld->getBasePtr() == St->getBasePtr();
  };

  auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
    if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
      return false;
    if (!Op.hasOneUse())
      return false;
    SDNode *User = *Op->use_begin();
    if (User->getOpcode() != ISD::ATOMIC_STORE)
      return false;
    auto *Ld = cast<AtomicSDNode>(Load);
    auto *St = cast<AtomicSDNode>(User);
    return Ld->getBasePtr() == St->getBasePtr();
  };

  bool Commute = false;
  switch (Op.getOpcode()) {
  default: return false;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
    if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
      return false;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    LLVM_FALLTHROUGH;
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    // Avoid disabling potential load folding opportunities.
    if (MayFoldLoad(N1) &&
        (!Commute || !isa<ConstantSDNode>(N0) ||
         (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
      return false;
    if (MayFoldLoad(N0) &&
        ((Commute && !isa<ConstantSDNode>(N1)) ||
         (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
      return false;
    if (IsFoldableAtomicRMW(N0, Op) ||
        (Commute && IsFoldableAtomicRMW(N1, Op)))
      return false;
  }
  }

  PVT = MVT::i32;
  return true;
}

bool X86TargetLowering::
    isDesirableToCombineBuildVectorToShuffleTruncate(
        ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {

  assert(SrcVT.getVectorNumElements() == ShuffleMask.size() &&
         "Element count mismatch");
  assert(
      Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
      "Shuffle Mask expected to be legal");

  // For 32-bit elements VPERMD is better than shuffle+truncate.
  // TODO: After we improve lowerBuildVector, add an exception for VPERMW.
  if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
    return false;

  if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask))
    return false;

  return true;
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

// Helper to match an asm string against whitespace-separated pieces.
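// For example, matchAsm("  bswap  $0", {"bswap", "$0"}) returns true, while
// matchAsm("bswapl $0", {"bswap", "$0"}) returns false because "bswap" only
// matches a prefix of the "bswapl" token.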
static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
  S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.

  for (StringRef Piece : Pieces) {
    if (!S.startswith(Piece)) // Check if the piece matches.
      return false;

    S = S.substr(Piece.size());
    StringRef::size_type Pos = S.find_first_not_of(" \t");
    if (Pos == 0) // The piece only matched a prefix of a longer token.
      return false;

    S = S.substr(Pos);
  }

  return S.empty();
}

static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {

  if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
    if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
        std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
        std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {

      if (AsmPieces.size() == 3)
        return true;
      else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
        return true;
    }
  }
  return false;
}

bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());

  const std::string &AsmStr = IA->getAsmString();

  IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    // FIXME: this should verify that we are targeting a 486 or better.  If not,
    // we will turn this bswap into something that will be lowered to logical
    // ops instead of emitting the bswap asm.  For now, we don't support 486 or
    // lower so don't worry about this.
    // bswap $0
    if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
        matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
        matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
      return IntrinsicLowering::LowerToByteSwap(CI);
    }

    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
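    // For example, IR-level inline asm such as
    //   call i16 asm "rorw $$8, ${0:w}",
    //        "=r,0,~{cc},~{dirflag},~{fpsr},~{flags}"(i16 %x)
    // is replaced with a call to llvm.bswap.i16.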
    if (CI->getType()->isIntegerTy(16) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
         matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
      AsmPieces.clear();
      StringRef ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(32) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
        matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
        matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
      AsmPieces.clear();
      StringRef ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }

    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
        if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
            matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
            matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}

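// Map a flag-output operand constraint such as "{@ccz}" (written in C source
// as, e.g., asm("..." : "=@ccz"(zf))) to the corresponding X86 condition
// code, or COND_INVALID if it is not an "@cc" constraint.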
static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
  X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
                           .Case("{@cca}", X86::COND_A)
                           .Case("{@ccae}", X86::COND_AE)
                           .Case("{@ccb}", X86::COND_B)
                           .Case("{@ccbe}", X86::COND_BE)
                           .Case("{@ccc}", X86::COND_B)
                           .Case("{@cce}", X86::COND_E)
                           .Case("{@ccz}", X86::COND_E)
                           .Case("{@ccg}", X86::COND_G)
                           .Case("{@ccge}", X86::COND_GE)
                           .Case("{@ccl}", X86::COND_L)
                           .Case("{@ccle}", X86::COND_LE)
                           .Case("{@ccna}", X86::COND_BE)
                           .Case("{@ccnae}", X86::COND_B)
                           .Case("{@ccnb}", X86::COND_AE)
                           .Case("{@ccnbe}", X86::COND_A)
                           .Case("{@ccnc}", X86::COND_AE)
                           .Case("{@ccne}", X86::COND_NE)
                           .Case("{@ccnz}", X86::COND_NE)
                           .Case("{@ccng}", X86::COND_LE)
                           .Case("{@ccnge}", X86::COND_L)
                           .Case("{@ccnl}", X86::COND_GE)
                           .Case("{@ccnle}", X86::COND_G)
                           .Case("{@ccno}", X86::COND_NO)
                           .Case("{@ccnp}", X86::COND_P)
                           .Case("{@ccns}", X86::COND_NS)
                           .Case("{@cco}", X86::COND_O)
                           .Case("{@ccp}", X86::COND_P)
                           .Case("{@ccs}", X86::COND_S)
                           .Default(X86::COND_INVALID);
  return Cond;
}

/// Given a constraint letter, return the type of constraint for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R':
    case 'q':
    case 'Q':
    case 'f':
    case 't':
    case 'u':
    case 'y':
    case 'x':
    case 'v':
    case 'Y':
    case 'l':
    case 'k': // AVX512 masking registers.
      return C_RegisterClass;
    case 'a':
    case 'b':
    case 'c':
    case 'd':
    case 'S':
    case 'D':
    case 'A':
      return C_Register;
    case 'I':
    case 'J':
    case 'K':
    case 'N':
    case 'G':
    case 'L':
    case 'M':
      return C_Immediate;
    case 'C':
    case 'e':
    case 'Z':
      return C_Other;
    default:
      break;
    }
  } else if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    default:
      break;
    case 'Y':
      switch (Constraint[1]) {
      default:
        break;
      case 'z':
      case '0':
        return C_Register;
      case 'i':
      case 'm':
      case 'k':
      case 't':
      case '2':
        return C_RegisterClass;
      }
    }
  } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
    return C_Other;
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    LLVM_FALLTHROUGH;
  case 'R':
  case 'q':
  case 'Q':
  case 'a':
  case 'b':
  case 'c':
  case 'd':
  case 'S':
  case 'D':
  case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f':
  case 't':
  case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget.hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'Y': {
    unsigned Size = StringRef(constraint).size();
    // When matching just 'Y', treat it as 'Yi' since the two are synonymous.
    char NextChar = Size == 2 ? constraint[1] : 'i';
    if (Size > 2)
      break;
    switch (NextChar) {
      default:
        return CW_Invalid;
      // XMM0
      case 'z':
      case '0':
        if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
          return CW_SpecificReg;
        return CW_Invalid;
      // Conditional OpMask regs (AVX512)
      case 'k':
        if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
          return CW_Register;
        return CW_Invalid;
      // Any MMX reg
      case 'm':
        if (type->isX86_MMXTy() && Subtarget.hasMMX())
          return weight;
        return CW_Invalid;
      // Any SSE reg when ISA >= SSE2, same as 'Y'
      case 'i':
      case 't':
      case '2':
        if (!Subtarget.hasSSE2())
          return CW_Invalid;
        break;
    }
    // Fall through (handle "Y" constraint).
    LLVM_FALLTHROUGH;
  }
  case 'v':
    if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
      weight = CW_Register;
    LLVM_FALLTHROUGH;
  case 'x':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
      weight = CW_Register;
    break;
  case 'k':
    // Enable conditional vector operations using %k<#> registers.
    if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (isa<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// Try to replace an X constraint, which matches anything, with another that
/// has more specific requirements based on the type of the corresponding
/// operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget.hasSSE2())
      return "Y";
    if (Subtarget.hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

// Lower @cc targets via setcc.
SDValue X86TargetLowering::LowerAsmOutputForConstraint(
    SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
    SelectionDAG &DAG) const {
  X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
  if (Cond == X86::COND_INVALID)
    return SDValue();
  // Check that return type is valid.
  if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
      OpInfo.ConstraintVT.getSizeInBits() < 8)
    report_fatal_error("Flag output operand is of invalid type");

  // Get EFLAGS register. Only update chain when copyfrom is glued.
  if (Flag.getNode()) {
    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
    Chain = Flag.getValue(1);
  } else
    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
  // Extract CC code.
  SDValue CC = getSETCC(Cond, Flag, DL, DAG);
  // Extend the i8 setcc result to the output type.
  SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);

  return Result;
}

/// Lower the specified operand into the Ops vector.
/// If it is invalid, don't add anything to Ops.
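/// The immediate constraints handled below accept: 'I' 0..31, 'J' 0..63,
/// 'K' a signed 8-bit value, 'L' 0xFF, 0xFFFF or (in 64-bit mode) 0xFFFFFFFF,
/// 'M' 0..3, 'N' 0..255, 'O' 0..127, 'e' a sign-extended 32-bit value and
/// 'Z' a zero-extended 32-bit value.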
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
      BooleanContent BCont = getBooleanContents(MVT::i64);
      ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
                                    : ISD::SIGN_EXTEND;
      int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
                                                  : CST->getSExtValue();
      Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup.  These can't
    // be used as immediates.
    if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
      // If we require an extra load to get this address, as in PIC mode, we
      // can't accept it.
      if (isGlobalStubReference(
              Subtarget.classifyGlobalReference(GA->getGlobal())))
        return;
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

/// Check if \p RC is a general purpose register class.
/// I.e., GR* or one of their variant.
static bool isGRClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::GR8RegClass) ||
         RC.hasSuperClassEq(&X86::GR16RegClass) ||
         RC.hasSuperClassEq(&X86::GR32RegClass) ||
         RC.hasSuperClassEq(&X86::GR64RegClass) ||
         RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
}

/// Check if \p RC is a vector register class.
/// I.e., FR* / VR* or one of their variant.
static bool isFRClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
         RC.hasSuperClassEq(&X86::FR64XRegClass) ||
         RC.hasSuperClassEq(&X86::VR128XRegClass) ||
         RC.hasSuperClassEq(&X86::VR256XRegClass) ||
         RC.hasSuperClassEq(&X86::VR512RegClass);
}

/// Check if \p RC is a mask register class.
/// I.e., VK* or one of their variant.
static bool isVKClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::VK1RegClass) ||
         RC.hasSuperClassEq(&X86::VK2RegClass) ||
         RC.hasSuperClassEq(&X86::VK4RegClass) ||
         RC.hasSuperClassEq(&X86::VK8RegClass) ||
         RC.hasSuperClassEq(&X86::VK16RegClass) ||
         RC.hasSuperClassEq(&X86::VK32RegClass) ||
         RC.hasSuperClassEq(&X86::VK64RegClass);
}

std::pair<unsigned, const TargetRegisterClass *>
X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // 'A' means [ER]AX + [ER]DX.
    case 'A':
      if (Subtarget.is64Bit())
        return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
      assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
             "Expecting 64, 32 or 16 bit subtarget");
      return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);

      // TODO: Slight differences here in allocation order and leaving
      // RIP in the class. Do they matter any more here than they do
      // in the normal allocation?
    case 'k':
      if (Subtarget.hasAVX512()) {
        if (VT == MVT::i1)
          return std::make_pair(0U, &X86::VK1RegClass);
        if (VT == MVT::i8)
          return std::make_pair(0U, &X86::VK8RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::VK16RegClass);
      }
      if (Subtarget.hasBWI()) {
        if (VT == MVT::i32)
          return std::make_pair(0U, &X86::VK32RegClass);
        if (VT == MVT::i64)
          return std::make_pair(0U, &X86::VK64RegClass);
      }
      break;
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget.is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      LLVM_FALLTHROUGH;
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget.is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':  // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget.hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget.hasSSE2()) break;
      LLVM_FALLTHROUGH;
    case 'v':
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget.hasSSE1()) break;
      bool VConstraint = (Constraint[0] == 'v');

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::FR32XRegClass);
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::FR64XRegClass);
        return std::make_pair(0U, &X86::FR64RegClass);
      // TODO: Handle i128 in FR128RegClass after it is tested well.
      // Vector types and fp128.
      case MVT::f128:
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::VR128XRegClass);
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::VR256XRegClass);
        if (Subtarget.hasAVX())
          return std::make_pair(0U, &X86::VR256RegClass);
        break;
      case MVT::v8f64:
      case MVT::v16f32:
      case MVT::v16i32:
      case MVT::v8i64:
        if (!Subtarget.hasAVX512()) break;
        if (VConstraint)
          return std::make_pair(0U, &X86::VR512RegClass);
        return std::make_pair(0U, &X86::VR512_0_15RegClass);
      }
      break;
    }
  } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
    switch (Constraint[1]) {
    default:
      break;
    case 'i':
    case 't':
    case '2':
      return getRegForInlineAsmConstraint(TRI, "Y", VT);
    case 'm':
      if (!Subtarget.hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'z':
    case '0':
      if (!Subtarget.hasSSE1()) break;
      return std::make_pair(X86::XMM0, &X86::VR128RegClass);
    case 'k':
      // These register classes exclude k0, which cannot be used as a write
      // mask for masked vector operations.
      if (Subtarget.hasAVX512()) {
        if (VT == MVT::i1)
          return std::make_pair(0U, &X86::VK1WMRegClass);
        if (VT == MVT::i8)
          return std::make_pair(0U, &X86::VK8WMRegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::VK16WMRegClass);
      }
      if (Subtarget.hasBWI()) {
        if (VT == MVT::i32)
          return std::make_pair(0U, &X86::VK32WMRegClass);
        if (VT == MVT::i64)
          return std::make_pair(0U, &X86::VK64WMRegClass);
      }
      break;
    }
  }

  if (parseConstraintCode(Constraint) != X86::COND_INVALID)
    return std::make_pair(0U, &X86::GR32RegClass);

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Map {st(0)} .. {st(7)} to the corresponding FP register.
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' && Constraint[6] == '}') {
      // st(7) is not allocatable and thus not a member of RFP80. Return
      // singleton class in cases where we have a reference to it.
      if (Constraint[4] == '7')
        return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
      return std::make_pair(X86::FP0 + Constraint[4] - '0',
                            &X86::RFP80RegClass);
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint))
      return std::make_pair(X86::FP0, &X86::RFP80RegClass);

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint))
      return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);

    // dirflag -> DF
    if (StringRef("{dirflag}").equals_lower(Constraint))
      return std::make_pair(X86::DF, &X86::DFCCRRegClass);

    // fpsr -> FPSW
    if (StringRef("{fpsr}").equals_lower(Constraint))
      return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);

    return Res;
  }

  // Make sure it isn't a register that requires 64-bit mode.
  if (!Subtarget.is64Bit() &&
      (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
      TRI->getEncodingValue(Res.first) >= 8) {
    // Register requires REX prefix, but we're in 32-bit mode.
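    // For example, a request for {r8d} or {xmm12} on a 32-bit target is
    // rejected here.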
    return std::make_pair(0, nullptr);
  }

  // Make sure it isn't a register that requires AVX512.
  if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
      TRI->getEncodingValue(Res.first) & 0x10) {
    // Register requires EVEX prefix.
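    // For example, {xmm16} .. {xmm31} are rejected without AVX512.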
    return std::make_pair(0, nullptr);
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  // MVT::Other is used to specify clobber names.
  if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
    return Res;   // Correct type already, nothing to do.

  // Get a matching integer of the correct size, i.e. "ax" with MVT::i32 should
  // return "eax". This should even work for things like getting 64-bit integer
  // registers when given an f64 type.
  const TargetRegisterClass *Class = Res.second;
  // The generic code will match the first register class that contains the
  // given register. Thus, based on the ordering of the tablegened file,
  // the "plain" GR classes might not come first.
  // Therefore, use a helper method.
  if (isGRClass(*Class)) {
    unsigned Size = VT.getSizeInBits();
    if (Size == 1) Size = 8;
    unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
    if (DestReg > 0) {
      bool is64Bit = Subtarget.is64Bit();
      const TargetRegisterClass *RC =
          Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
        : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
        : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
        : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
        : nullptr;
      if (Size == 64 && !is64Bit) {
        // Model GCC's behavior here and select a fixed pair of 32-bit
        // registers.
        switch (DestReg) {
        case X86::RAX:
          return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
        case X86::RDX:
          return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
        case X86::RCX:
          return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
        case X86::RBX:
          return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
        case X86::RSI:
          return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
        case X86::RDI:
          return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
        case X86::RBP:
          return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
        default:
          return std::make_pair(0, nullptr);
        }
      }
      if (RC && RC->contains(DestReg))
        return std::make_pair(DestReg, RC);
      return Res;
    }
    // No register found/type mismatch.
    return std::make_pair(0, nullptr);
  } else if (isFRClass(*Class)) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.  This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32XRegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
      Res.second = &X86::VR128XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
      Res.second = &X86::VR256XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
      Res.second = &X86::VR512RegClass;
    else {
      // Type mismatch and not a clobber: return an error.
      Res.first = 0;
      Res.second = nullptr;
    }
  } else if (isVKClass(*Class)) {
    if (VT == MVT::i1)
      Res.second = &X86::VK1RegClass;
    else if (VT == MVT::i8)
      Res.second = &X86::VK8RegClass;
    else if (VT == MVT::i16)
      Res.second = &X86::VK16RegClass;
    else if (VT == MVT::i32)
      Res.second = &X86::VK32RegClass;
    else if (VT == MVT::i64)
      Res.second = &X86::VK64RegClass;
    else {
      // Type mismatch and not a clobber: return an error.
      Res.first = 0;
      Res.second = nullptr;
    }
  }

  return Res;
}

int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
                                            const AddrMode &AM, Type *Ty,
                                            unsigned AS) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation)
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just 1 allocation, i.e., freeing allocations for other operations
  // and having less micro operations to execute.
  //
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(DL, AM, Ty, AS))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
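    // For example, (%rsi) has Scale == 0 and costs 0, while (%rsi,%rdx,4)
    // has a non-zero Scale and costs 1.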
    return AM.Scale != 0;
  return -1;
}

bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
  // Integer division on x86 is expensive. However, when aggressively optimizing
  // for code size, we prefer to use a div instruction, as it is usually smaller
  // than the alternative sequence.
  // The exception to this is vector division. Since x86 doesn't have vector
  // integer division, leaving the division as-is is a loss even in terms of
  // size, because it will have to be scalarized, while the alternative code
  // sequence can be performed in vector form.
  bool OptSize =
      Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
  return OptSize && !VT.isVector();
}

void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (!Subtarget.is64Bit())
    return;

  // Update IsSplitCSR in X86MachineFunctionInfo.
  X86MachineFunctionInfo *AFI =
      Entry->getParent()->getInfo<X86MachineFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

void X86TargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (X86::GR64RegClass.contains(*I))
      RC = &X86::GR64RegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(
        Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
        "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

bool X86TargetLowering::supportSwiftError() const {
  return Subtarget.is64Bit();
}

/// Returns the name of the symbol used to emit stack probes or the empty
/// string if not applicable.
StringRef
X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
  // If the function specifically requests stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();

  // Generally, if we aren't on Windows, the platform ABI does not include
  // support for stack probes, so don't emit them.
  if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
    return "";

  // We need a stack probe to conform to the Windows ABI. Choose the right
  // symbol.
  if (Subtarget.is64Bit())
    return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
  return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
}

unsigned
X86TargetLowering::getStackProbeSize(MachineFunction &MF) const {
  // The default stack probe size is 4096 if the function has no
  // "stack-probe-size" attribute.
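  // For example, a function carrying
  //   attributes #0 = { "stack-probe-size"="8192" }
  // (e.g. from clang's -mstack-probe-size=8192) yields a probe size of 8192.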
  unsigned StackProbeSize = 4096;
  const Function &Fn = MF.getFunction();
  if (Fn.hasFnAttribute("stack-probe-size"))
    Fn.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  return StackProbeSize;
}