//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
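  // (For example, an integer "equal" test would typically end up with
  // CCValid == SystemZ::CCMASK_ICMP and CCMask == SystemZ::CCMASK_CMP_EQ.)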
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
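// Clearing the kill flag is what makes early reuse safe: if an earlier
// copy of the operand kept isKill set, it would incorrectly claim that the
// register dies before its real final use.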
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
  } else {
    addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  }
  if (Subtarget.hasVectorEnhancements1())
    addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
  else
    addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(Align(2));
  // For performance reasons we prefer 16-byte alignment.
  setPrefFunctionAlignment(Align(16));

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC,     VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Support addition/subtraction with overflow.
      setOperationAction(ISD::SADDO, VT, Custom);
      setOperationAction(ISD::SSUBO, VT, Custom);

      // Support addition/subtraction with carry.
      setOperationAction(ISD::UADDO, VT, Custom);
      setOperationAction(ISD::USUBO, VT, Custom);

      // Support carry in as value rather than glue.
      setOperationAction(ISD::ADDCARRY, VT, Custom);
      setOperationAction(ISD::SUBCARRY, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
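      // (On SystemZ the serialization is a "bcr 14,0", or "bcr 15,0" on
      // machines without the fast-serialization facility.)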
      setOperationAction(ISD::ATOMIC_LOAD,  VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ,            VT, Expand);
      setOperationAction(ISD::ROTR,            VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);

      // Mirror those settings for STRICT_FP_TO_[SU]INT.  Note that these all
      // default to Expand, so need to be modified to Legal where appropriate.
      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Legal);
      if (Subtarget.hasFPExtension())
        setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Legal);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);

  // Even though i128 is not a legal type, we still need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD,     MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE,    MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }
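  // (Illustrative example: on z10 a "uitofp i32 %x to double" is thus
  // zero-extended to i64, where the value is always non-negative, and can
  // then use the signed conversion path, e.g. CDGBR for double.)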

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);
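  // (Illustrative note: FLOGR counts the leading zero bits of a 64-bit
  // value, so the promoted i32 forms compensate for the 32 extra leading
  // zeros introduced by zero extension.)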

  // On z15 we have native support for a 64-bit CTPOP.
  if (Subtarget.hasMiscellaneousExtensions3()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Promote);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  }

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool,     PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress,    PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress,     PtrVT, Custom);
  setOperationAction(ISD::JumpTable,        PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);
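  // (Illustrative note: the low 160 bytes of the frame are reserved for
  // use by called functions, so dynamically allocated space has to sit
  // above them, and GET_DYNAMIC_AREA_OFFSET exposes that displacement.)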

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
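  // (Illustrative example: a write prefetch such as
  // "llvm.prefetch(%p, 1, 3, 1)" can be emitted as "pfd 2, 0(%rX)"; the
  // first PFD operand encodes fetch (1) versus store (2) intent, and the
  // operands shown here are illustrative only.)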

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such.  In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
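      // (Illustrative note: this can be done with VLVG, which loads a GPR
      // into a selected element of a vector register.)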

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
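      // (Illustrative note: the VUPH*/VUPL* unpacks sign-extend and the
      // VUPLH*/VUPLL* unpacks zero-extend the high or low half of a
      // vector, so a full extension is a short chain of unpacks.)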

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);
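      // (Illustrative example: "shl <4 x i32> %v, splat(%amt)" can use a
      // single VESLF with the shift amount taken from a GPR, avoiding the
      // need to materialize a vector of shift amounts.)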

      // At present ROTL isn't matched by DAGCombiner.  ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
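      // (Illustrative example: unsigned "less than" becomes VCHL with the
      // operands swapped, and "not equal" becomes a compare-equal whose
      // result is then inverted.)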
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f64, Legal);
  }

  if (Subtarget.hasVectorEnhancements2()) {
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4f32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4f32, Legal);

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f32, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);

      // Handle constrained floating-point operations.
      setOperationAction(ISD::STRICT_FADD, VT, Legal);
      setOperationAction(ISD::STRICT_FSUB, VT, Legal);
      setOperationAction(ISD::STRICT_FMUL, VT, Legal);
      setOperationAction(ISD::STRICT_FDIV, VT, Legal);
      setOperationAction(ISD::STRICT_FMA, VT, Legal);
      setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
      setOperationAction(ISD::STRICT_FRINT, VT, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
        setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
        setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
        setOperationAction(ISD::STRICT_FROUND, VT, Legal);
        setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
      }
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
    for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                     MVT::v4f32, MVT::v2f64 }) {
      setOperationAction(ISD::STRICT_FMAXNUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, VT, Legal);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32,  Legal);
  setOperationAction(ISD::FMA, MVT::f64,  Legal);
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FMA, MVT::f128, Legal);
  else
    setOperationAction(ISD::FMA, MVT::f128, Expand);

  // We don't have a copysign instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have an extending load instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64,  MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY,  MVT::Other, Custom);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::FP_EXTEND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SDIV);
  setTargetDAGCombine(ISD::UDIV);
  setTargetDAGCombine(ISD::SREM);
  setTargetDAGCombine(ISD::UREM);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't win when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

// Return true if the constant can be generated with a vector instruction,
// such as VGBM, VGM or VREPI.
bool SystemZVectorConstantInfo::isVectorConstantLegal(
    const SystemZSubtarget &Subtarget) {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
    return false;

  // Try using VECTOR GENERATE BYTE MASK.  This is the architecturally-
  // preferred way of creating all-zero and all-one vectors, so give it
  // priority over other methods below.
  unsigned Mask = 0;
  unsigned I = 0;
  for (; I < SystemZ::VectorBytes; ++I) {
    uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
    if (Byte == 0xff)
      Mask |= 1ULL << I;
    else if (Byte != 0)
      break;
  }
  if (I == SystemZ::VectorBytes) {
    Opcode = SystemZISD::BYTE_MASK;
    OpVals.push_back(Mask);
    VecVT = MVT::getVectorVT(MVT::getIntegerVT(8), 16);
    return true;
  }
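  // (Illustrative example: an all-ones vector yields Mask == 0xffff and
  // becomes a VGBM with an all-ones immediate, while a splat of the i16
  // value 0xff00 sets alternating mask bits instead.)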

  if (SplatBitSize > 64)
    return false;

  auto tryValue = [&](uint64_t Value) -> bool {
    // Try VECTOR REPLICATE IMMEDIATE
    int64_t SignedValue = SignExtend64(Value, SplatBitSize);
    if (isInt<16>(SignedValue)) {
      OpVals.push_back(((unsigned) SignedValue));
      Opcode = SystemZISD::REPLICATE;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    // Try VECTOR GENERATE MASK
    unsigned Start, End;
    if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
      // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
      // denoting 1 << 63 and 63 denoting 1.  Convert them to bit numbers for
      // a SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
      OpVals.push_back(Start - (64 - SplatBitSize));
      OpVals.push_back(End - (64 - SplatBitSize));
      Opcode = SystemZISD::ROTATE_MASK;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    return false;
  };

  // First try assuming that any undefined bits above the highest set bit
  // and below the lowest set bit are 1s.  This increases the likelihood of
  // being able to use a sign-extended element value in VECTOR REPLICATE
  // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
  uint64_t SplatBitsZ = SplatBits.getZExtValue();
  uint64_t SplatUndefZ = SplatUndef.getZExtValue();
  uint64_t Lower =
      (SplatUndefZ & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
  uint64_t Upper =
      (SplatUndefZ & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
  if (tryValue(SplatBitsZ | Upper | Lower))
    return true;

  // Now try assuming that any undefined bits between the first and
  // last defined set bits are set.  This increases the chances of
  // using a non-wraparound mask.
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(APFloat FPImm) {
  IntBits = FPImm.bitcastToAPInt().zextOrSelf(128);
  isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());

  // Find the smallest splat.
  SplatBits = FPImm.bitcastToAPInt();
  unsigned Width = SplatBits.getBitWidth();
  while (Width > 8) {
    unsigned HalfSize = Width / 2;
    APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatBits.trunc(HalfSize);

    // If the two halves do not match, stop here.
    if (HighValue != LowValue || HalfSize < 8)
      break;

    SplatBits = HighValue;
    Width = HalfSize;
  }
  SplatUndef = 0;
  SplatBitSize = Width;
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(BuildVectorSDNode *BVN) {
  assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
  bool HasAnyUndefs;

  // Get IntBits by finding the 128-bit splat.
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
                       true);

  // Get SplatBits by finding the 8-bit or greater splat.
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                       true);
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool ForCodeSize) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  if (Imm.isZero() || Imm.isNegZero())
    return true;

  return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
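  // (Illustrative note: CGFI compares against a sign-extended 32-bit
  // immediate and CLGFI against a zero-extended one, matching the
  // isInt<32> and isUInt<32> tests below.)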
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
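  // (Illustrative note: both instructions take a zero-extended 32-bit
  // immediate; a negative addition can be handled as an SLGFI of -Imm.)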
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};

// Return the desired addressing mode for a Load whose only one use (in the
// same block) is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support, a Load->Store combination may be combined into
  // either an MVC or vector operations, and it seems to work best to
  // allow the vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
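  // (Illustrative note: MVC takes a base register plus an unsigned 12-bit
  // displacement and no index register, hence both long displacements and
  // index registers are disabled for this case.)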
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
            // Comparison of memory with 16 bit signed / unsigned immediate
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        // Load->Store
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        // Load->Store
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE type
    // instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined into a
    // VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;
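  // (Illustrative example: "base + index + 0x7ffff" is representable,
  // while "base + 4*index" is not, since SystemZ has no scaled-index
  // addressing; the scale check below enforces this.)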

  AddressingMode SupportedAM(true, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;
  else
    // Indexing is OK but no scale factor can be applied.
    return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
    case 'v': // Vector register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Immediate;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (type->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'v': // Vector register
    if ((type->isVectorTy() || type->isFloatingPointTy()) &&
        Subtarget.hasVector())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
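// (Illustrative example: the constraint "{r5}" with VT == MVT::i64 maps
// through SystemZMC::GR64Regs to SystemZ::R5D and the GR64 register
// class.)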
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < Size && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

    case 'v': // Vector register
      if (Subtarget.hasVector()) {
        if (VT == MVT::f32)
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
      }
      break;
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs, 16);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs, 16);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs, 16);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs, 16);
    }
    if (Constraint[1] == 'v') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
                                   SystemZMC::VR32Regs, 32);
      if (VT == MVT::f64)
        return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
                                   SystemZMC::VR64Regs, 32);
      return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
                                 SystemZMC::VR128Regs, 32);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
  CallingConv::ID) const {
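  // (Illustrative note: R0 and R1 are call-clobbered and never carry
  // arguments in the s390x ELF ABI, and R14 is written by the call
  // instruction itself to hold the return address, so all three are safe
  // scratch registers around a call.)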
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                           SystemZ::R14D, 0 };
  return ScratchRegs;
}

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types.  If the user
// attempts to use such types as a function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getValVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      Register VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
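      // (Illustrative note: SystemZ is big-endian, so right-justifying a
      // 4-byte value in its 8-byte slot means it lives at offset +4, hence
      // the adjustment below.)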
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert(Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    Register Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      assert(Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Special case that we cannot easily detect in RetCC_SystemZ since
  // i128 is not a legal type.
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
      return false;

  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    Register Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) result.  Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
                                      unsigned &CCValid) {
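  // For an INTRINSIC_W_CHAIN node, operand 0 is the chain and operand 1
  // is the intrinsic ID.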
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_tbegin:
    Opcode = SystemZISD::TBEGIN;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tbegin_nofloat:
    Opcode = SystemZISD::TBEGIN_NOFLOAT;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tend:
    Opcode = SystemZISD::TEND;
    CCValid = SystemZ::CCMASK_TEND;
    return true;

  default:
    return false;
  }
}

// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final result.  Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
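  // Without a chain, the intrinsic ID is the first operand.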
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
    Opcode = SystemZISD::PACKS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    Opcode = SystemZISD::PACKLS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
    Opcode = SystemZISD::VICMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
    Opcode = SystemZISD::VICMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
    Opcode = SystemZISD::VICMPHLS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vtm:
    Opcode = SystemZISD::VTM;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
    Opcode = SystemZISD::VFAE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
    Opcode = SystemZISD::VFAEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
    Opcode = SystemZISD::VFEE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
    Opcode = SystemZISD::VFEEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
    Opcode = SystemZISD::VFENE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
    Opcode = SystemZISD::VFENEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
    Opcode = SystemZISD::VISTR_CC;
    CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
    return true;

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
    Opcode = SystemZISD::VSTRC_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
    Opcode = SystemZISD::VSTRCZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:
    Opcode = SystemZISD::VSTRS_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:
    Opcode = SystemZISD::VSTRSZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:
    Opcode = SystemZISD::VFCMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:
    Opcode = SystemZISD::VFCMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:
    Opcode = SystemZISD::VFCMPHES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:
    Opcode = SystemZISD::VFTCI;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_tdc:
    Opcode = SystemZISD::TDC;
    CCValid = SystemZ::CCMASK_TDC;
    return true;

  default:
    return false;
  }
}

// Emit an intrinsic with chain and an explicit CC register result.
static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op,
                                           unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  Ops.push_back(Op.getOperand(0));
  for (unsigned I = 2; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
  SDValue OldChain = SDValue(Op.getNode(), 1);
  SDValue NewChain = SDValue(Intr.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
  return Intr.getNode();
}

// Emit an intrinsic with an explicit CC register result.
static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op,
                                   unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  for (unsigned I = 1; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops);
  return Intr.getNode();
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
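// For example, ISD::SETUGT expands via CONV(GT) below to
// CCMASK_CMP_UO | CCMASK_CMP_GT.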
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
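    // Toggling the EQ bit moves the boundary by one: for example,
    // "x < 1" becomes "x <= 0" and "x > -1" becomes "x >= 0".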
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
  }
}

// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
                             Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!C.Op0.hasOneUse() ||
      C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Value = ConstOp1->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    // Make sure that ConstOp1 is in range of C.Op0.
    int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
    if (C.ICmpType != SystemZICMP::SignedOnly) {
      // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
      else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // If the constant is in range, we can use any comparison.
    C.ICmpType = SystemZICMP::Any;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
    C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
                           Load->getBasePtr(), Load->getPointerInfo(),
                           Load->getMemoryVT(), Load->getAlignment(),
                           Load->getMemOperand()->getFlags());
    // Update the chain uses.
    DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1));
  }

  // Make sure that the second operand is an i32 with the right value.
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
    C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
}

// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}

// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (C.Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(C.Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;

  // Also keep natural memory operands second if the loaded value is
  // only used here.  Several comparisons have memory forms.
  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
    return false;

  // Look for cases where C.Op0 is a single-use load and C.Op1 isn't.
  // In that case we generally prefer the memory operand to be second.
  if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (C.ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(ConstOp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }

  // Try to promote the use of CGFR and CLGFR.
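  // These instructions sign- or zero-extend their 32-bit second operand,
  // so the extension should end up as operand 1 after the swap.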
  unsigned Opcode0 = C.Op0.getOpcode();
  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly &&
      Opcode0 == ISD::AND &&
      C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}

// Return a version of comparison CC mask CCMask in which the LT and GT
// actions are swapped.
static unsigned reverseCCMask(unsigned CCMask) {
  return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
          (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_UO));
}

// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed.  In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
                                 Comparison &C) {
  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
      C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
          ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
           (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
        C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
        return;
      }
    }
  }
}

// Check whether C compares a floating-point value with zero and whether
// that floating-point value is also negated.  If so, we can use the
// negation to set CC, avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
        C.Op0 = SDValue(N, 0);
        C.CCMask = reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}

// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended.  In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
  // Check for a comparison between (shl X, 32) and 0.
  if (C.Op0.getOpcode() == ISD::SHL &&
      C.Op0.getValueType() == MVT::i64 &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
      // See whether X has any SIGN_EXTEND_INREG uses.
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}

// If C compares the truncation of an extending load, try to compare
// the untruncated value instead.  This exposes more opportunities to
// reuse CC.
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
                               Comparison &C) {
  if (C.Op0.getOpcode() == ISD::TRUNCATE &&
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
      unsigned Type = L->getExtensionType();
      if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
          (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
        C.Op0 = C.Op0.getOperand(0);
        C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
      }
    }
  }
}

// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}

// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type ICmpType between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands.  If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
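  // For example, Mask == 0x0ff0 gives Low == 0x0010 and High == 0x0800.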

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}

// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
                                   Comparison &C) {
  // Check that we have a comparison with a constant.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1)
    return;
  uint64_t CmpVal = ConstOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = nullptr;
  if (C.Op0.getOpcode() == ISD::AND) {
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
    // There is no instruction to compare with a 64-bit immediate, so use
    // TMHH instead if possible.  We need an unsigned ordered comparison
    // with an i64 immediate.
    if (NewC.Op0.getValueType() != MVT::i64 ||
        NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
        NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
        NewC.ICmpType == SystemZICMP::SignedOnly)
      return;
    // Convert LE and GT comparisons into LT and GE.
    if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
        NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
      if (CmpVal == uint64_t(-1))
        return;
      CmpVal += 1;
      NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    }
    // If the low N bits of Op1 are zero, then the low N bits of Op0 can
    // be masked off without changing the result.
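    // (CmpVal & -CmpVal) isolates the lowest set bit of CmpVal; negating
    // that value then sets every bit from that position upwards.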
    MaskVal = -(CmpVal & -CmpVal);
    NewC.ICmpType = SystemZICMP::UnsignedOnly;
  }
  if (!MaskVal)
    return;

  // Check whether the combination of mask, comparison value and comparison
  // type are suitable.
  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
  if (NewC.ICmpType != SystemZICMP::SignedOnly &&
      NewC.Op0.getOpcode() == ISD::SHL &&
      isSimpleShift(NewC.Op0, ShiftVal) &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
      (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                        MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
  } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
             NewC.Op0.getOpcode() == ISD::SRL &&
             isSimpleShift(NewC.Op0, ShiftVal) &&
             (MaskVal << ShiftVal != 0) &&
             ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
             (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  } else {
    NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
                                     NewC.ICmpType);
    if (!NewCCMask)
      return;
  }

  // Go ahead and make the change.
  C.Opcode = SystemZISD::TM;
  C.Op0 = NewC.Op0;
  if (Mask && Mask->getZExtValue() == MaskVal)
    C.Op1 = SDValue(Mask, 0);
  else
    C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
  C.CCValid = SystemZ::CCMASK_TM;
  C.CCMask = NewCCMask;
}

// See whether the comparison argument contains a redundant AND
// and remove it if so.  This sometimes happens due to the generic
// BRCOND expansion.
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
                                  Comparison &C) {
  if (C.Op0.getOpcode() != ISD::AND)
    return;
  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask)
    return;
  KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0));
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
    return;

  C.Op0 = C.Op0.getOperand(0);
}

// Return a Comparison that tests the condition-code result of intrinsic
// node Call against constant integer CC using comparison code Cond.
// Opcode is the opcode of the SystemZISD operation for the intrinsic
// and CCValid is the set of possible condition-code results.
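// For example, testing for "CC == 1" with SETEQ produces
// CCMask == 1 << 2, i.e. SystemZ::CCMASK_1 (before masking with CCValid).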
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
                                  SDValue Call, unsigned CCValid, uint64_t CC,
                                  ISD::CondCode Cond) {
  Comparison C(Call, SDValue());
  C.Opcode = Opcode;
  C.CCValid = CCValid;
  if (Cond == ISD::SETEQ)
    // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
  else if (Cond == ISD::SETNE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
  else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
    // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
  else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
  else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
    // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
  else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  else
    llvm_unreachable("Unexpected integer comparison type");
  C.CCMask &= CCValid;
  return C;
}

// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                         ISD::CondCode Cond, const SDLoc &DL) {
  if (CmpOp1.getOpcode() == ISD::Constant) {
    uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
    unsigned Opcode, CCValid;
    if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
        isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
    if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
        isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
  }
  Comparison C(CmpOp0, CmpOp1);
  C.CCMask = CCMaskForCondCode(Cond);
  if (C.Op0.getValueType().isFloatingPoint()) {
    C.CCValid = SystemZ::CCMASK_FCMP;
    C.Opcode = SystemZISD::FCMP;
    adjustForFNeg(C);
  } else {
    C.CCValid = SystemZ::CCMASK_ICMP;
    C.Opcode = SystemZISD::ICMP;
    // Choose the type of comparison.  Equality and inequality tests can
    // use either signed or unsigned comparisons.  The choice also doesn't
    // matter if both sign bits are known to be clear.  In those cases we
    // want to give the main isel code the freedom to choose whichever
    // form fits best.
    if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
        C.CCMask == SystemZ::CCMASK_CMP_NE ||
        (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
      C.ICmpType = SystemZICMP::Any;
    else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
      C.ICmpType = SystemZICMP::UnsignedOnly;
    else
      C.ICmpType = SystemZICMP::SignedOnly;
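    // An integer comparison is never unordered; the UO bit was only
    // needed above to choose the comparison type.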
    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
    adjustForRedundantAnd(DAG, DL, C);
    adjustZeroCmp(DAG, DL, C);
    adjustSubwordCmp(DAG, DL, C);
    adjustForSubtraction(DAG, DL, C);
    adjustForLTGFR(C);
    adjustICmpTruncate(DAG, DL, C);
  }

  if (shouldSwapCmpOperands(C)) {
    std::swap(C.Op0, C.Op1);
    C.CCMask = reverseCCMask(C.CCMask);
  }

  adjustForTestUnderMask(DAG, DL, C);
  return C;
}

// Emit the comparison instruction described by C.
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (!C.Op1.getNode()) {
    SDNode *Node;
    switch (C.Op0.getOpcode()) {
    case ISD::INTRINSIC_W_CHAIN:
      Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode);
      return SDValue(Node, 0);
    case ISD::INTRINSIC_WO_CHAIN:
      Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode);
      return SDValue(Node, Node->getNumValues() - 1);
    default:
      llvm_unreachable("Invalid comparison operands");
    }
  }
  if (C.Opcode == SystemZISD::ICMP)
    return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1,
                       DAG.getTargetConstant(C.ICmpType, DL, MVT::i32));
  if (C.Opcode == SystemZISD::TM) {
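    // The memory form of TEST UNDER MASK cannot distinguish the two
    // "mixed" CC values, so the register forms must be used whenever the
    // mask treats CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1
    // differently.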
    bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
                         bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
    return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
                       DAG.getTargetConstant(RegisterOnly, DL, MVT::i32));
  }
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
}

// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits.  Extend is the extension type to use.  Store the high part
// in Hi and the low part in Lo.
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
                            SDValue Op0, SDValue Op1, SDValue &Hi,
                            SDValue &Lo) {
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
                   DAG.getConstant(32, DL, MVT::i64));
  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}

// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
// and Opcode performs the GR128 operation.  Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                             unsigned Opcode, SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
  bool Is32Bit = is32Bit(VT);
  Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
  Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}

// Return an i32 value that is 1 if the CC value produced by CCReg is
// in the mask CCMask and 0 otherwise.  CC is known to have a value
// in CCValid, so other values can be ignored.
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
                         unsigned CCValid, unsigned CCMask) {
  SDValue Ops[] = {DAG.getConstant(1, DL, MVT::i32),
                   DAG.getConstant(0, DL, MVT::i32),
                   DAG.getTargetConstant(CCValid, DL, MVT::i32),
                   DAG.getTargetConstant(CCMask, DL, MVT::i32), CCReg};
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
}

// Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
// be done directly.  IsFP is true if CC is for a floating-point rather than
// integer comparison.
static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
  switch (CC) {
  case ISD::SETOEQ:
  case ISD::SETEQ:
    return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;

  case ISD::SETOGE:
  case ISD::SETGE:
    return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);

  case ISD::SETOGT:
  case ISD::SETGT:
    return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;

  case ISD::SETUGT:
    return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;

  default:
    return 0;
  }
}

// Return the SystemZISD vector comparison operation for CC or its inverse,
// or 0 if neither can be done directly.  Indicate in Invert whether the
// result is for the inverse of CC.  IsFP is true if CC is for a
// floating-point rather than integer comparison.
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
                                            bool &Invert) {
  if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
    Invert = false;
    return Opcode;
  }

  CC = ISD::getSetCCInverse(CC, !IsFP);
  if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
    Invert = true;
    return Opcode;
  }

  return 0;
}

// Return a v2f64 that contains the extended form of elements Start and Start+1
// of v4f32 value Op.
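// The shuffle places the requested elements in the even-numbered lanes,
// which are the ones that VEXTEND widens.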
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
                                  SDValue Op) {
  int Mask[] = { Start, -1, Start + 1, -1 };
  Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
  return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
}

// Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
// producing a result of type VT.
SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                                            const SDLoc &DL, EVT VT,
                                            SDValue CmpOp0,
                                            SDValue CmpOp1) const {
  // There is no hardware support for v4f32 (unless we have the vector
  // enhancements facility 1), so extend the vector into two v2f64s
  // and compare those.
  if (CmpOp0.getValueType() == MVT::v4f32 &&
      !Subtarget.hasVectorEnhancements1()) {
    SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0);
    SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0);
    SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1);
    SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1);
    SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
    SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
    return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
  }
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
}

// Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
// an integer mask of type VT.
SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
                                                const SDLoc &DL, EVT VT,
                                                ISD::CondCode CC,
                                                SDValue CmpOp0,
                                                SDValue CmpOp1) const {
  bool IsFP = CmpOp0.getValueType().isFloatingPoint();
  bool Invert = false;
  SDValue Cmp;
  switch (CC) {
    // Handle tests for order using (or (ogt y x) (oge x y)).
  case ISD::SETUO:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETO: {
    assert(IsFP && "Unexpected integer comparison");
    SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
    SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1);
    Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
    break;
  }

    // Handle <> tests using (or (ogt y x) (ogt x y)).
  case ISD::SETUEQ:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETONE: {
    assert(IsFP && "Unexpected integer comparison");
    SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
    SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1);
    Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
    break;
  }

    // Otherwise a single comparison is enough.  It doesn't really
    // matter whether we try the inversion or the swap first, since
    // there are no cases where both work.
  default:
    if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1);
    else {
      CC = ISD::getSetCCSwappedOperands(CC);
      if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
        Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0);
      else
        llvm_unreachable("Unhandled comparison");
    }
    break;
  }
  if (Invert) {
    SDValue Mask =
      DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64));
    Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
  }
  return Cmp;
}

SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDValue CmpOp0   = Op.getOperand(0);
  SDValue CmpOp1   = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  SDValue CCReg = emitCmp(DAG, DL, C);
  return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0   = Op.getOperand(2);
  SDValue CmpOp1   = Op.getOperand(3);
  SDValue Dest     = Op.getOperand(4);
  SDLoc DL(Op);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  SDValue CCReg = emitCmp(DAG, DL, C);
  return DAG.getNode(
      SystemZISD::BR_CCMASK, DL, Op.getValueType(), Op.getOperand(0),
      DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
      DAG.getTargetConstant(C.CCMask, DL, MVT::i32), Dest, CCReg);
}

// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
// allowing Pos and Neg to be wider than CmpOp.
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
  return (Neg.getOpcode() == ISD::SUB &&
          Neg.getOperand(0).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
          Neg.getOperand(1) == Pos &&
          (Pos == CmpOp ||
           (Pos.getOpcode() == ISD::SIGN_EXTEND &&
            Pos.getOperand(0) == CmpOp)));
}

// Return the absolute or negative absolute of Op; IsNegative decides which.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
                           bool IsNegative) {
  Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
  if (IsNegative)
    Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
                     DAG.getConstant(0, DL, Op.getValueType()), Op);
  return Op;
}

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0   = Op.getOperand(0);
  SDValue CmpOp1   = Op.getOperand(1);
  SDValue TrueOp   = Op.getOperand(2);
  SDValue FalseOp  = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

  // Check for absolute and negative-absolute selections, including those
  // where the comparison value is sign-extended (for LPGFR and LNGFR).
  // This check supplements the one in DAGCombiner.
  if (C.Opcode == SystemZISD::ICMP &&
      C.CCMask != SystemZ::CCMASK_CMP_EQ &&
      C.CCMask != SystemZ::CCMASK_CMP_NE &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    if (isAbsolute(C.Op0, TrueOp, FalseOp))
      return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
    if (isAbsolute(C.Op0, FalseOp, TrueOp))
      return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
  }

  SDValue CCReg = emitCmp(DAG, DL, C);
  SDValue Ops[] = {TrueOp, FalseOp,
                   DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
                   DAG.getTargetConstant(C.CCMask, DL, MVT::i32), CCReg};

  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops);
}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  CodeModel::Model CM = DAG.getTarget().getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, CM)) {
    // Assign anchors at 1<<12 byte boundaries.
    uint64_t Anchor = Offset & ~uint64_t(0xfff);
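    // For example, an offset of 0x1234 uses the anchor 0x1000, leaving
    // 0x234 to be folded below.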
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);

    // The offset can be folded into the address if it is aligned to a halfword.
    Offset -= Anchor;
    if (Offset != 0 && (Offset & 1) == 0) {
      SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
      Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
      Offset = 0;
    }
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, DL, PtrVT));

  return Result;
}

SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
                                                 SelectionDAG &DAG,
                                                 unsigned Opcode,
                                                 SDValue GOTOffset) const {
  SDLoc DL(Node);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Chain = DAG.getEntryNode();
  SDValue Glue;

  // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
  Glue = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
  Glue = Chain.getValue(1);

  // The first call operand is the chain and the second is the TLS symbol.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
                                           Node->getValueType(0),
                                           0, 0));

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
  Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies.
  Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Copy the return value from %r2.
  return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
}

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, DL, PtrVT));
  return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(Node, DAG);
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  TLSModel::Model model = DAG.getTarget().getTLSModel(GV);

  SDValue TP = lowerThreadPointer(DL, DAG);

  // Get the offset of GA from the thread pointer, based on the TLS model.
  SDValue Offset;
  switch (model) {
    case TLSModel::GeneralDynamic: {
      // Load the GOT offset of the tls_index (module ID / per-symbol offset).
      SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);

      Offset = DAG.getConstantPool(CPV, PtrVT, 8);
      Offset = DAG.getLoad(
          PtrVT, DL, DAG.getEntryNode(), Offset,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

      // Call __tls_get_offset to retrieve the offset.
      Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
      break;
    }

    case TLSModel::LocalDynamic: {
      // Load the GOT offset of the module ID.
      SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);

      Offset = DAG.getConstantPool(CPV, PtrVT, 8);
      Offset = DAG.getLoad(
          PtrVT, DL, DAG.getEntryNode(), Offset,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

      // Call __tls_get_offset to retrieve the module base offset.
      Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);

      // Note: The SystemZLDCleanupPass will remove redundant computations
      // of the module base offset.  Count total number of local-dynamic
      // accesses to trigger execution of that pass.
      SystemZMachineFunctionInfo* MFI =
        DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
      MFI->incNumLocalDynamicTLSAccesses();

      // Add the per-symbol offset.
      CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);

      SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
      DTPOffset = DAG.getLoad(
          PtrVT, DL, DAG.getEntryNode(), DTPOffset,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

      Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
      break;
    }

    case TLSModel::InitialExec: {
      // Load the offset from the GOT.
      Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                          SystemZII::MO_INDNTPOFF);
      Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
      Offset =
          DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
                      MachinePointerInfo::getGOT(DAG.getMachineFunction()));
      break;
    }

    case TLSModel::LocalExec: {
      // Force the offset into the constant pool and load it from there.
      SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

      Offset = DAG.getConstantPool(CPV, PtrVT, 8);
      Offset = DAG.getLoad(
          PtrVT, DL, DAG.getEntryNode(), Offset,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
      break;
    }
  }

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                       CP->getAlignment());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                       CP->getAlignment(), CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // If the back chain frame index has not been allocated yet, do so.
  SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>();
  int BackChainIdx = FI->getFramePointerSaveIndex();
  if (!BackChainIdx) {
    // By definition, the frame address is the address of the back chain.
    BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false);
    FI->setFramePointerSaveIndex(BackChainIdx);
  }
  SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);

  // FIXME The frontend should detect this case.
  if (Depth > 0) {
    report_fatal_error("Unsupported stack frame traversal count");
  }

  return BackChain;
}

SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
                                               SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // FIXME The frontend should detect this case.
  if (Depth > 0) {
    report_fatal_error("Unsupported stack frame traversal count");
  }

  // Return R14D, which has the return address. Mark it an implicit live-in.
  unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
}

SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  // Convert loads directly.  This is normally done by DAGCombiner,
  // but we need this case for bitcasts that are created during lowering
  // and which are then lowered themselves.
  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
    if (ISD::isNormalLoad(LoadN)) {
      SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(),
                                    LoadN->getBasePtr(), LoadN->getMemOperand());
      // Update the chain uses.
      DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1));
      return NewLoad;
    }

  if (InVT == MVT::i32 && ResVT == MVT::f32) {
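    // An f32 lives in the upper half of a 64-bit floating-point register,
    // so move the integer into the high word of a GR64, bitcast that to
    // f64 and extract the high 32-bit subregister.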
    SDValue In64;
    if (Subtarget.hasHighWord()) {
      SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
                                       MVT::i64);
      In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                       MVT::i64, SDValue(U64, 0), In);
    } else {
      In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
      In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
                         DAG.getConstant(32, DL, MVT::i64));
    }
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
    return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
                                      DL, MVT::f32, Out64);
  }
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                             MVT::f64, SDValue(U64, 0), In);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
    if (Subtarget.hasHighWord())
      return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
                                        MVT::i32, Out64);
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
                                DAG.getConstant(32, DL, MVT::i64));
    return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
  }
  llvm_unreachable("Unexpected bitcast combination");
}

SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Chain   = Op.getOperand(0);
  SDValue Addr    = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

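  // For reference, the s390x ELF ABI va_list is, in sketch form:
  //
  //   struct __va_list_tag {
  //     long __gpr;                 // GPR arguments consumed so far
  //     long __fpr;                 // FPR arguments consumed so far
  //     void *__overflow_arg_area;  // next argument passed on the stack
  //     void *__reg_save_area;      // start of the register save area
  //   };
  //
  // which matches the four 8-byte fields stored below.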
  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };

  // Store each field into its respective slot.
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset));
    Offset += 8;
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain      = Op.getOperand(0);
  SDValue DstPtr     = Op.getOperand(1);
  SDValue SrcPtr     = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

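  // The va_list is four 8-byte fields (see lowerVASTART), so copying
  // 32 bytes copies the whole structure.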
  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
                       /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
                       /*isTailCall*/false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}

SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  MachineFunction &MF = DAG.getMachineFunction();
  bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc DL(Op);

  // If the user has set the "no-realign-stack" function attribute, ignore
  // alloca alignments.
  uint64_t AlignVal = (RealignOpt ?
                       cast<ConstantSDNode>(Align)->getZExtValue() : 0);

  uint64_t StackAlign = TFI->getStackAlignment();
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

  unsigned SPReg = getStackPointerRegisterToSaveRestore();
  SDValue NeededSpace = Size;

  // Get a reference to the stack pointer.
  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);

  // If we need a backchain, save it now.
  SDValue Backchain;
  if (StoreBackchain)
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());

  // Add extra space for alignment if needed.
  if (ExtraAlignSpace)
    NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
                              DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));

  // Get the new stack pointer value.
  SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);

  // Copy the new stack pointer back.
  Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);

  // The allocated data lives above the 160 bytes allocated for the standard
  // frame, plus any outgoing stack arguments.  We don't know how much that
  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);

  // Dynamically realign if needed.
  if (RequiredAlign > StackAlign) {
    Result =
      DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
                  DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
    Result =
      DAG.getNode(ISD::AND, DL, MVT::i64, Result,
                  DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
  }
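  // For example, with StackAlign == 8 and RequiredAlign == 32,
  // ExtraAlignSpace is 24: adding 24 to an 8-byte-aligned address and
  // masking with ~31 rounds it up to the next 32-byte boundary within the
  // extra space.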

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());

  SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);

  return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
}

SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
    // SystemZISD::SMUL_LOHI returns the low result in the odd register and
    // the high result in the even register.  ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  else {
    // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
    //
    //   (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
    //
    // but using the fact that the upper halves are either all zeros
    // or all ones:
    //
    //   (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
    //
    // and grouping the two right-hand terms together, since they are
    // cheaper to compute than the multiplication:
    //
    //   (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
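    //
    // This works because lh and rh are each either 0 or all ones (they are
    // the sign bits produced by the arithmetic shifts below): if lh is 0,
    // then (lh * rl) and (lh & rl) are both 0, and if lh is all ones, then
    // (lh * rl) == -rl == -(lh & rl); likewise for rh.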
    SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
    SDValue LL = Op.getOperand(0);
    SDValue RL = Op.getOperand(1);
    SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
    SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register.  ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     LL, RL, Ops[1], Ops[0]);
    SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
    SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
    SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
    Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
  }
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register.  ISD::UMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // We use DSGF for 32-bit division.  This means the first operand must
  // always be 64-bit, and the second operand should be 32-bit whenever
  // that is possible, to improve performance.
  if (is32Bit(VT))
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
  else if (DAG.ComputeNumSignBits(Op1) > 32)
    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);

  // DSG(F) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // DL(G) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  // Get the known-zero masks for each operand.
  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
  KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]),
                        DAG.computeKnownBits(Ops[1])};

  // See if the upper 32 bits of one operand and the lower 32 bits of the
  // other are known zero.  They are the low and high operands respectively.
  uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
                       Known[1].Zero.getZExtValue() };
  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;

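  // For example, if operand 0 is known zero in its high 32 bits and
  // operand 1 in its low 32 bits, then operand 0 supplies the low half of
  // the result and operand 1 the high half.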
  SDValue LowOp = Ops[Low];
  SDValue HighOp = Ops[High];

  // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;

  // If the low part is a constant that is outside the range of LHI,
  // then we're better off using IILF.
  if (LowOp.getOpcode() == ISD::Constant) {
    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }

  // Check whether the high part is an AND that doesn't change the
  // high 32 bits and just masks out low bits.  We can skip it if so.
  if (HighOp.getOpcode() == ISD::AND &&
      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue HighOp0 = HighOp.getOperand(0);
    uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
    if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
      HighOp = HighOp0;
  }

  // Take advantage of the fact that all GR32 operations only change the
  // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg.  The interesting cases are those where the truncation
  // can be folded.
  SDLoc DL(Op);
  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
  return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
                                   MVT::i64, HighOp, Low32);
}

// Lower SADDO/SSUBO/UADDO/USUBO nodes.
SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::SADDO:
    BaseOp = SystemZISD::SADDO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::SSUBO:
    BaseOp = SystemZISD::SSUBO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::UADDO:
    BaseOp = SystemZISD::UADDO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::USUBO:
    BaseOp = SystemZISD::USUBO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}

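// Return true if the carry flag consumed by an ADDCARRY chain ultimately
// originates from a UADDO, i.e. the CC value really holds a logical carry.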
static bool isAddCarryChain(SDValue Carry) {
  while (Carry.getOpcode() == ISD::ADDCARRY)
    Carry = Carry.getOperand(2);
  return Carry.getOpcode() == ISD::UADDO;
}

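// Likewise for a SUBCARRY chain: the borrow must originate from a USUBO.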
static bool isSubBorrowChain(SDValue Carry) {
  while (Carry.getOpcode() == ISD::SUBCARRY)
    Carry = Carry.getOperand(2);
  return Carry.getOpcode() == ISD::USUBO;
}

// Lower ADDCARRY/SUBCARRY nodes.
SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
                                                SelectionDAG &DAG) const {

  SDNode *N = Op.getNode();
  MVT VT = N->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::ADDCARRY:
    if (!isAddCarryChain(Carry))
      return SDValue();

    BaseOp = SystemZISD::ADDCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::SUBCARRY:
    if (!isSubBorrowChain(Carry))
      return SDValue();

    BaseOp = SystemZISD::SUBCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  // Set the condition code from the carry flag.
  Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
                      DAG.getConstant(CCValid, DL, MVT::i32),
                      DAG.getConstant(CCMask, DL, MVT::i32));

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}

SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
                                          SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  Op = Op.getOperand(0);

  // Handle vector types via VPOPCT.
  if (VT.isVector()) {
    Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
    Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
    switch (VT.getScalarSizeInBits()) {
    case 8:
      break;
    case 16: {
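      // Each halfword currently holds two byte counts b0:b1; the
      // shift/add/shift sequence below turns that into 0:(b0+b1), the
      // halfword's popcount.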
      Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
      SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
      SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
      Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
      break;
    }
    case 32: {
      SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
                                            DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    case 64: {
      SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
                                            DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    default:
      llvm_unreachable("Unexpected type");
    }
    return Op;
  }

  // Get the known-zero mask for the operand.
  KnownBits Known = DAG.computeKnownBits(Op);
  unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
  if (NumSignificantBits == 0)
    return DAG.getConstant(0, DL, VT);

  // Skip known-zero high parts of the operand.
  int64_t OrigBitSize = VT.getSizeInBits();
  int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
  BitSize = std::min(BitSize, OrigBitSize);

  // The POPCNT instruction counts the number of bits in each byte.
  Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
  Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
  Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);

  // Add up per-byte counts in a binary tree.  All bits of Op at
  // position larger than BitSize remain zero throughout.
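  //
  // As a scalar illustration for BitSize == 64 (not actual code), the loop
  // performs:
  //
  //   Sum  = PerByteCounts;
  //   Sum += Sum << 32;
  //   Sum += Sum << 16;
  //   Sum += Sum << 8;
  //
  // after which the high byte holds the full population count.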
  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
    if (BitSize != OrigBitSize)
      Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
                        DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
    Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
  }

  // Extract overall result from high byte.
  if (BitSize > 8)
    Op = DAG.getNode(ISD::SRL, DL, VT, Op,
                     DAG.getConstant(BitSize - 8, DL, VT));

  return Op;
}

SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
    cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
    cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
      FenceSSID == SyncScope::System) {
    return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
                                      Op.getOperand(0)),
                   0);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

// Op is an atomic load.  Lower it into a normal volatile load.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
                                                SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
                        Node->getChain(), Node->getBasePtr(),
                        Node->getMemoryVT(), Node->getMemOperand());
}

// Op is an atomic store.  Lower it into a normal volatile store.
SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
                                    Node->getBasePtr(), Node->getMemoryVT(),
                                    Node->getMemOperand());
  // We have to enforce sequential consistency by performing a
  // serialization operation after the store.
  if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent)
    Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
                                       MVT::Other, Chain), 0);
  return Chain;
}

// Op is an 8-bit, 16-bit or 32-bit ATOMIC_LOAD_* operation.  Lower the first
// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  // 32-bit operations need no code outside the main loop.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getChain();
  SDValue Addr = Node->getBasePtr();
  SDValue Src2 = Node->getVal();
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Convert atomic subtracts of constants into additions.
  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
    if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
      Opcode = SystemZISD::ATOMIC_LOADW_ADD;
      Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
    }

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);
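
  // For example, an 8-bit field at (Addr & 3) == 1 occupies bits 23..16 of
  // the big-endian containing word.  BitShift is then 8, which rotates the
  // field up to bits 31..24, and NegBitShift (-8, i.e. 24 mod 32) rotates
  // it back.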

  // Extend the source operand to 32 bits and prepare it for the inner loop.
  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
  // operations require the source to be shifted in advance.  (This shift
  // can be folded if the source is constant.)  For AND and NAND, the lower
  // bits must be set, while for other opcodes they should be left clear.
  if (Opcode != SystemZISD::ATOMIC_SWAPW)
    Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
                       DAG.getConstant(32 - BitSize, DL, WideVT));
  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
      Opcode == SystemZISD::ATOMIC_LOADW_NAND)
    Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
                       DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
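  // E.g. for an 8-bit operation, a source value SS ends up as 0xSS000000,
  // or as 0xSSffffff for AND and NAND.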

  // Construct the ATOMIC_LOADW_* node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
                    DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
                                             NarrowVT, MMO);

  // Rotate the result of the final CS so that the field is in the lower
  // bits of a GR32, then truncate it.
  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
                                    DAG.getConstant(BitSize, DL, WideVT));
  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
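  // (In the example above, BitShift + BitSize == 16 moves bits 23..16 of
  // the word down to bits 7..0.)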

  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
  return DAG.getMergeValues(RetOps, DL);
}

// Op is an ATOMIC_LOAD_SUB operation.  Lower 8- and 16-bit operations
// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
// operations into additions.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
    // A full-width operation.
    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    SDValue Src2 = Node->getVal();
    SDValue NegSrc2;
    SDLoc DL(Src2);

    if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
      // Use an addition if the operand is constant and either LAA(G) is
      // available or the negative value is in the range of A(G)FHI.
      int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
      if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
        NegSrc2 = DAG.getConstant(Value, DL, MemVT);
    } else if (Subtarget.hasInterlockedAccess1())
      // Use LAA(G) if available.
      NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
                            Src2);

    if (NegSrc2.getNode())
      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
                           Node->getChain(), Node->getBasePtr(), NegSrc2,
                           Node->getMemOperand());

    // Use the node as-is.
    return Op;
  }

  return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
}

// Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue ChainIn = Node->getOperand(0);
  SDValue Addr = Node->getOperand(1);
  SDValue CmpVal = Node->getOperand(2);
  SDValue SwapVal = Node->getOperand(3);
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);

  // We have native support for 32-bit and 64-bit compare and swap, but we
  // still need to expand extracting the "success" result from the CC.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {
    SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
    SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP,
                                               DL, Tys, Ops, NarrowVT, MMO);
    SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);

    DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
    return SDValue();
  }

  // Convert 8-bit and 16-bit compare and swap to a loop, implemented
  // via a fullword ATOMIC_CMP_SWAPW operation.
  int64_t BitSize = NarrowVT.getSizeInBits();
  EVT PtrVT = Addr.getValueType();

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Construct the ATOMIC_CMP_SWAPW node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                    NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
                                             VTList, Ops, NarrowVT, MMO);
  SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
                              SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ);

  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
  return SDValue();
}

MachineMemOperand::Flags
SystemZTargetLowering::getMMOFlags(const Instruction &I) const {
  // Because of how we convert atomic_load and atomic_store to normal loads and
  // stores in the DAG, we need to ensure that the MMOs are marked volatile
  // since DAGCombine hasn't been updated to account for atomic, but
  // non-volatile, loads.  (See D57601)
  if (auto *SI = dyn_cast<StoreInst>(&I))
    if (SI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *LI = dyn_cast<LoadInst>(&I))
    if (LI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  return MachineMemOperand::MONone;
}

SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
                            SystemZ::R15D, Op.getValueType());
}

SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  SDValue Chain = Op.getOperand(0);
  SDValue NewSP = Op.getOperand(1);
  SDValue Backchain;
  SDLoc DL(Op);

  if (StoreBackchain) {
    SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
  }

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());

  return Chain;
}

SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (!IsData)
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc DL(Op);
  bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
  auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
  SDValue Ops[] = {Op.getOperand(0), DAG.getTargetConstant(Code, DL, MVT::i32),
                   Op.getOperand(1)};
  return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
                                 Node->getVTList(), Ops,
                                 Node->getMemoryVT(), Node->getMemOperand());
}

// Convert condition code in CCReg to an i32 value.
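// IPM places the condition code in bits 29..28 of its result, so shifting
// right by SystemZ::IPM_CC (28) leaves the CC value in the low two bits.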
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
  SDLoc DL(CCReg);
  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
  return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
                     DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
}

SDValue
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                              SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
    SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode);
    SDValue CC = getCCResult(DAG, SDValue(Node, 0));
    DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
    return SDValue();
  }

  return SDValue();
}

SDValue
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                               SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
    SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode);
    if (Op->getNumValues() == 1)
      return getCCResult(DAG, SDValue(Node, 0));
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
    return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(),
                       SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1)));
  }

  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);

  case Intrinsic::s390_vpdi:
    return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vperm:
    return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
    return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
    return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
    return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }

  return SDValue();
}

namespace {
// Says that SystemZISD operation Opcode can be used to perform the equivalent
// of a VPERM with permute vector Bytes.  If Opcode takes three operands,
// Operand is the constant third operand, otherwise it is the number of
// bytes in each element of the result.
struct Permute {
  unsigned Opcode;
  unsigned Operand;
  unsigned char Bytes[SystemZ::VectorBytes];
};
}

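// In all of these patterns, selector bytes 0..15 refer to bytes of the first
// operand and bytes 16..31 to bytes of the second operand, matching the
// selector encoding used by VPERM itself.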
static const Permute PermuteForms[] = {
  // VMRHG
  { SystemZISD::MERGE_HIGH, 8,
    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VMRHF
  { SystemZISD::MERGE_HIGH, 4,
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
  // VMRHH
  { SystemZISD::MERGE_HIGH, 2,
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
  // VMRHB
  { SystemZISD::MERGE_HIGH, 1,
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
  // VMRLG
  { SystemZISD::MERGE_LOW, 8,
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
  // VMRLF
  { SystemZISD::MERGE_LOW, 4,
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
  // VMRLH
  { SystemZISD::MERGE_LOW, 2,
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
  // VMRLB
  { SystemZISD::MERGE_LOW, 1,
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
  // VPKG
  { SystemZISD::PACK, 4,
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
  // VPKF
  { SystemZISD::PACK, 2,
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
  // VPKH
  { SystemZISD::PACK, 1,
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
  // VPDI V1, V2, 4  (low half of V1, high half of V2)
  { SystemZISD::PERMUTE_DWORDS, 4,
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VPDI V1, V2, 1  (high half of V1, low half of V2)
  { SystemZISD::PERMUTE_DWORDS, 1,
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
};

// Called after matching a vector shuffle against a particular pattern.
// Both the original shuffle and the pattern have two vector operands.
// OpNos[0] is the operand of the original shuffle that should be used for
// operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
// OpNos[1] is the same for operand 1 of the pattern.  Resolve these -1s and
// set OpNo0 and OpNo1 to the shuffle operands that should actually be used
// for operands 0 and 1 of the pattern.
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
  if (OpNos[0] < 0) {
    if (OpNos[1] < 0)
      return false;
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];
  } else {
    OpNo0 = OpNos[0];
    OpNo1 = OpNos[1];
  }
  return true;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Return true if the VPERM can be implemented using P.
// When returning true, set OpNo0 to the VPERM operand that should be
// used for operand 0 of P and likewise OpNo1 for operand 1 of P.
//
// For example, if swapping the VPERM operands allows P to match, OpNo0
// will be 1 and OpNo1 will be 0.  If instead Bytes only refers to one
// operand, but rewriting it to use two duplicated operands allows it to
// match P, then OpNo0 and OpNo1 will be the same.
static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
                         unsigned &OpNo0, unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
    int Elt = Bytes[I];
    if (Elt >= 0) {
      // Make sure that the two permute vectors use the same suboperand
      // byte number.  Only the operand numbers (the high bits) are
      // allowed to differ.
      if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
        return false;
      int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
      int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
      // Make sure that the operand mappings are consistent with previous
      // elements.
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
        return false;
      OpNos[ModelOpNo] = RealOpNo;
    }
  }
  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}

// As above, but search for a matching permute.
static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
                                   unsigned &OpNo0, unsigned &OpNo1) {
  for (auto &P : PermuteForms)
    if (matchPermute(Bytes, P, OpNo0, OpNo1))
      return &P;
  return nullptr;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  This permute is an operand of an outer permute.
// See whether redistributing the -1 bytes gives a shuffle that can be
// implemented using P.  If so, set Transform to a VPERM-like permute vector
// that, when applied to the result of P, gives the original permute in Bytes.
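// Note that the To index below only ever advances, so the defined bytes of
// Bytes must appear in the same relative order as they do in P.Bytes.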
static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                               const Permute &P,
                               SmallVectorImpl<int> &Transform) {
  unsigned To = 0;
  for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
    int Elt = Bytes[From];
    if (Elt < 0)
      // Byte number From of the result is undefined.
      Transform[From] = -1;
    else {
      while (P.Bytes[To] != Elt) {
        To += 1;
        if (To == SystemZ::VectorBytes)
          return false;
      }
      Transform[From] = To;
    }
  }
  return true;
}

// As above, but search for a matching permute.
static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                                         SmallVectorImpl<int> &Transform) {
  for (auto &P : PermuteForms)
    if (matchDoublePermute(Bytes, P, Transform))
      return &P;
  return nullptr;
}

// Convert the mask of the given shuffle op into a byte-level mask,
// as if it had type vNi8.
static bool getVPermMask(SDValue ShuffleOp,
                         SmallVectorImpl<int> &Bytes) {
  EVT VT = ShuffleOp.getValueType();
  unsigned NumElements = VT.getVectorNumElements();
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I) {
      int Index = VSN->getMaskElt(I);
      if (Index >= 0)
        for (unsigned J = 0; J < BytesPerElement; ++J)
          Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
    }
    return true;
  }
  if (SystemZISD::SPLAT == ShuffleOp.getOpcode() &&
      isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
    unsigned Index = ShuffleOp.getConstantOperandVal(1);
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I)
      for (unsigned J = 0; J < BytesPerElement; ++J)
        Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
    return true;
  }
  return false;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  See whether bytes [Start, Start + BytesPerElement) of
// the result come from a contiguous sequence of bytes from one input.
// Set Base to the selector for the first byte if so.
static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
                            unsigned BytesPerElement, int &Base) {
  Base = -1;
  for (unsigned I = 0; I < BytesPerElement; ++I) {
    if (Bytes[Start + I] >= 0) {
      unsigned Elem = Bytes[Start + I];
      if (Base < 0) {
        Base = Elem - I;
        // Make sure the bytes would come from one input operand.
        if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
          return false;
      } else if (unsigned(Base) != Elem - I)
        return false;
    }
  }
  return true;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Return true if it can be performed using VSLDI.
// When returning true, set StartIndex to the shift amount and OpNo0
// and OpNo1 to the VPERM operands that should be used as the first
// and second shift operand respectively.
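// VSLDI produces bytes [Shift, Shift + 16) of the concatenation of its two
// operands, so every defined result byte must imply the same Shift amount.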
static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
                               unsigned &StartIndex, unsigned &OpNo0,
                               unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
  int Shift = -1;
  for (unsigned I = 0; I < 16; ++I) {
    int Index = Bytes[I];
    if (Index >= 0) {
      int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
      int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
      int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
      if (Shift < 0)
        Shift = ExpectedShift;
      else if (Shift != ExpectedShift)
        return false;
      // Make sure that the operand mappings are consistent with previous
      // elements.
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
        return false;
      OpNos[ModelOpNo] = RealOpNo;
    }
  }
  StartIndex = Shift;
  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}

// Create a node that performs P on operands Op0 and Op1, casting the
// operands to the appropriate type.  The type of the result is determined by P.
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                              const Permute &P, SDValue Op0, SDValue Op1) {
  // VPDI (PERMUTE_DWORDS) always operates on v2i64s.  The input
  // elements of a PACK are twice as wide as the outputs.
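  // For example, VPKG (PACK with Operand == 4) takes two v2i64 inputs and
  // produces a v4i32 result.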
  unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
                      P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
                      P.Operand);
  // Cast both operands to the appropriate type.
  MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
                              SystemZ::VectorBytes / InBytes);
  Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
  Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
  SDValue Op;
  if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
    SDValue Op2 = DAG.getTargetConstant(P.Operand, DL, MVT::i32);
    Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
  } else if (P.Opcode == SystemZISD::PACK) {
    MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
                                 SystemZ::VectorBytes / P.Operand);
    Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
  } else {
    Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
  }
  return Op;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Implement it on operands Ops[0] and Ops[1] using
// VSLDI or VPERM.
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                                     SDValue *Ops,
                                     const SmallVectorImpl<int> &Bytes) {
  for (unsigned I = 0; I < 2; ++I)
    Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);

  // First see whether VSLDI can be used.
  unsigned StartIndex, OpNo0, OpNo1;
  if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
    return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
                       Ops[OpNo1],
                       DAG.getTargetConstant(StartIndex, DL, MVT::i32));

  // Fall back on VPERM.  Construct an SDNode for the permute vector.
  SDValue IndexNodes[SystemZ::VectorBytes];
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
    if (Bytes[I] >= 0)
      IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
    else
      IndexNodes[I] = DAG.getUNDEF(MVT::i32);
  SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
  return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
}

namespace {
// Describes a general N-operand vector shuffle.
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt) {}
  void addUndef();
  bool add(SDValue, unsigned);
  SDValue getNode(SelectionDAG &, const SDLoc &);

  // The operands of the shuffle.
  SmallVector<SDValue, SystemZ::VectorBytes> Ops;

  // Bytes[I] is -1 if byte I of the result is undefined.  Otherwise the
  // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
  // Bytes[I] / SystemZ::VectorBytes.
  SmallVector<int, SystemZ::VectorBytes> Bytes;

  // The type of the shuffle result.
  EVT VT;
};
}

// Add an extra undefined element to the shuffle.
void GeneralShuffle::addUndef() {
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
}

// Add an extra element to the shuffle, taking it from element Elem of Op.
// A null Op indicates a vector input whose value will be calculated later;
// there is at most one such input per shuffle and it always has the same
// type as the result. Aborts and returns false if the source vector elements
// of an EXTRACT_VECTOR_ELT are smaller than the destination elements.  Per
// LLVM semantics they become implicitly extended, but this case is rare and
// not optimized here.
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();

  // The source vector can have wider elements than the result,
  // either through an explicit TRUNCATE or because of type legalization.
  // We want the least significant part.
  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
  unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();

  // Return false if the source elements are smaller than their destination
  // elements.
  if (FromBytesPerElement < BytesPerElement)
    return false;

  unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
                   (FromBytesPerElement - BytesPerElement));
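  // For example, extracting a 2-byte element from a v4i32 operand with
  // Elem == 1 takes the last two (least significant) bytes of that element:
  // Byte == 4 + 2 == 6.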

  // Look through things like shuffles and bitcasts.
  while (Op.getNode()) {
    if (Op.getOpcode() == ISD::BITCAST)
      Op = Op.getOperand(0);
    else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
      // See whether the bytes we need come from a contiguous part of one
      // operand.
      SmallVector<int, SystemZ::VectorBytes> OpBytes;
      if (!getVPermMask(Op, OpBytes))
        break;
      int NewByte;
      if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
        break;
      if (NewByte < 0) {
        addUndef();
        return true;
      }
      Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
      Byte = unsigned(NewByte) % SystemZ::VectorBytes;
    } else if (Op.isUndef()) {
      addUndef();
      return true;
    } else
      break;
  }

  // Make sure that the source of the extraction is in Ops.
  unsigned OpNo = 0;
  for (; OpNo < Ops.size(); ++OpNo)
    if (Ops[OpNo] == Op)
      break;
  if (OpNo == Ops.size())
    Ops.push_back(Op);

  // Add the element to Bytes.
  unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(Base + I);

  return true;
}

// Return SDNodes for the completed shuffle.
SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
  assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");

  if (Ops.size() == 0)
    return DAG.getUNDEF(VT);

  // Make sure that there are at least two shuffle operands.
  if (Ops.size() == 1)
    Ops.push_back(DAG.getUNDEF(MVT::v16i8));

  // Create a tree of shuffles, deferring the root node until after the loop.
  // Try to redistribute the undefined elements of non-root nodes so that
  // the non-root shuffles match something like a pack or merge, then adjust
  // the parent node's permute vector to compensate for the new order.
  // Among other things, this copes with vectors like <2 x i16> that were
  // padded with undefined elements during type legalization.
  //
  // In the best case this redistribution will lead to the whole tree
  // using packs and merges.  It should rarely be a loss in other cases.
  unsigned Stride = 1;
  for (; Stride * 2 < Ops.size(); Stride *= 2) {
    for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
      SDValue SubOps[] = { Ops[I], Ops[I + Stride] };

      // Create a mask for just these two operands.
      SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
      for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
        unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
        unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
        if (OpNo == I)
          NewBytes[J] = Byte;
        else if (OpNo == I + Stride)
          NewBytes[J] = SystemZ::VectorBytes + Byte;
        else
          NewBytes[J] = -1;
      }
      // See if it would be better to reorganize NewBytes to avoid using VPERM.
      SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
      if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
        Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
        // Applying NewBytesMap to Ops[I] gets back to NewBytes.
        for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
          if (NewBytes[J] >= 0) {
            assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
                   "Invalid double permute");
            Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
          } else
            assert(NewBytesMap[J] < 0 && "Invalid double permute");
        }
      } else {
        // Just use NewBytes on the operands.
        Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
        for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
          if (NewBytes[J] >= 0)
            Bytes[J] = I * SystemZ::VectorBytes + J;
      }
    }
  }

  // Now we just have 2 inputs.  Put the second operand in Ops[1].
  if (Stride > 1) {
    Ops[1] = Ops[Stride];
    for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
      if (Bytes[I] >= int(SystemZ::VectorBytes))
        Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
  }

  // Look for an instruction that can do the permute without resorting
  // to VPERM.
  unsigned OpNo0, OpNo1;
  SDValue Op;
  if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
    Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
  else
    Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);
  return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}

// Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
static bool isScalarToVector(SDValue Op) {
  for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
    if (!Op.getOperand(I).isUndef())
      return false;
  return true;
}

// Return a vector of type VT that contains Value in the first element.
// The other elements don't matter.
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SDValue Value) {
  // If we have a constant, replicate it to all elements and let the
  // BUILD_VECTOR lowering take care of it.
  if (Value.getOpcode() == ISD::Constant ||
      Value.getOpcode() == ISD::ConstantFP) {
    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
    return DAG.getBuildVector(VT, DL, Ops);
  }
  if (Value.isUndef())
    return DAG.getUNDEF(VT);
  return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
}

// Return a vector of type VT in which Op0 is in element 0 and Op1 is in
// element 1.  Used for cases in which replication is cheap.
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                 SDValue Op0, SDValue Op1) {
  if (Op0.isUndef()) {
    if (Op1.isUndef())
      return DAG.getUNDEF(VT);
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
  }
  if (Op1.isUndef())
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
  return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
                     buildScalarToVector(DAG, DL, VT, Op0),
                     buildScalarToVector(DAG, DL, VT, Op1));
}

// Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
// vector for them.
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
                          SDValue Op1) {
  if (Op0.isUndef() && Op1.isUndef())
    return DAG.getUNDEF(MVT::v2i64);
  // If one of the two inputs is undefined then replicate the other one,
  // in order to avoid using another register unnecessarily.
  if (Op0.isUndef())
    Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
  else if (Op1.isUndef())
    Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
  else {
    Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
    Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
  }
  return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
}

// If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
// better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
// the non-EXTRACT_VECTOR_ELT elements.  See if the given BUILD_VECTOR
// would benefit from this representation and return it if so.
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
                                     BuildVectorSDNode *BVN) {
  EVT VT = BVN->getValueType(0);
  unsigned NumElements = VT.getVectorNumElements();

  // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
  // on byte vectors.  If there are non-EXTRACT_VECTOR_ELT elements that still
  // need a BUILD_VECTOR, add an additional placeholder operand for that
  // BUILD_VECTOR and store its operands in ResidueOps.
  GeneralShuffle GS(VT);
  SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
  bool FoundOne = false;
  for (unsigned I = 0; I < NumElements; ++I) {
    SDValue Op = BVN->getOperand(I);
    if (Op.getOpcode() == ISD::TRUNCATE)
      Op = Op.getOperand(0);
    if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        Op.getOperand(1).getOpcode() == ISD::Constant) {
      unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      if (!GS.add(Op.getOperand(0), Elem))
        return SDValue();
      FoundOne = true;
    } else if (Op.isUndef()) {
      GS.addUndef();
    } else {
      if (!GS.add(SDValue(), ResidueOps.size()))
        return SDValue();
      ResidueOps.push_back(BVN->getOperand(I));
    }
  }

  // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
  if (!FoundOne)
    return SDValue();

  // Create the BUILD_VECTOR for the remaining elements, if any.
  if (!ResidueOps.empty()) {
    while (ResidueOps.size() < NumElements)
      ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
    for (auto &Op : GS.Ops) {
      if (!Op.getNode()) {
        Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
        break;
      }
    }
  }
  return GS.getNode(DAG, SDLoc(BVN));
}

bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
  if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
    return true;
  if (Subtarget.hasVectorEnhancements2() && Op.getOpcode() == SystemZISD::LRV)
    return true;
  return false;
}

// Combine GPR scalar values Elems into a vector of type VT.
SDValue
SystemZTargetLowering::buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SmallVectorImpl<SDValue> &Elems) const {
  // See whether there is a single replicated value.
  SDValue Single;
  unsigned int NumElements = Elems.size();
  unsigned int Count = 0;
  for (auto Elem : Elems) {
    if (!Elem.isUndef()) {
      if (!Single.getNode())
        Single = Elem;
      else if (Elem != Single) {
        Single = SDValue();
        break;
      }
      Count += 1;
    }
  }
  // There are three cases here:
  //
  // - if the only defined element is a loaded one, the best sequence
  //   is a replicating load.
  //
  // - otherwise, if the only defined element is an i64 value, we will
  //   end up with the same VLVGP sequence regardless of whether we short-cut
  //   for replication or fall through to the later code.
  //
  // - otherwise, if the only defined element is an i32 or smaller value,
  //   we would need 2 instructions to replicate it: VLVGP followed by VREPx.
  //   This is only a win if the single defined element is used more than once.
  //   In other cases we're better off using a single VLVGx.
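  //
  // For example, <(load f64), undef> becomes a single VLREP even though
  // the load has only one use, whereas a non-load i32 used in just one
  // lane is left for the generic VLVGP/VLVGx code below.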
  if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);

  // If all elements are loads, use VLREP/VLEs (below).
  bool AllLoads = true;
  for (auto Elem : Elems)
    if (!isVectorElementLoad(Elem)) {
      AllLoads = false;
      break;
    }

  // The best way of building a v2i64 from two i64s is to use VLVGP.
  if (VT == MVT::v2i64 && !AllLoads)
    return joinDwords(DAG, DL, Elems[0], Elems[1]);

  // Use a 64-bit merge high to combine two doubles.
  if (VT == MVT::v2f64 && !AllLoads)
    return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);

  // Build v4f32 values directly from the FPRs:
  //
  //   <Axxx> <Bxxx> <Cxxx> <Dxxx>
  //         V             V        VMRHF
  //      <ABxx>        <CDxx>
  //               V                VMRHG
  //             <ABCD>
  if (VT == MVT::v4f32 && !AllLoads) {
    SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
    SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
    // Avoid unnecessary undefs by reusing the other operand.
    if (Op01.isUndef())
      Op01 = Op23;
    else if (Op23.isUndef())
      Op23 = Op01;
    // Merging identical replications is a no-op.
    if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
      return Op01;
    Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
    Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
    SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
                             DL, MVT::v2i64, Op01, Op23);
    return DAG.getNode(ISD::BITCAST, DL, VT, Op);
  }

  // Collect the constant terms.
  SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
  SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);

  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {
    SDValue Elem = Elems[I];
    if (Elem.getOpcode() == ISD::Constant ||
        Elem.getOpcode() == ISD::ConstantFP) {
      NumConstants += 1;
      Constants[I] = Elem;
      Done[I] = true;
    }
  }
  // If there was at least one constant, fill in the other elements of
  // Constants with undefs to get a full vector constant and use that
  // as the starting point.
  SDValue Result;
  SDValue ReplicatedVal;
  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)
      if (!Constants[I].getNode())
        Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
    Result = DAG.getBuildVector(VT, DL, Constants);
  } else {
    // Otherwise try to use VLREP or VLVGP to start the sequence in order to
    // avoid a false dependency on any previous contents of the vector
    // register.

    // Use a VLREP if at least one element is a load.  Replicate the load
    // whose value is used by the largest number of elements.
    std::map<const SDNode*, unsigned> UseCounts;
    SDNode *LoadMaxUses = nullptr;
    for (unsigned I = 0; I < NumElements; ++I)
      if (isVectorElementLoad(Elems[I])) {
        SDNode *Ld = Elems[I].getNode();
        UseCounts[Ld]++;
        if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
          LoadMaxUses = Ld;
      }
    if (LoadMaxUses != nullptr) {
      ReplicatedVal = SDValue(LoadMaxUses, 0);
      Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal);
    } else {
      // Try to use VLVGP.
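      // VLVGP writes its two GPR operands into the low half of each
      // doubleword, so after the bitcast back to VT the values land in
      // lanes NumElements / 2 - 1 and NumElements - 1 (SystemZ vectors
      // are big-endian).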
      unsigned I1 = NumElements / 2 - 1;
      unsigned I2 = NumElements - 1;
      bool Def1 = !Elems[I1].isUndef();
      bool Def2 = !Elems[I2].isUndef();
      if (Def1 || Def2) {
        SDValue Elem1 = Elems[Def1 ? I1 : I2];
        SDValue Elem2 = Elems[Def2 ? I2 : I1];
        Result = DAG.getNode(ISD::BITCAST, DL, VT,
                             joinDwords(DAG, DL, Elem1, Elem2));
        Done[I1] = true;
        Done[I2] = true;
      } else
        Result = DAG.getUNDEF(VT);
    }
  }

  // Use VLVGx to insert the other elements.
  for (unsigned I = 0; I < NumElements; ++I)
    if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
                           DAG.getConstant(I, DL, MVT::i32));
  return Result;
}

SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (BVN->isConstant()) {
    if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget))
      return Op;

    // Fall back to loading it from memory.
    return SDValue();
  }

  // See if we should use shuffles to construct the vector from other vectors.
  if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
    return Res;

  // Detect SCALAR_TO_VECTOR conversions.
  if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
    return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));

  // Otherwise use buildVector to build the vector up from GPRs.
  unsigned NumElements = Op.getNumOperands();
  SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
  for (unsigned I = 0; I < NumElements; ++I)
    Ops[I] = Op.getOperand(I);
  return buildVector(DAG, DL, VT, Ops);
}

SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
                                                   SelectionDAG &DAG) const {
  auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  unsigned NumElements = VT.getVectorNumElements();

  if (VSN->isSplat()) {
    SDValue Op0 = Op.getOperand(0);
    unsigned Index = VSN->getSplatIndex();
    assert(Index < VT.getVectorNumElements() &&
           "Splat index should be defined and in first operand");
    // See whether the value we're splatting is directly available as a scalar.
    if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
        Op0.getOpcode() == ISD::BUILD_VECTOR)
      return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
    // Otherwise keep it as a vector-to-vector operation.
    return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
                       DAG.getTargetConstant(Index, DL, MVT::i32));
  }

  GeneralShuffle GS(VT);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Elt = VSN->getMaskElt(I);
    if (Elt < 0)
      GS.addUndef();
    else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                     unsigned(Elt) % NumElements))
      return SDValue();
  }
  return GS.getNode(DAG, SDLoc(VSN));
}

SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // Just insert the scalar into element 0 of an undefined vector.
  return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
                     Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
                     Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
}

SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                      SelectionDAG &DAG) const {
  // Handle insertions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  EVT VT = Op.getValueType();

  // Insertions into constant indices of a v2f64 can be done using VPDI.
  // However, if the inserted value is a bitcast or a constant then it's
  // better to use GPRs, as below.
  if (VT == MVT::v2f64 &&
      Op1.getOpcode() != ISD::BITCAST &&
      Op1.getOpcode() != ISD::ConstantFP &&
      Op2.getOpcode() == ISD::Constant) {
    uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
    unsigned Mask = VT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and insert via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
                            DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}

SDValue
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {
  // Handle extractions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  EVT VecVT = Op0.getValueType();

  // Extractions of constant indices can be done directly.
  if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
    uint64_t Index = CIndexN->getZExtValue();
    unsigned Mask = VecVT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and extract via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}

SDValue
SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG,
                                              unsigned UnpackHigh) const {
  SDValue PackedOp = Op.getOperand(0);
  EVT OutVT = Op.getValueType();
  EVT InVT = PackedOp.getValueType();
  unsigned ToBits = OutVT.getScalarSizeInBits();
  unsigned FromBits = InVT.getScalarSizeInBits();
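  // Repeatedly unpack the high half of the vector until the element
  // width matches.  For example, extending v16i8 to v2i64 emits three
  // UNPACK(L)_HIGH nodes: i8 -> i16 -> i32 -> i64.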
  do {
    FromBits *= 2;
    EVT NewVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
                                 SystemZ::VectorBits / FromBits);
    PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), NewVT, PackedOp);
  } while (FromBits != ToBits);
  return PackedOp;
}

SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
                                          unsigned ByScalar) const {
  // Look for cases where a vector shift can use the *_BY_SCALAR form.
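  // For example, (shl <4 x i32> %val, (splat 3)) becomes
  // (VSHL_BY_SCALAR %val, 3), which can then be selected to a single
  // VESLF with an immediate shift amount.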
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  unsigned ElemBitSize = VT.getScalarSizeInBits();

  // See whether the shift vector is a splat represented as BUILD_VECTOR.
  if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
    APInt SplatBits, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    // Check for constant splats.  Use ElemBitSize as the minimum element
    // width and reject splats that need wider elements.
    if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                             ElemBitSize, true) &&
        SplatBitSize == ElemBitSize) {
      SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
                                      DL, MVT::i32);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
    }
    // Check for variable splats.
    BitVector UndefElements;
    SDValue Splat = BVN->getSplatValue(&UndefElements);
    if (Splat) {
      // Since i32 is the smallest legal type, we either need a no-op
      // or a truncation.
      SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
    }
  }

  // See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
  // and the shift amount is directly available in a GPR.
  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
    if (VSN->isSplat()) {
      SDValue VSNOp0 = VSN->getOperand(0);
      unsigned Index = VSN->getSplatIndex();
      assert(Index < VT.getVectorNumElements() &&
             "Splat index should be defined and in first operand");
      if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
          VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
        // Since i32 is the smallest legal type, we either need a no-op
        // or a truncation.
        SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
                                    VSNOp0.getOperand(Index));
        return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
      }
    }
  }

  // Otherwise just treat the current form as legal.
  return Op;
}

SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
                                              SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::BR_CC:
    return lowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return lowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return lowerSETCC(Op, DAG);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
  case ISD::JumpTable:
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::VACOPY:
    return lowerVACOPY(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
  case ISD::SMUL_LOHI:
    return lowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:
    return lowerUMUL_LOHI(Op, DAG);
  case ISD::SDIVREM:
    return lowerSDIVREM(Op, DAG);
  case ISD::UDIVREM:
    return lowerUDIVREM(Op, DAG);
  case ISD::SADDO:
  case ISD::SSUBO:
  case ISD::UADDO:
  case ISD::USUBO:
    return lowerXALUO(Op, DAG);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    return lowerADDSUBCARRY(Op, DAG);
  case ISD::OR:
    return lowerOR(Op, DAG);
  case ISD::CTPOP:
    return lowerCTPOP(Op, DAG);
  case ISD::ATOMIC_FENCE:
    return lowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_SWAP:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
  case ISD::ATOMIC_STORE:
    return lowerATOMIC_STORE(Op, DAG);
  case ISD::ATOMIC_LOAD:
    return lowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_LOAD_ADD:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
  case ISD::ATOMIC_LOAD_SUB:
    return lowerATOMIC_LOAD_SUB(Op, DAG);
  case ISD::ATOMIC_LOAD_AND:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
  case ISD::ATOMIC_LOAD_OR:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
  case ISD::ATOMIC_LOAD_XOR:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
  case ISD::ATOMIC_LOAD_NAND:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
  case ISD::ATOMIC_LOAD_MIN:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
  case ISD::ATOMIC_LOAD_MAX:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
  case ISD::ATOMIC_LOAD_UMIN:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
  case ISD::ATOMIC_LOAD_UMAX:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    return lowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STACKSAVE:
    return lowerSTACKSAVE(Op, DAG);
  case ISD::STACKRESTORE:
    return lowerSTACKRESTORE(Op, DAG);
  case ISD::PREFETCH:
    return lowerPREFETCH(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:
    return lowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH);
  case ISD::SHL:
    return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
  case ISD::SRL:
    return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
  case ISD::SRA:
    return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}

// Lower operations with invalid operand or result types (currently used
// only for 128-bit integer types).

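// Convert an i128 value into an untyped GR128 register pair: the high
// 64 bits go into the even (subreg_h64) half of the pair and the low
// 64 bits into the odd (subreg_l64) half.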
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
  SDLoc DL(In);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
                           DAG.getIntPtrConstant(0, DL));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
                           DAG.getIntPtrConstant(1, DL));
  SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
                                    MVT::Untyped, Hi, Lo);
  return SDValue(Pair, 0);
}

static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {
  SDLoc DL(In);
  SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64,
                                          DL, MVT::i64, In);
  SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64,
                                          DL, MVT::i64, In);
  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi);
}

void
SystemZTargetLowering::LowerOperationWrapper(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::ATOMIC_LOAD: {
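    // Lower the load to ATOMIC_LOAD_128, which is selected to LPQ; the
    // quadword result comes back as an untyped GR128 pair.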
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    Results.push_back(lowerGR128ToI128(DAG, Res));
    Results.push_back(Res.getValue(1));
    break;
  }
  case ISD::ATOMIC_STORE: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = { N->getOperand(0),
                      lowerI128ToGR128(DAG, N->getOperand(2)),
                      N->getOperand(1) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    // We have to enforce sequential consistency by performing a
    // serialization operation after the store.
    if (cast<AtomicSDNode>(N)->getOrdering() ==
        AtomicOrdering::SequentiallyConsistent)
      Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL,
                                       MVT::Other, Res), 0);
    Results.push_back(Res);
    break;
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                      lowerI128ToGR128(DAG, N->getOperand(2)),
                      lowerI128ToGR128(DAG, N->getOperand(3)) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    SDValue Success = emitSETCC(DAG, DL, Res.getValue(1),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
    Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1));
    Results.push_back(lowerGR128ToI128(DAG, Res));
    Results.push_back(Success);
    Results.push_back(Res.getValue(2));
    break;
  }
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}

void
SystemZTargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}

const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
  switch ((SystemZISD::NodeType)Opcode) {
    case SystemZISD::FIRST_NUMBER: break;
    OPCODE(RET_FLAG);
    OPCODE(CALL);
    OPCODE(SIBCALL);
    OPCODE(TLS_GDCALL);
    OPCODE(TLS_LDCALL);
    OPCODE(PCREL_WRAPPER);
    OPCODE(PCREL_OFFSET);
    OPCODE(IABS);
    OPCODE(ICMP);
    OPCODE(FCMP);
    OPCODE(TM);
    OPCODE(BR_CCMASK);
    OPCODE(SELECT_CCMASK);
    OPCODE(ADJDYNALLOC);
    OPCODE(POPCNT);
    OPCODE(SMUL_LOHI);
    OPCODE(UMUL_LOHI);
    OPCODE(SDIVREM);
    OPCODE(UDIVREM);
    OPCODE(SADDO);
    OPCODE(SSUBO);
    OPCODE(UADDO);
    OPCODE(USUBO);
    OPCODE(ADDCARRY);
    OPCODE(SUBCARRY);
    OPCODE(GET_CCMASK);
    OPCODE(MVC);
    OPCODE(MVC_LOOP);
    OPCODE(NC);
    OPCODE(NC_LOOP);
    OPCODE(OC);
    OPCODE(OC_LOOP);
    OPCODE(XC);
    OPCODE(XC_LOOP);
    OPCODE(CLC);
    OPCODE(CLC_LOOP);
    OPCODE(STPCPY);
    OPCODE(STRCMP);
    OPCODE(SEARCH_STRING);
    OPCODE(IPM);
    OPCODE(MEMBARRIER);
    OPCODE(TBEGIN);
    OPCODE(TBEGIN_NOFLOAT);
    OPCODE(TEND);
    OPCODE(BYTE_MASK);
    OPCODE(ROTATE_MASK);
    OPCODE(REPLICATE);
    OPCODE(JOIN_DWORDS);
    OPCODE(SPLAT);
    OPCODE(MERGE_HIGH);
    OPCODE(MERGE_LOW);
    OPCODE(SHL_DOUBLE);
    OPCODE(PERMUTE_DWORDS);
    OPCODE(PERMUTE);
    OPCODE(PACK);
    OPCODE(PACKS_CC);
    OPCODE(PACKLS_CC);
    OPCODE(UNPACK_HIGH);
    OPCODE(UNPACKL_HIGH);
    OPCODE(UNPACK_LOW);
    OPCODE(UNPACKL_LOW);
    OPCODE(VSHL_BY_SCALAR);
    OPCODE(VSRL_BY_SCALAR);
    OPCODE(VSRA_BY_SCALAR);
    OPCODE(VSUM);
    OPCODE(VICMPE);
    OPCODE(VICMPH);
    OPCODE(VICMPHL);
    OPCODE(VICMPES);
    OPCODE(VICMPHS);
    OPCODE(VICMPHLS);
    OPCODE(VFCMPE);
    OPCODE(VFCMPH);
    OPCODE(VFCMPHE);
    OPCODE(VFCMPES);
    OPCODE(VFCMPHS);
    OPCODE(VFCMPHES);
    OPCODE(VFTCI);
    OPCODE(VEXTEND);
    OPCODE(VROUND);
    OPCODE(VTM);
    OPCODE(VFAE_CC);
    OPCODE(VFAEZ_CC);
    OPCODE(VFEE_CC);
    OPCODE(VFEEZ_CC);
    OPCODE(VFENE_CC);
    OPCODE(VFENEZ_CC);
    OPCODE(VISTR_CC);
    OPCODE(VSTRC_CC);
    OPCODE(VSTRCZ_CC);
    OPCODE(VSTRS_CC);
    OPCODE(VSTRSZ_CC);
    OPCODE(TDC);
    OPCODE(ATOMIC_SWAPW);
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_OR);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(ATOMIC_CMP_SWAP);
    OPCODE(ATOMIC_LOAD_128);
    OPCODE(ATOMIC_STORE_128);
    OPCODE(ATOMIC_CMP_SWAP_128);
    OPCODE(LRV);
    OPCODE(STRV);
    OPCODE(VLER);
    OPCODE(VSTER);
    OPCODE(PREFETCH);
  }
  return nullptr;
#undef OPCODE
}

// Return true if VT is a vector whose elements are a whole number of bytes
// in width. Also check for the presence of vector support.
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())
    return false;

  return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
}

// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
// producing a result of type ResVT.  Op is a possibly bitcast version
// of the input vector and Index is the index (based on type VecVT) that
// should be extracted.  Return the new extraction if a simplification
// was possible or if Force is true.
SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
                                              EVT VecVT, SDValue Op,
                                              unsigned Index,
                                              DAGCombinerInfo &DCI,
                                              bool Force) const {
  SelectionDAG &DAG = DCI.DAG;

  // The number of bytes being extracted.
  unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();

  for (;;) {
    unsigned Opcode = Op.getOpcode();
    if (Opcode == ISD::BITCAST)
      // Look through bitcasts.
      Op = Op.getOperand(0);
    else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) &&
             canTreatAsByteVector(Op.getValueType())) {
      // Get a VPERM-like permute mask and see whether the bytes covered
      // by the extracted element are a contiguous sequence from one
      // source operand.
      SmallVector<int, SystemZ::VectorBytes> Bytes;
      if (!getVPermMask(Op, Bytes))
        break;
      int First;
      if (!getShuffleInput(Bytes, Index * BytesPerElement,
                           BytesPerElement, First))
        break;
      if (First < 0)
        return DAG.getUNDEF(ResVT);
      // Make sure the contiguous sequence starts at a multiple of the
      // original element size.
      unsigned Byte = unsigned(First) % Bytes.size();
      if (Byte % BytesPerElement != 0)
        break;
      // We can get the extracted value directly from an input.
      Index = Byte / BytesPerElement;
      Op = Op.getOperand(unsigned(First) / Bytes.size());
      Force = true;
    } else if (Opcode == ISD::BUILD_VECTOR &&
               canTreatAsByteVector(Op.getValueType())) {
      // We can only optimize this case if the BUILD_VECTOR elements are
      // at least as wide as the extracted value.
      EVT OpVT = Op.getValueType();
      unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
      if (OpBytesPerElement < BytesPerElement)
        break;
      // Make sure that the least-significant bit of the extracted value
      // is the least-significant bit of an input.
      unsigned End = (Index + 1) * BytesPerElement;
      if (End % OpBytesPerElement != 0)
        break;
      // We're extracting the low part of one operand of the BUILD_VECTOR.
      Op = Op.getOperand(End / OpBytesPerElement - 1);
      if (!Op.getValueType().isInteger()) {
        EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
        Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
        DCI.AddToWorklist(Op.getNode());
      }
      EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
      Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
      if (VT != ResVT) {
        DCI.AddToWorklist(Op.getNode());
        Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
      }
      return Op;
    } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
                Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
                Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
               canTreatAsByteVector(Op.getValueType()) &&
               canTreatAsByteVector(Op.getOperand(0).getValueType())) {
      // Make sure that only the unextended bits are significant.
      EVT ExtVT = Op.getValueType();
      EVT OpVT = Op.getOperand(0).getValueType();
      unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
      unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
      unsigned Byte = Index * BytesPerElement;
      unsigned SubByte = Byte % ExtBytesPerElement;
      unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
      if (SubByte < MinSubByte ||
          SubByte + BytesPerElement > ExtBytesPerElement)
        break;
      // Get the byte offset of the unextended element...
      Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
      // ...then add the byte offset relative to that element.
      Byte += SubByte - MinSubByte;
      if (Byte % BytesPerElement != 0)
        break;
      Op = Op.getOperand(0);
      Index = Byte / BytesPerElement;
      Force = true;
    } else
      break;
  }
  if (Force) {
    if (Op.getValueType() != VecVT) {
      Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
      DCI.AddToWorklist(Op.getNode());
    }
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
                       DAG.getConstant(Index, DL, MVT::i32));
  }
  return SDValue();
}

// Optimize vector operations in scalar value Op on the basis that Op
// is truncated to TruncVT.
SDValue SystemZTargetLowering::combineTruncateExtract(
    const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
  // If we have (trunc (extract_vector_elt X, Y)), try to turn it into
  // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
  // of type TruncVT.
  if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      TruncVT.getSizeInBits() % 8 == 0) {
    SDValue Vec = Op.getOperand(0);
    EVT VecVT = Vec.getValueType();
    if (canTreatAsByteVector(VecVT)) {
      if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
        unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
        unsigned TruncBytes = TruncVT.getStoreSize();
        if (BytesPerElement % TruncBytes == 0) {
          // Calculate the value of Y' in the above description.  We are
          // splitting the original elements into Scale equal-sized pieces
          // and for truncation purposes want the last (least-significant)
          // of these pieces for IndexN.  This is easiest to do by calculating
          // the start index of the following element and then subtracting 1.
          unsigned Scale = BytesPerElement / TruncBytes;
          unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
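          // For example, truncating the i32 element at index 1 of a
          // v4i32 to i8 gives Scale = 4 and NewIndex = 7 in the
          // corresponding v16i8 vector.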

          // Defer the creation of the bitcast from X to combineExtract,
          // which might be able to optimize the extraction.
          VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
                                   VecVT.getStoreSize() / TruncBytes);
          EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
          return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
        }
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) {
    auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (TrueOp && FalseOp) {
      SDLoc DL(N0);
      SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT),
                        DAG.getConstant(FalseOp->getZExtValue(), DL, VT),
                        N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) };
      SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops);
      // If N0 has multiple uses, change other uses as well.
      if (!N0.hasOneUse()) {
        SDValue TruncSelect =
          DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect);
        DCI.CombineTo(N0.getNode(), TruncSelect);
      }
      return NewSelect;
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (sext_in_reg (setcc LHS, RHS, COND), i1)
  // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1)
  // into (select_cc LHS, RHS, -1, 0, COND)
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
  if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND)
    N0 = N0.getOperand(0);
  if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) {
    SDLoc DL(N0);
    SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1),
                      DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT),
                      N0.getOperand(2) };
    return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (sext (ashr (shl X, C1), C2)) to
  // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
  // cheap as narrower ones.
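  // For example, with an i32 N0 and an i64 result, Extra is 32, so
  // (sext (sra (shl X, 3), 5)) becomes
  // (sra (shl (anyext X), 35), 37).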
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
    auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    SDValue Inner = N0.getOperand(0);
    if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
      if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
        unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
        unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
        unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
        EVT ShiftVT = N0.getOperand(1).getValueType();
        SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
                                  Inner.getOperand(0));
        SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
                                  DAG.getConstant(NewShlAmt, SDLoc(Inner),
                                                  ShiftVT));
        return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
                           DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opcode = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
    // (z_merge_* 0, 0) -> 0.  This mostly helps the use of VLLEZF
    // for v4f32.
    if (Op1 == N->getOperand(0))
      return Op1;
    // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
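    // For example, (z_merge_high (v16i8 0), X) can instead be
    // (z_unpackl_high X) : v8i16, which zero-extends the high byte
    // elements of X and needs no explicit zero operand.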
    EVT VT = Op1.getValueType();
    unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
    if (ElemBytes <= 4) {
      Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
                SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
      EVT InVT = VT.changeVectorElementTypeToInteger();
      EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
                                   SystemZ::VectorBytes / ElemBytes / 2);
      if (VT != InVT) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
      DCI.AddToWorklist(Op.getNode());
      return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT LdVT = N->getValueType(0);
  if (LdVT.isVector() || LdVT.isInteger())
    return SDValue();
  // If a scalar load is REPLICATEd but also has other use(s), rewrite those
  // other use(s) to take the first element of the REPLICATE instead of the
  // load.  Otherwise instruction selection will not produce a VLREP.  Avoid
  // extracting to a GPR, so only do this for floating-point loads.
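  //
  // For example, if a (load f64) feeds both a REPLICATE and an fadd,
  // rewrite the fadd to use (extract_vector_elt (REPLICATE ...), 0) so
  // that the REPLICATE becomes the only user of the loaded value.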

  SDValue Replicate;
  SmallVector<SDNode*, 8> OtherUses;
  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() == SystemZISD::REPLICATE) {
      if (Replicate)
        return SDValue(); // Should never happen
      Replicate = SDValue(*UI, 0);
    } else if (UI.getUse().getResNo() == 0)
      OtherUses.push_back(*UI);
  }
  if (!Replicate || OtherUses.empty())
    return SDValue();

  SDLoc DL(N);
  SDValue Extract0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, LdVT,
                              Replicate, DAG.getConstant(0, DL, MVT::i32));
  // Update uses of the loaded Value while preserving old chains.
  for (SDNode *U : OtherUses) {
    SmallVector<SDValue, 8> Ops;
    for (SDValue Op : U->ops())
      Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op);
    DAG.UpdateNodeOperands(U, Ops);
  }
  return SDValue(N, 0);
}

bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
    return true;
  if (Subtarget.hasVectorEnhancements2())
    if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64)
      return true;
  return false;
}

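// Return true if the given shuffle mask M reverses the order of the
// elements of a 128-bit vector of type VT, e.g. <3, 2, 1, 0> for
// v4i32 (UNDEF lanes are ignored).  Such element swaps can use the
// VLER/VSTER instructions of the vector-enhancements facility 2.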
static bool isVectorElementSwap(ArrayRef<int> M, EVT VT) {
  if (!VT.isVector() || !VT.isSimple() ||
      VT.getSizeInBits() != 128 ||
      VT.getScalarSizeInBits() % 8 != 0)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != NumElts - 1 - i)
      return false;
  }

  return true;
}

SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();
  // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
  // for the extraction to be done on a vMiN value, so that we can use VSTE.
  // If X has wider elements then convert it to:
  // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
  if (MemVT.isInteger() && SN->isTruncatingStore()) {
    if (SDValue Value =
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());

      // Rewrite the store with the new form of stored value.
      return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
                               SN->getBasePtr(), SN->getMemoryVT(),
                               SN->getMemOperand());
    }
  }
  // Combine STORE (BSWAP) into STRVH/STRV/STRVG/VSTBR
  if (!SN->isTruncatingStore() &&
      Op1.getOpcode() == ISD::BSWAP &&
      Op1.getNode()->hasOneUse() &&
      canLoadStoreByteSwapped(Op1.getValueType())) {
    SDValue BSwapOp = Op1.getOperand(0);

    if (BSwapOp.getValueType() == MVT::i16)
      BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);

    SDValue Ops[] = {
      N->getOperand(0), BSwapOp, N->getOperand(2)
    };

    return DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N),
                                   DAG.getVTList(MVT::Other),
                                   Ops, MemVT, SN->getMemOperand());
  }
  // Combine STORE (element-swap) into VSTER
  if (!SN->isTruncatingStore() &&
      Op1.getOpcode() == ISD::VECTOR_SHUFFLE &&
      Op1.getNode()->hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
    ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op1.getNode());
    ArrayRef<int> ShuffleMask = SVN->getMask();
    if (isVectorElementSwap(ShuffleMask, Op1.getValueType())) {
      SDValue Ops[] = {
        N->getOperand(0), Op1.getOperand(0), N->getOperand(2)
      };

      return DAG.getMemIntrinsicNode(SystemZISD::VSTER, SDLoc(N),
                                     DAG.getVTList(MVT::Other),
                                     Ops, MemVT, SN->getMemOperand());
    }
  }

  return SDValue();
}

SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine element-swap (LOAD) into VLER
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
    ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    ArrayRef<int> ShuffleMask = SVN->getMask();
    if (isVectorElementSwap(ShuffleMask, N->getValueType(0))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);

      // Create the element-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr()   // Ptr
      };
      SDValue ESLoad =
        DAG.getMemIntrinsicNode(SystemZISD::VLER, SDLoc(N),
                                DAG.getVTList(LD->getValueType(0), MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // First, combine the VECTOR_SHUFFLE away.  This makes the value produced
      // by the load dead.
      DCI.CombineTo(N, ESLoad);

      // Next, combine the load away; we give it a bogus result value but a
      // real chain result.  The result value is dead because the shuffle is
      // dead.
      DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  if (!Subtarget.hasVector())
    return SDValue();

  // Look through bitcasts that retain the number of vector elements.
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
      Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

  // Pull BSWAP out of a vector extraction.
  if (Op.getOpcode() == ISD::BSWAP && Op.hasOneUse()) {
    EVT VecVT = Op.getValueType();
    EVT EltVT = VecVT.getVectorElementType();
    Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), EltVT,
                     Op.getOperand(0), N->getOperand(1));
    DCI.AddToWorklist(Op.getNode());
    Op = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Op);
    if (EltVT != N->getValueType(0)) {
      DCI.AddToWorklist(Op.getNode());
      Op = DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Op);
    }
    return Op;
  }

  // Try to simplify a vector extraction.
  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
    SDValue Op0 = N->getOperand(0);
    EVT VecVT = Op0.getValueType();
    return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                          IndexN->getZExtValue(), DCI, false);
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // (join_dwords X, X) == (replicate X)
  if (N->getOperand(0) == N->getOperand(1))
    return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
                       N->getOperand(0));
  return SDValue();
}

SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())
    return SDValue();

  // (fpround (extract_vector_elt X 0))
  // (fpround (extract_vector_elt X 1)) ->
  // (extract_vector_elt (VROUND X) 0)
  // (extract_vector_elt (VROUND X) 2)
  //
  // This is a special case since the target doesn't really support v2f32s.
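  // VROUND is selected to VLEDB, which writes the two rounded singles
  // into the even lanes of the v4f32 result; that is why the extract
  // indices become 0 and 2.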
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(0);
  if (N->getValueType(0) == MVT::f32 &&
      Op0.hasOneUse() &&
      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Op0.getOperand(0).getValueType() == MVT::v2f64 &&
      Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
    SDValue Vec = Op0.getOperand(0);
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() &&
          U->hasOneUse() &&
          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          U->getOperand(0) == Vec &&
          U->getOperand(1).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
        SDValue OtherRound = SDValue(*U->use_begin(), 0);
        if (OtherRound.getOpcode() == ISD::FP_ROUND &&
            OtherRound.getOperand(0) == SDValue(U, 0) &&
            OtherRound.getValueType() == MVT::f32) {
          SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
                                       MVT::v4f32, Vec);
          DCI.AddToWorklist(VRound.getNode());
          SDValue Extract1 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
                        VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
          DCI.AddToWorklist(Extract1.getNode());
          DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
          SDValue Extract0 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
                        VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
          return Extract0;
        }
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())
    return SDValue();

  // (fpextend (extract_vector_elt X 0))
  // (fpextend (extract_vector_elt X 2)) ->
  // (extract_vector_elt (VEXTEND X) 0)
  // (extract_vector_elt (VEXTEND X) 1)
  //
  // This is a special case since the target doesn't really support v2f32s.
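  // VEXTEND is selected to VLDEB, which extends the singles in the
  // even lanes of the v4f32 source; that is why the source extract
  // indices are 0 and 2.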
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(0);
  if (N->getValueType(0) == MVT::f64 &&
      Op0.hasOneUse() &&
      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Op0.getOperand(0).getValueType() == MVT::v4f32 &&
      Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
    SDValue Vec = Op0.getOperand(0);
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() &&
          U->hasOneUse() &&
          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          U->getOperand(0) == Vec &&
          U->getOperand(1).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
        SDValue OtherExtend = SDValue(*U->use_begin(), 0);
        if (OtherExtend.getOpcode() == ISD::FP_EXTEND &&
            OtherExtend.getOperand(0) == SDValue(U, 0) &&
            OtherExtend.getValueType() == MVT::f64) {
          SDValue VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N),
                                        MVT::v2f64, Vec);
          DCI.AddToWorklist(VExtend.getNode());
          SDValue Extract1 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64,
                        VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32));
          DCI.AddToWorklist(Extract1.getNode());
          DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1);
          SDValue Extract0 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64,
                        VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
          return Extract0;
        }
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine BSWAP (LOAD) into LRVH/LRV/LRVG/VLBR
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {
    SDValue Load = N->getOperand(0);
    LoadSDNode *LD = cast<LoadSDNode>(Load);

    // Create the byte-swapping load.
    SDValue Ops[] = {
      LD->getChain(),    // Chain
      LD->getBasePtr()   // Ptr
    };
    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
      LoadVT = MVT::i32;
    SDValue BSLoad =
      DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
                              DAG.getVTList(LoadVT, MVT::Other),
                              Ops, LD->getMemoryVT(), LD->getMemOperand());

    // If this is an i16 load, insert the truncate.
    SDValue ResVal = BSLoad;
    if (N->getValueType(0) == MVT::i16)
      ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);

    // First, combine the bswap away.  This makes the value produced by the
    // load dead.
    DCI.CombineTo(N, ResVal);

    // Next, combine the load away; we give it a bogus result value but a
    // real chain result.  The result value is dead because the bswap is
    // dead.
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

    // Return N so it doesn't get rechecked!
    return SDValue(N, 0);
  }

  // Look through bitcasts that retain the number of vector elements.
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
      Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

  // Push BSWAP into a vector insertion if at least one side then simplifies.
  if (Op.getOpcode() == ISD::INSERT_VECTOR_ELT && Op.hasOneUse()) {
    SDValue Vec = Op.getOperand(0);
    SDValue Elt = Op.getOperand(1);
    SDValue Idx = Op.getOperand(2);

    if (DAG.isConstantIntBuildVectorOrConstantInt(Vec) ||
        Vec.getOpcode() == ISD::BSWAP || Vec.isUndef() ||
        DAG.isConstantIntBuildVectorOrConstantInt(Elt) ||
        Elt.getOpcode() == ISD::BSWAP || Elt.isUndef() ||
        (canLoadStoreByteSwapped(N->getValueType(0)) &&
         ISD::isNON_EXTLoad(Elt.getNode()) && Elt.hasOneUse())) {
      EVT VecVT = N->getValueType(0);
      EVT EltVT = N->getValueType(0).getVectorElementType();
      if (VecVT != Vec.getValueType()) {
        Vec = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Vec);
        DCI.AddToWorklist(Vec.getNode());
      }
      if (EltVT != Elt.getValueType()) {
        Elt = DAG.getNode(ISD::BITCAST, SDLoc(N), EltVT, Elt);
        DCI.AddToWorklist(Elt.getNode());
      }
      Vec = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Vec);
      DCI.AddToWorklist(Vec.getNode());
      Elt = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Elt);
      DCI.AddToWorklist(Elt.getNode());
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VecVT,
                         Vec, Elt, Idx);
    }
  }

  // Push BSWAP into a vector shuffle if at least one side then simplifies.
  ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(Op);
  if (SV && Op.hasOneUse()) {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
        Op0.getOpcode() == ISD::BSWAP || Op0.isUndef() ||
        DAG.isConstantIntBuildVectorOrConstantInt(Op1) ||
        Op1.getOpcode() == ISD::BSWAP || Op1.isUndef()) {
      EVT VecVT = N->getValueType(0);
      if (VecVT != Op0.getValueType()) {
        Op0 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op0);
        DCI.AddToWorklist(Op0.getNode());
      }
      if (VecVT != Op1.getValueType()) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      Op0 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op0);
      DCI.AddToWorklist(Op0.getNode());
      Op1 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op1);
      DCI.AddToWorklist(Op1.getNode());
      return DAG.getVectorShuffle(VecVT, SDLoc(N), Op0, Op1, SV->getMask());
    }
  }

  return SDValue();
}

static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
  // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
  // set by the CCReg instruction using the CCValid / CCMask masks.
  // If the CCReg instruction is itself an ICMP testing the condition
  // code set by some other instruction, see whether we can directly
  // use that condition code.

  // Verify that we have an ICMP against some constant.
  if (CCValid != SystemZ::CCMASK_ICMP)
    return false;
  auto *ICmp = CCReg.getNode();
  if (ICmp->getOpcode() != SystemZISD::ICMP)
    return false;
  auto *CompareLHS = ICmp->getOperand(0).getNode();
  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
  if (!CompareRHS)
    return false;

  // Optimize the case where CompareLHS is a SELECT_CCMASK.
  if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
    // Verify that we have an appropriate mask for an EQ or NE comparison.
    bool Invert = false;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      Invert = !Invert;
    else if (CCMask != SystemZ::CCMASK_CMP_EQ)
      return false;

    // Verify that the ICMP compares against one of the select values.
    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
    if (!TrueVal)
      return false;
    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!FalseVal)
      return false;
    if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
      Invert = !Invert;
    else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
      return false;

    // Compute the effective CC mask for the new branch or select.
    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
    if (!NewCCValid || !NewCCMask)
      return false;
    CCValid = NewCCValid->getZExtValue();
    CCMask = NewCCMask->getZExtValue();
    if (Invert)
      CCMask ^= CCValid;

    // Return the updated CCReg link.
    CCReg = CompareLHS->getOperand(4);
    return true;
  }

  // Optimize the case where CompareLHS is (SRA (SHL (IPM))).
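  // IPM inserts CC into bits 29:28 of the result register, so shifting
  // left by 30 - IPM_CC and arithmetically right by 30 sign-extends CC
  // into the whole register (CC 0-3 become 0, 1, -2, -1).  The compare
  // against zero is therefore really a test of CC, with LT/GT and
  // LE/GE swapped below because CC values 2 and 3 come out negative.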
  if (CompareLHS->getOpcode() == ISD::SRA) {
    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!SRACount || SRACount->getZExtValue() != 30)
      return false;
    auto *SHL = CompareLHS->getOperand(0).getNode();
    if (SHL->getOpcode() != ISD::SHL)
      return false;
    auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
    if (!SHLCount || SHLCount->getZExtValue() != 30 - SystemZ::IPM_CC)
      return false;
    auto *IPM = SHL->getOperand(0).getNode();
    if (IPM->getOpcode() != SystemZISD::IPM)
      return false;

    // Avoid introducing CC spills (because SRA would clobber CC).
    if (!CompareLHS->hasOneUse())
      return false;
    // Verify that the ICMP compares against zero.
    if (CompareRHS->getZExtValue() != 0)
      return false;

    // Compute the effective CC mask for the new branch or select.
    switch (CCMask) {
    case SystemZ::CCMASK_CMP_EQ: break;
    case SystemZ::CCMASK_CMP_NE: break;
    case SystemZ::CCMASK_CMP_LT: CCMask = SystemZ::CCMASK_CMP_GT; break;
    case SystemZ::CCMASK_CMP_GT: CCMask = SystemZ::CCMASK_CMP_LT; break;
    case SystemZ::CCMASK_CMP_LE: CCMask = SystemZ::CCMASK_CMP_GE; break;
    case SystemZ::CCMASK_CMP_GE: CCMask = SystemZ::CCMASK_CMP_LE; break;
    default: return false;
    }

    // Return the updated CCReg link.
    CCReg = IPM->getOperand(0);
    return true;
  }

  return false;
}

SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue Chain = N->getOperand(0);
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
                       Chain,
                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       N->getOperand(3), CCReg);
  return SDValue();
}

SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                       N->getOperand(0), N->getOperand(1),
                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       CCReg);
  return SDValue();
}


SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {

  // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();

  SDValue Select = N->getOperand(0);
  if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
    return SDValue();

  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
    return SDValue();
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();

  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
    return SDValue();
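  // Only selects of a (nonzero, 0) or (0, nonzero) constant pair can be
  // folded; the first form matches SelectCCMask directly, the second is
  // handled below by inverting the mask.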
  if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0)
    ;
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0)
    SelectCCMaskVal ^= SelectCCValidVal;
  else
    return SDValue();

  if (SelectCCValidVal & ~CCValidVal)
    return SDValue();
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
    return SDValue();

  return Select->getOperand(4);
}
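
// Sketch of the fold above (illustrative): for
//   %sel = SELECT_CCMASK 1, 0, %valid, %mask, %cc
//   %res = GET_CCMASK %sel, %valid', %mask'
// with %valid a subset of %valid' and %mask' agreeing with %mask on those
// bits, the select/re-test round trip is redundant and %cc is returned
// directly.  A 0/1 select (inverted outcome) is first normalized by XOR-ing
// the select's mask with its valid bits.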

SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  // In the case where the divisor is a vector of constants a cheaper
  // sequence of instructions can replace the divide. BuildSDIV is called to
  // do this during DAG combining, but it only succeeds when it can build a
  // multiplication node. The only option for SystemZ is ISD::SMUL_LOHI, and
  // since it is not Legal but Custom it can only happen before
  // legalization. Therefore we must scalarize this early, before the first
  // DAG combine run ("Combine 1"). For widened vectors, this is already the
  // result of type legalization.
  if (DCI.Level == BeforeLegalizeTypes && VT.isVector() && isTypeLegal(VT) &&
      DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1)))
    return DAG.UnrollVectorOp(N);
  return SDValue();
}
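
// For example (illustrative): (sdiv <2 x i64> %x, <7, 7>) is unrolled here
// into two scalar i64 divisions.  BuildSDIV can then still turn each one
// into a multiply-high sequence via ISD::SMUL_LOHI, which would no longer
// be possible once the nodes survive into legalization.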

SDValue SystemZTargetLowering::unwrapAddress(SDValue N) const {
  if (N->getOpcode() == SystemZISD::PCREL_WRAPPER)
    return N->getOperand(0);
  return N;
}

SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  switch(N->getOpcode()) {
  default: break;
  case ISD::ZERO_EXTEND:        return combineZERO_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND:        return combineSIGN_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND_INREG:  return combineSIGN_EXTEND_INREG(N, DCI);
  case SystemZISD::MERGE_HIGH:
  case SystemZISD::MERGE_LOW:   return combineMERGE(N, DCI);
  case ISD::LOAD:               return combineLOAD(N, DCI);
  case ISD::STORE:              return combineSTORE(N, DCI);
  case ISD::VECTOR_SHUFFLE:     return combineVECTOR_SHUFFLE(N, DCI);
  case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
  case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
  case ISD::FP_ROUND:           return combineFP_ROUND(N, DCI);
  case ISD::FP_EXTEND:          return combineFP_EXTEND(N, DCI);
  case ISD::BSWAP:              return combineBSWAP(N, DCI);
  case SystemZISD::BR_CCMASK:   return combineBR_CCMASK(N, DCI);
  case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI);
  case SystemZISD::GET_CCMASK:  return combineGET_CCMASK(N, DCI);
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:               return combineIntDIVREM(N, DCI);
  }

  return SDValue();
}

// Return the demanded elements for the OpNo source operand of Op. DemandedElts
// are for Op.
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
                                    unsigned OpNo) {
  EVT VT = Op.getValueType();
  unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1);
  APInt SrcDemE;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
      // VECTOR PACK truncates the elements of two source vectors into one.
      SrcDemE = DemandedElts;
      if (OpNo == 2)
        SrcDemE.lshrInPlace(NumElts / 2);
      SrcDemE = SrcDemE.trunc(NumElts / 2);
      break;
    // VECTOR UNPACK extends half the elements of the source vector.
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, 0);
      break;
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, NumElts);
      break;
    case Intrinsic::s390_vpdi: {
      // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source.
      SrcDemE = APInt(NumElts, 0);
      if (!DemandedElts[OpNo - 1])
        break;
      unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
      // Demand input element 0 or 1, given by the mask bit value.
      SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
      break;
    }
    case Intrinsic::s390_vsldb: {
      // VECTOR SHIFT LEFT DOUBLE BY BYTE
      assert(VT == MVT::v16i8 && "Unexpected type.");
      unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      assert (FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
      unsigned NumSrc0Els = 16 - FirstIdx;
      SrcDemE = APInt(NumElts, 0);
      if (OpNo == 1) {
        APInt DemEls = DemandedElts.trunc(NumSrc0Els);
        SrcDemE.insertBits(DemEls, FirstIdx);
      } else {
        APInt DemEls = DemandedElts.lshr(NumSrc0Els);
        SrcDemE.insertBits(DemEls, 0);
      }
      break;
    }
    case Intrinsic::s390_vperm:
      SrcDemE = APInt(NumElts, 1);
      break;
    default:
      llvm_unreachable("Unhandled intrinsic.");
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
      // Scalar operand.
      SrcDemE = APInt(1, 1);
      break;
    case SystemZISD::SELECT_CCMASK:
      SrcDemE = DemandedElts;
      break;
    default:
      llvm_unreachable("Unhandled opcode.");
      break;
    }
  }
  return SrcDemE;
}
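
// Worked example for the PACK case above (a sketch): a v16i8 result packed
// from two v8i16 sources has NumElts == 16, so demanding result byte 10
// with OpNo == 2 shifts the mask right by 8 and truncates it to 8 bits,
// i.e. it demands halfword 2 of the second source.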

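// Compute the known bits of a node built from two source operands (the PACK,
// PERMUTE and SELECT_CCMASK cases handled here): a bit is known in the result
// only if it is known, with the same value, in both inputs for their
// respective demanded elements.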
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known,
                                  const APInt &DemandedElts,
                                  const SelectionDAG &DAG, unsigned Depth,
                                  unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  KnownBits LHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  KnownBits RHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
  Known.Zero = LHSKnown.Zero & RHSKnown.Zero;
  Known.One = LHSKnown.One & RHSKnown.One;
}

void
SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  Known.resetAll();

  // Intrinsic CC result is returned in the two low bits.
  unsigned tmp0, tmp1; // not used
  if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) {
    Known.Zero.setBitsFrom(2);
    return;
  }
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
    return;
  assert (Known.getBitWidth() == VT.getScalarSizeInBits() &&
          "KnownBits does not match VT in bitwidth");
  assert ((!VT.isVector() ||
           (DemandedElts.getBitWidth() == VT.getVectorNumElements())) &&
          "DemandedElts does not match VT number of elements");
  unsigned BitWidth = Known.getBitWidth();
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    bool IsLogical = false;
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1);
      break;
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      IsLogical = true;
      LLVM_FALLTHROUGH;
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue SrcOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0);
      Known = DAG.computeKnownBits(SrcOp, SrcDemE, Depth + 1);
      if (IsLogical)
        Known = Known.zext(BitWidth, true);
      else
        Known = Known.sext(BitWidth);
      break;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
    case SystemZISD::SELECT_CCMASK:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0);
      break;
    case SystemZISD::REPLICATE: {
      SDValue SrcOp = Op.getOperand(0);
      Known = DAG.computeKnownBits(SrcOp, Depth + 1);
      if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
        Known = Known.sext(BitWidth); // VREPI sign extends the immediate.
      break;
    }
    default:
      break;
    }
  }

  // Known has the width of the source operand(s). Adjust if needed to match
  // the passed bitwidth.
  if (Known.getBitWidth() != BitWidth)
    Known = Known.zextOrTrunc(BitWidth, false);
}

static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
                                        const SelectionDAG &DAG, unsigned Depth,
                                        unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  if (LHS == 1) return 1; // Early out.
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
  if (RHS == 1) return 1; // Early out.
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getScalarSizeInBits();
  if (SrcBitWidth > VTBits) { // PACK
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
    return 1;
  }
  assert (SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
  return Common;
}
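
// Worked example (sketch): for PACKS from two v8i16 sources into v16i8,
// SrcBitWidth == 16 and VTBits == 8, so SrcExtraBits == 8.  If both sources
// are known to carry 12 sign bits per halfword, Common == 12 and each packed
// byte element keeps 12 - 8 == 4 sign bits.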

unsigned
SystemZTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  if (Op.getResNo() != 0)
    return 1;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1);
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue PackedOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1);
      unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1);
      EVT VT = Op.getValueType();
      unsigned VTBits = VT.getScalarSizeInBits();
      Tmp += VTBits - PackedOp.getScalarValueSizeInBits();
      return Tmp;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::SELECT_CCMASK:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0);
    default:
      break;
    }
  }

  return 1;
}

//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Create a new basic block after MBB.
static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
  MachineFunction &MF = *MBB->getParent();
  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
  MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
  return NewMBB;
}

// Split MBB after MI and return the new block (the one that contains
// instructions after MI).
static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI,
                                          MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}

// Split MBB before MI and return the new block (the one that contains MI).
static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI,
                                           MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}

// Force base value Base into a register before MI.  Return the register.
static Register forceReg(MachineInstr &MI, MachineOperand &Base,
                         const SystemZInstrInfo *TII) {
  if (Base.isReg())
    return Base.getReg();

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
      .add(Base)
      .addImm(0)
      .addReg(0);
  return Reg;
}
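
// For example (schematically): a frame-index base operand is materialized as
//   %reg = LA <fi#N>, 0, %noreg
// so callers that need a plain ADDR64 register value (e.g. the loop form of
// emitMemMemWrapper below) can use and increment it.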

// The CC operand of MI might be missing a kill marker because there
// were multiple uses of CC, and ISel didn't know which to mark.
// Figure out whether MI should have had a kill marker.
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {
  // Scan forward through BB for a use/def of CC.
  MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI)));
  for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(SystemZ::CC))
      return false;
    if (mi.definesRegister(SystemZ::CC))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CC is live into a
  // successor.
  if (miI == MBB->end()) {
    for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI)
      if ((*SI)->isLiveIn(SystemZ::CC))
        return false;
  }

  return true;
}

// Return true if it is OK for this Select pseudo-opcode to be cascaded
// together with other Select pseudo-opcodes into a single basic-block with
// a conditional jump around it.
static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return true;

  default:
    return false;
  }
}

// Helper function, which inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from Selects.
static void createPHIsForSelects(SmallVector<MachineInstr*, 8> &Selects,
                                 MachineBasicBlock *TrueMBB,
                                 MachineBasicBlock *FalseMBB,
                                 MachineBasicBlock *SinkMBB) {
  MachineFunction *MF = TrueMBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineInstr *FirstMI = Selects.front();
  unsigned CCValid = FirstMI->getOperand(3).getImm();
  unsigned CCMask = FirstMI->getOperand(4).getImm();

  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one.  Later Selects may reference the results of earlier Selects, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from each earlier PHI's
  // destination register to the pair of registers that went into it.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;

  for (auto MI : Selects) {
    Register DestReg = MI->getOperand(0).getReg();
    Register TrueReg = MI->getOperand(1).getReg();
    Register FalseReg = MI->getOperand(2).getReg();

    // If the Select we are expanding uses the condition opposite to the
    // branch we emitted, swap the operands of the PHI that is about to be
    // generated.
    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
      std::swap(TrueReg, FalseReg);

    if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end())
      TrueReg = RegRewriteTable[TrueReg].first;

    if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end())
      FalseReg = RegRewriteTable[FalseReg].second;

    DebugLoc DL = MI->getDebugLoc();
    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
      .addReg(TrueReg).addMBB(TrueMBB)
      .addReg(FalseReg).addMBB(FalseMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
  }

  MF->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
}
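
// Illustrative example (a sketch): for two cascaded selects
//   %a = Select32 %t1, %f1, %valid, %mask
//   %b = Select32 %a,  %f2, %valid, %mask
// the PHI for %b cannot name %a as an incoming value, because %a is now
// itself defined by a PHI in SinkMBB; RegRewriteTable rewrites the edge to
// the register that fed %a on that side:
//   %a = phi [ %t1, TrueMBB ], [ %f1, FalseMBB ]
//   %b = phi [ %t1, TrueMBB ], [ %f2, FalseMBB ]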

// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr &MI,
                                  MachineBasicBlock *MBB) const {
  assert(isSelectPseudo(MI) && "Bad call to emitSelect()");
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();

  // If we have a sequence of Select* pseudo instructions using the
  // same condition code value, we want to expand all of them into
  // a single pair of basic blocks using the same condition.
  SmallVector<MachineInstr*, 8> Selects;
  SmallVector<MachineInstr*, 8> DbgValues;
  Selects.push_back(&MI);
  unsigned Count = 0;
  for (MachineBasicBlock::iterator NextMIIt =
         std::next(MachineBasicBlock::iterator(MI));
       NextMIIt != MBB->end(); ++NextMIIt) {
    if (NextMIIt->definesRegister(SystemZ::CC))
      break;
    if (isSelectPseudo(*NextMIIt)) {
      assert(NextMIIt->getOperand(3).getImm() == CCValid &&
             "Bad CCValid operands since CC was not redefined.");
      if (NextMIIt->getOperand(4).getImm() == CCMask ||
          NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask)) {
        Selects.push_back(&*NextMIIt);
        continue;
      }
      break;
    }
    bool User = false;
    for (auto SelMI : Selects)
      if (NextMIIt->readsVirtualRegister(SelMI->getOperand(0).getReg())) {
        User = true;
        break;
      }
    if (NextMIIt->isDebugInstr()) {
      if (User) {
        assert(NextMIIt->isDebugValue() && "Unhandled debug opcode.");
        DbgValues.push_back(&*NextMIIt);
      }
    }
    else if (User || ++Count > 20)
      break;
  }

  MachineInstr *LastMI = Selects.back();
  bool CCKilled =
      (LastMI->killsRegister(SystemZ::CC) || checkCCKill(*LastMI, MBB));
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = splitBlockAfter(LastMI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  // Unless CC was killed in the last Select instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!CCKilled) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  MBB->addSuccessor(JoinMBB);

  //  JoinMBB:
  //   %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
  //  ...
  MBB = JoinMBB;
  createPHIsForSelects(Selects, StartMBB, FalseMBB, MBB);
  for (auto SelMI : Selects)
    SelMI->eraseFromParent();

  MachineBasicBlock::iterator InsertPos = MBB->getFirstNonPHI();
  for (auto DbgMI : DbgValues)
    MBB->splice(InsertPos, StartMBB, DbgMI);

  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true.  If a STORE ON
// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
                                                        MachineBasicBlock *MBB,
                                                        unsigned StoreOpcode,
                                                        unsigned STOCOpcode,
                                                        bool Invert) const {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  Register SrcReg = MI.getOperand(0).getReg();
  MachineOperand Base = MI.getOperand(1);
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  DebugLoc DL = MI.getDebugLoc();

  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);

  // Use STOCOpcode if possible.  We could use different store patterns in
  // order to avoid matching the index register, but the performance trade-offs
  // might be more complicated in that case.
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    if (Invert)
      CCMask ^= CCValid;

    // ISel pattern matching also adds a load memory operand of the same
    // address, so take special care to find the storing memory operand.
    MachineMemOperand *MMO = nullptr;
    for (auto *I : MI.memoperands())
      if (I->isStore()) {
        MMO = I;
        break;
      }

    BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
      .addReg(SrcReg)
      .add(Base)
      .addImm(Disp)
      .addImm(CCValid)
      .addImm(CCMask)
      .addMemOperand(MMO);

    MI.eraseFromParent();
    return MBB;
  }

  // Get the condition needed to branch around the store.
  if (!Invert)
    CCMask ^= CCValid;

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  // Unless CC was killed in the CondStore instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   store %SrcReg, %Disp(%Index,%Base)
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  BuildMI(MBB, DL, TII->get(StoreOpcode))
      .addReg(SrcReg)
      .add(Base)
      .addImm(Disp)
      .addReg(IndexReg);
  MBB->addSuccessor(JoinMBB);

  MI.eraseFromParent();
  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI.  BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
// BitSize is the width of the field in bits, or 0 if this is a partword
// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
// is one of the operands.  Invert says whether the field should be
// inverted after performing BinOpcode (e.g. for NAND).
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
    unsigned BitSize, bool Invert) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  // Src2 can be a register or immediate.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
  Register BitShift = IsSubWord ? MI.getOperand(4).getReg() : Register();
  Register NegBitShift = IsSubWord ? MI.getOperand(5).getReg() : Register();
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigVal       = MRI.createVirtualRegister(RC);
  Register OldVal        = MRI.createVirtualRegister(RC);
  Register NewVal        = (BinOpcode || IsSubWord ?
                            MRI.createVirtualRegister(RC) : Src2.getReg());
  Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert a basic block for the main loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB  = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    Register Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
    if (BitSize <= 32)
      // XILF with the upper BitSize bits set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(-1U << (32 - BitSize));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
      Register Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
        .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simple binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
        .addReg(RotatedOldVal)
        .add(Src2);
  else if (IsSubWord)
    // Use RISBG to rotate Src2 into position and use it to replace the
    // field in RotatedOldVal.
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
      .addReg(RotatedOldVal).addReg(Src2.getReg())
      .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
      .addReg(OldVal)
      .addReg(NewVal)
      .add(Base)
      .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
// for when the current field should be kept.  BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
    unsigned KeepOldMask, unsigned BitSize) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  Register Src2 = MI.getOperand(3).getReg();
  Register BitShift = (IsSubWord ? MI.getOperand(4).getReg() : Register());
  Register NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : Register());
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigVal       = MRI.createVirtualRegister(RC);
  Register OldVal        = MRI.createVirtualRegister(RC);
  Register NewVal        = MRI.createVirtualRegister(RC);
  Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  Register RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
  Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert 3 basic blocks for the loop.
  MachineBasicBlock *StartMBB  = MBB;
  MachineBasicBlock *DoneMBB   = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB   = emitBlockAfter(StartMBB);
  MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
  MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal     = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CompareOpcode))
    .addReg(RotatedOldVal).addReg(Src2);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  //  UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  //  UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
      .addReg(OldVal)
      .addReg(NewVal)
      .add(Base)
      .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const {

  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  Register OrigCmpVal = MI.getOperand(3).getReg();
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  DebugLoc DL = MI.getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement.
  unsigned LOpcode  = TII->getOpcodeForOffset(SystemZ::L,  Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigOldVal = MRI.createVirtualRegister(RC);
  Register OldVal = MRI.createVirtualRegister(RC);
  Register CmpVal = MRI.createVirtualRegister(RC);
  Register SwapVal = MRI.createVirtualRegister(RC);
  Register StoreVal = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetryCmpVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB  = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB   = emitBlockAfter(LoopMBB);

  //  StartMBB:
  //   ...
  //   %OrigOldVal     = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
      .add(Base)
      .addImm(Disp)
      .addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal        = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal       = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest          = RLL %OldVal, BitSize(%BitShift)
  //                      ^^ The low BitSize bits contain the field
  //                         of interest.
  //   %RetryCmpVal   = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the
  //                         comparison value with those that we loaded,
  //                         so that we can use a full word comparison.
  //   CR %Dest, %RetryCmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(RetryCmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  //  SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the new
  //                         value with those that we loaded.
  //   %StoreVal    = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                      ^^ Rotate the new field to its proper position.
  //   %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
      .addReg(OldVal)
      .addReg(StoreVal)
      .add(Base)
      .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
  // to the block after the loop.  At this point, CC may have been defined
  // either by the CR in LoopMBB or by the CS in SetMBB.
  if (!MI.registerDefIsDead(SystemZ::CC))
    DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}

// Emit a move from two GR64s to a GR128.
MachineBasicBlock *
SystemZTargetLowering::emitPair128(MachineInstr &MI,
                                   MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  Register Lo = MI.getOperand(2).getReg();
  Register Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2)
    .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}

// Emit an extension from a GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
                                                     MachineBasicBlock *MBB,
                                                     bool ClearEven) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}
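
// Schematically, ZEXT128 (ClearEven == true) expands to:
//   %in128    = IMPLICIT_DEF
//   %zero64   = LLILL 0
//   %newin128 = INSERT_SUBREG %in128, %zero64, subreg_h64
//   %dest     = INSERT_SUBREG %newin128, %src, subreg_l64
// i.e. the high 64 bits are explicitly zeroed before the source value is
// inserted into the low half; AEXT128 skips the zeroing.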

MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
  uint64_t DestDisp = MI.getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
  uint64_t SrcDisp = MI.getOperand(3).getImm();
  uint64_t Length = MI.getOperand(4).getImm();

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
                               splitBlockAfter(MI, MBB) : nullptr);

  // Check for the loop form, in which operand 5 is the trip count.
  if (MI.getNumExplicitOperands() > 5) {
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    Register StartCountReg = MI.getOperand(5).getReg();
    Register StartSrcReg   = forceReg(MI, SrcBase, TII);
    Register StartDestReg  = (HaveSingleBase ? StartSrcReg :
                              forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    Register ThisSrcReg  = MRI.createVirtualRegister(RC);
    Register ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                            MRI.createVirtualRegister(RC));
    Register NextSrcReg  = MRI.createVirtualRegister(RC);
    Register NextDestReg = (HaveSingleBase ? NextSrcReg :
                            MRI.createVirtualRegister(RC));

    RC = &SystemZ::GR64BitRegClass;
    Register ThisCountReg = MRI.createVirtualRegister(RC);
    Register NextCountReg = MRI.createVirtualRegister(RC);

    MachineBasicBlock *StartMBB = MBB;
    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);

    //  StartMBB:
    //   # fall through to LoopMBB
    MBB->addSuccessor(LoopMBB);

    //  LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC.  The JLH is used only for CLC.
    MBB = LoopMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    // NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
    MBB = NextMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    DestBase = MachineOperand::CreateReg(NextDestReg, false);
    SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
    Length &= 255;
    if (EndMBB && !Length)
      // If the loop handled the whole CLC range, DoneMBB will be empty with
      // CC live-through into EndMBB, so add it as live-in.
      DoneMBB->addLiveIn(SystemZ::CC);
    MBB = DoneMBB;
  }
  // Handle any remaining bytes with straight-line code.
  while (Length > 0) {
    uint64_t ThisLength = std::min(Length, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
    if (!isUInt<12>(DestDisp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
          .add(DestBase)
          .addImm(DestDisp)
          .addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
          .add(SrcBase)
          .addImm(SrcDisp)
          .addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
        .add(DestBase)
        .addImm(DestDisp)
        .addImm(ThisLength)
        .add(SrcBase)
        .addImm(SrcDisp)
        .setMemRefs(MI.memoperands());
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    Length -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && Length > 0) {
      MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI.eraseFromParent();
  return MBB;
}
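
// Example (sketch): a non-loop MVC with Length == 300 is expanded by the
// straight-line code above into
//   MVC 0(256,%dest), 0(%src)
//   MVC 256(44,%dest), 256(%src)
// while a CLC of the same length additionally branches to EndMBB between
// the two compares, since a later CLC must not run once a difference has
// already been found.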

// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register End1Reg = MI.getOperand(0).getReg();
  Register Start1Reg = MI.getOperand(1).getReg();
  Register Start2Reg = MI.getOperand(2).getReg();
  Register CharReg = MI.getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  Register This1Reg = MRI.createVirtualRegister(RC);
  Register This2Reg = MRI.createVirtualRegister(RC);
  Register End2Reg  = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}

// Update TBEGIN instruction with final opcode and register clobbers.
MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
    bool NoFloat) const {
  MachineFunction &MF = *MBB->getParent();
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();

  // Update opcode.
  MI.setDesc(TII->get(Opcode));

  // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
  // Make sure to add the corresponding GRSM bits if they are missing.
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
  Control |= GPRControlBit[15];
  if (TFI->hasFP(MF))
    Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);

  // Add GPR clobbers.
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
      unsigned Reg = SystemZMC::GR64Regs[I];
      MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }

  // Add FPR/VR clobbers.
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
      for (int I = 0; I < 32; I++) {
        unsigned Reg = SystemZMC::VR128Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    } else {
      for (int I = 0; I < 16; I++) {
        unsigned Reg = SystemZMC::FP64Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    }
  }

  return MBB;
}
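
// E.g. (sketch): a TBEGIN whose GRSM does not already cover the r14/r15
// pair gets GPRControlBit[15] forced on so the stack pointer survives an
// abort, and every GPR pair left out of the mask is attached as an implicit
// def so the register allocator treats those registers as clobbered.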

MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  DebugLoc DL = MI.getDebugLoc();

  Register SrcReg = MI.getOperand(0).getReg();

  // Create new virtual register of the same class as source.
  const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
  Register DstReg = MRI->createVirtualRegister(RC);

  // Replace pseudo with a normal load-and-test that models the def as
  // well.
  BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
    .addReg(SrcReg);
  MI.eraseFromParent();

  return MBB;
}

MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

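  // Atomic read-modify-write operations.  emitAtomicLoadBinary takes the
  // binary opcode to apply (0 for a plain exchange) and the access width
  // in bits, where 0 selects the subword (8/16-bit) path that rotates the
  // field into position within its containing word.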
  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

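  // The *i variants below invert the result of the AND, implementing
  // atomicrmw nand.  A sketch of IR (not from this file) that reaches the
  // ATOMIC_LOAD_NGRi case:
  //
  //   %old = atomicrmw nand i64* %p, i64 %v seq_cst
  //
  // which expands here into a load / NGR / complement / CSG retry loop.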
  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

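  // Min/max: emitAtomicLoadMinMax compares the current value with the
  // operand and keeps the old value whenever the comparison CC lies in the
  // given mask (CCMASK_CMP_LE for min, CCMASK_CMP_GE for max).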
  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

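  // ATOMIC_CMP_SWAPW implements i8/i16 compare-and-swap by rotating the
  // field into position within its containing aligned word and looping
  // on CS.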
  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
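  // Memory-to-memory operations: the *Sequence forms cover short constant
  // lengths with straight-line MVC/NC/OC/XC/CLC instructions, while the
  // *Loop forms expand to a loop; emitMemMemWrapper handles both.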
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
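  // CLST/MVST/SRST can stop early with CC 3 after a CPU-determined number
  // of bytes, so emitStringWrapper places them in a retry loop.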
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, MBB);

  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}

// This is only used by the isel schedulers, and is needed only to prevent
// the compiler from crashing when the list-ilp scheduler is used.
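// (On SystemZ, MVT::Untyped values are even/odd GR128 register pairs, such
// as those produced by CDSG; ADDR128 is used as their representative
// class.)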
const TargetRegisterClass *
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
  return TargetLowering::getRepRegClassFor(VT);
}